code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
'''
File : __init__.py
Author : Nadav Samet
Contact : thesamet@gmail.com
Date : 2010 Jun 17
Description : Webilder webshots plugin package.
'''
from webilder.webshots.utils import get_download_list
from webilder.webshots.utils import get_photo_stream
from webilder.webshots.utils import process_photo
def fetch_photo_info(_config, _photo):
    """No-op: webshots photo metadata already ships inside the .wbz
    archive, so there is nothing extra to fetch here."""
    return None
| thesamet/webilder | src/webilder/webshots/__init__.py | Python | bsd-3-clause | 445 |
from auslib.blobs.base import Blob
class WhitelistBlobV1(Blob):
    """Blob that gates updates on a whitelist of device IMEIs."""
    jsonschema = "whitelist.yml"

    def __init__(self, **kwargs):
        Blob.__init__(self, **kwargs)
        # Default the schema version when the caller did not supply one.
        if "schema_version" not in self:
            self["schema_version"] = 3000

    def isWhitelisted(self, requestIMEI):
        """Return True iff ``requestIMEI`` appears in this blob's whitelist."""
        self.log.debug("Checking to see if IMEI '%s' is whitelisted", requestIMEI)
        for listItem in self['whitelist']:
            if listItem['imei'] == requestIMEI:
                self.log.debug("IMEI is whitelisted")
                return True
        self.log.debug("IMEI not whitelisted")
        return False

    def shouldServeUpdate(self, updateQuery):
        """Serve an update only when the query carries a whitelisted IMEI."""
        self.log.debug(updateQuery)
        requestIMEI = updateQuery.get('IMEI')
        if requestIMEI is None:
            return False
        return self.isWhitelisted(requestIMEI)

    def containsForbiddenDomain(self, product, whitelistedDomains):
        # WhitelistBlobs carry no URLs, so they can never reference a
        # forbidden domain.
        return False
| tieu/balrog | auslib/blobs/whitelist.py | Python | mpl-2.0 | 977 |
import platform as pf
from typing import Any, Iterable, Optional
from .metrics_core import GaugeMetricFamily, Metric
from .registry import Collector, CollectorRegistry, REGISTRY
class PlatformCollector(Collector):
    """Collector for python platform information"""

    def __init__(self,
                 registry: CollectorRegistry = REGISTRY,
                 platform: Optional[Any] = None,
                 ):
        # Accept an injected platform-like module (handy for tests);
        # default to the stdlib ``platform`` module.
        self._platform = platform if platform is not None else pf
        info = self._info()
        # Jython reports system() == "Java" and exposes JVM details.
        if self._platform.system() == "Java":
            info.update(self._java())
        self._metrics = [
            self._add_metric("python_info", "Python platform information", info)
        ]
        if registry:
            registry.register(self)

    def collect(self) -> Iterable[Metric]:
        """Return the gauge(s) built once at construction time."""
        return self._metrics

    @staticmethod
    def _add_metric(name, documentation, data):
        # A single gauge with constant value 1; all the interesting
        # content lives in the label set.
        label_names = list(data.keys())
        label_values = [data[key] for key in label_names]
        gauge = GaugeMetricFamily(name, documentation, labels=label_names)
        gauge.add_metric(label_values, 1)
        return gauge

    def _info(self):
        # Interpreter identity and version split into labeled parts.
        major, minor, patchlevel = self._platform.python_version_tuple()
        return {
            "version": self._platform.python_version(),
            "implementation": self._platform.python_implementation(),
            "major": major,
            "minor": minor,
            "patchlevel": patchlevel
        }

    def _java(self):
        # JVM details from platform.java_ver(); osinfo is unused here.
        java_version, _, vminfo, osinfo = self._platform.java_ver()
        vm_name, vm_release, vm_vendor = vminfo
        return {
            "jvm_version": java_version,
            "jvm_release": vm_release,
            "jvm_vendor": vm_vendor,
            "jvm_name": vm_name
        }
# Default instance; its constructor registers it with the global REGISTRY
# at import time.
PLATFORM_COLLECTOR = PlatformCollector()
"""PlatformCollector in default Registry REGISTRY"""
| prometheus/client_python | prometheus_client/platform_collector.py | Python | apache-2.0 | 1,869 |
# -*- coding: utf-8 -*-
"""
radon.py - Radon and inverse radon transforms
Based on code of Justin K. Romberg
(http://www.clear.rice.edu/elec431/projects96/DSP/bpanalysis.html)
J. Gillam and Chris Griffin.
References:
-B.R. Ramesh, N. Srinivasa, K. Rajgopal, "An Algorithm for Computing
the Discrete Radon Transform With Some Applications", Proceedings of
the Fourth IEEE Region 10 International Conference, TENCON '89, 1989.
-A. C. Kak, Malcolm Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
from __future__ import division
import numpy as np
from scipy.fftpack import fft, ifft, fftfreq
from scipy.interpolate import interp1d
from ._warps_cy import _warp_fast
from ._radon_transform import sart_projection_update
from .. import util
from warnings import warn
__all__ = ["radon", "iradon", "iradon_sart"]
def radon(image, theta=None, circle=False):
    """
    Calculates the radon transform of an image given specified
    projection angles.
    Parameters
    ----------
    image : array_like, dtype=float
        Input image. The rotation axis will be located in the pixel with
        indices ``(image.shape[0] // 2, image.shape[1] // 2)``.
    theta : array_like, dtype=float, optional (default np.arange(180))
        Projection angles (in degrees).
    circle : boolean, optional
        Assume image is zero outside the inscribed circle, making the
        width of each projection (the first dimension of the sinogram)
        equal to ``min(image.shape)``.
    Returns
    -------
    radon_image : ndarray
        Radon transform (sinogram). The tomography rotation axis will lie
        at the pixel index ``radon_image.shape[0] // 2`` along the 0th
        dimension of ``radon_image``.
    """
    if image.ndim != 2:
        raise ValueError('The input image must be 2-D')
    if theta is None:
        theta = np.arange(180)
    if circle:
        # Sanity-check the circle assumption: warn if any nonzero pixel
        # lies outside the inscribed circle (it is dropped by the crop below).
        radius = min(image.shape) // 2
        c0, c1 = np.ogrid[0:image.shape[0], 0:image.shape[1]]
        reconstruction_circle = ((c0 - image.shape[0] // 2) ** 2
                                 + (c1 - image.shape[1] // 2) ** 2)
        reconstruction_circle = reconstruction_circle <= radius ** 2
        if not np.all(reconstruction_circle | (image == 0)):
            warn('Radon transform: image must be zero outside the '
                 'reconstruction circle')
        # Crop image to make it square
        slices = []
        for d in (0, 1):
            if image.shape[d] > min(image.shape):
                excess = image.shape[d] - min(image.shape)
                slices.append(slice(int(np.ceil(excess / 2)),
                                    int(np.ceil(excess / 2)
                                        + min(image.shape))))
            else:
                slices.append(slice(None))
        slices = tuple(slices)
        padded_image = image[slices]
    else:
        # Pad so that a rotation by any angle keeps the whole image in
        # frame: the padded side must cover the image diagonal.
        diagonal = np.sqrt(2) * max(image.shape)
        pad = [int(np.ceil(diagonal - s)) for s in image.shape]
        new_center = [(s + p) // 2 for s, p in zip(image.shape, pad)]
        old_center = [s // 2 for s in image.shape]
        pad_before = [nc - oc for oc, nc in zip(old_center, new_center)]
        pad_width = [(pb, p - pb) for pb, p in zip(pad_before, pad)]
        padded_image = util.pad(image, pad_width, mode='constant',
                                constant_values=0)
    # padded_image is always square
    assert padded_image.shape[0] == padded_image.shape[1]
    radon_image = np.zeros((padded_image.shape[0], len(theta)))
    center = padded_image.shape[0] // 2
    # Homogeneous translations that move the rotation center to the
    # origin and back, so rotations happen about the image center.
    shift0 = np.array([[1, 0, -center],
                       [0, 1, -center],
                       [0, 0, 1]])
    shift1 = np.array([[1, 0, center],
                       [0, 1, center],
                       [0, 0, 1]])
    def build_rotation(theta):
        # 3x3 homogeneous rotation about the padded image's center.
        T = np.deg2rad(theta)
        R = np.array([[np.cos(T), np.sin(T), 0],
                      [-np.sin(T), np.cos(T), 0],
                      [0, 0, 1]])
        return shift1.dot(R).dot(shift0)
    for i in range(len(theta)):
        # One projection: rotate by theta[i], then integrate down axis 0.
        rotated = _warp_fast(padded_image, build_rotation(theta[i]))
        radon_image[:, i] = rotated.sum(0)
    return radon_image
def _sinogram_circle_to_square(sinogram):
    """Zero-pad a circle-inscribed sinogram up to the full diagonal width,
    keeping the projection center on the new center row."""
    side = sinogram.shape[0]
    diagonal = int(np.ceil(np.sqrt(2) * side))
    # Shift needed so the old center row lands on the new center row.
    before = diagonal // 2 - side // 2
    after = (diagonal - side) - before
    return util.pad(sinogram, ((before, after), (0, 0)),
                    mode='constant', constant_values=0)
def iradon(radon_image, theta=None, output_size=None,
           filter="ramp", interpolation="linear", circle=False):
    """
    Inverse radon transform.
    Reconstruct an image from the radon transform, using the filtered
    back projection algorithm.
    Parameters
    ----------
    radon_image : array_like, dtype=float
        Image containing radon transform (sinogram). Each column of
        the image corresponds to a projection along a different angle. The
        tomography rotation axis should lie at the pixel index
        ``radon_image.shape[0] // 2`` along the 0th dimension of
        ``radon_image``.
    theta : array_like, dtype=float, optional
        Reconstruction angles (in degrees). Default: m angles evenly spaced
        between 0 and 180 (if the shape of `radon_image` is (N, M)).
    output_size : int
        Number of rows and columns in the reconstruction.
    filter : str, optional (default ramp)
        Filter used in frequency domain filtering. Ramp filter used by default.
        Filters available: ramp, shepp-logan, cosine, hamming, hann.
        Assign None to use no filter.
    interpolation : str, optional (default 'linear')
        Interpolation method used in reconstruction. Methods available:
        'linear', 'nearest', and 'cubic' ('cubic' is slow).
    circle : boolean, optional
        Assume the reconstructed image is zero outside the inscribed circle.
        Also changes the default output_size to match the behaviour of
        ``radon`` called with ``circle=True``.
    Returns
    -------
    reconstructed : ndarray
        Reconstructed image. The rotation axis will be located in the pixel
        with indices
        ``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.
    Notes
    -----
    It applies the Fourier slice theorem to reconstruct an image by
    multiplying the frequency domain of the filter with the FFT of the
    projection data. This algorithm is called filtered back projection.
    """
    if radon_image.ndim != 2:
        raise ValueError('The input image must be 2-D')
    if theta is None:
        m, n = radon_image.shape
        theta = np.linspace(0, 180, n, endpoint=False)
    else:
        theta = np.asarray(theta)
    if len(theta) != radon_image.shape[1]:
        raise ValueError("The given ``theta`` does not match the number of "
                         "projections in ``radon_image``.")
    interpolation_types = ('linear', 'nearest', 'cubic')
    if not interpolation in interpolation_types:
        raise ValueError("Unknown interpolation: %s" % interpolation)
    if not output_size:
        # If output size not specified, estimate from input radon image
        if circle:
            output_size = radon_image.shape[0]
        else:
            # Largest square that fits inside the projection width.
            output_size = int(np.floor(np.sqrt((radon_image.shape[0]) ** 2
                                               / 2.0)))
    if circle:
        radon_image = _sinogram_circle_to_square(radon_image)
    th = (np.pi / 180.0) * theta
    # resize image to next power of two (but no less than 64) for
    # Fourier analysis; speeds up Fourier and lessens artifacts
    projection_size_padded = \
        max(64, int(2 ** np.ceil(np.log2(2 * radon_image.shape[0]))))
    pad_width = ((0, projection_size_padded - radon_image.shape[0]), (0, 0))
    img = util.pad(radon_image, pad_width, mode='constant', constant_values=0)
    # Construct the Fourier filter
    f = fftfreq(projection_size_padded).reshape(-1, 1)  # digital frequency
    omega = 2 * np.pi * f  # angular frequency
    fourier_filter = 2 * np.abs(f)  # ramp filter
    if filter == "ramp":
        pass
    elif filter == "shepp-logan":
        # Start from first element to avoid divide by zero
        fourier_filter[1:] = fourier_filter[1:] * np.sin(omega[1:]) / omega[1:]
    elif filter == "cosine":
        fourier_filter *= np.cos(omega)
    elif filter == "hamming":
        fourier_filter *= (0.54 + 0.46 * np.cos(omega / 2))
    elif filter == "hann":
        fourier_filter *= (1 + np.cos(omega / 2)) / 2
    elif filter is None:
        fourier_filter[:] = 1
    else:
        raise ValueError("Unknown filter: %s" % filter)
    # Apply filter in Fourier domain
    projection = fft(img, axis=0) * fourier_filter
    radon_filtered = np.real(ifft(projection, axis=0))
    # Resize filtered image back to original size
    radon_filtered = radon_filtered[:radon_image.shape[0], :]
    reconstructed = np.zeros((output_size, output_size))
    # Determine the center of the projections (= center of sinogram)
    mid_index = radon_image.shape[0] // 2
    [X, Y] = np.mgrid[0:output_size, 0:output_size]
    xpr = X - int(output_size) // 2
    ypr = Y - int(output_size) // 2
    # Reconstruct image by interpolation
    for i in range(len(theta)):
        # Signed coordinate of every output pixel along the detector for
        # the projection at angle th[i].
        t = ypr * np.cos(th[i]) - xpr * np.sin(th[i])
        x = np.arange(radon_filtered.shape[0]) - mid_index
        if interpolation == 'linear':
            # np.interp is much faster than interp1d for the linear case.
            backprojected = np.interp(t, x, radon_filtered[:, i],
                                      left=0, right=0)
        else:
            interpolant = interp1d(x, radon_filtered[:, i], kind=interpolation,
                                   bounds_error=False, fill_value=0)
            backprojected = interpolant(t)
        reconstructed += backprojected
    if circle:
        # Pixels outside the inscribed circle are not fully covered by
        # the projections; force them to zero.
        radius = output_size // 2
        reconstruction_circle = (xpr ** 2 + ypr ** 2) <= radius ** 2
        reconstructed[~reconstruction_circle] = 0.
    return reconstructed * np.pi / (2 * len(th))
def order_angles_golden_ratio(theta):
    """
    Yield indices into ``theta`` in an approximate golden-ratio order.
    Ordering the projection angles this way reduces the amount of
    correlated information in subsequent projections (T. Kohler's
    golden-section access scheme).
    Parameters
    ----------
    theta : 1D array of floats
        Projection angles in degrees. Duplicate angles are not allowed.
    Yields
    ------
    int
        Indices into ``theta``; every index in ``range(len(theta))`` is
        produced exactly once.
    """
    interval = 180

    def angle_distance(a, b):
        # Circular distance between two angles on a 180-degree interval.
        d = a - b
        return min(abs(d % interval), abs(d % -interval))

    # Work through the angles in ascending order so searchsorted applies.
    remaining = list(np.argsort(theta))
    # The first projection can be arbitrary; take the smallest angle.
    index = remaining.pop(0)
    angle = theta[index]
    yield index
    # Golden-section step: interval * (1 - 1/phi).
    angle_increment = interval * (1 - (np.sqrt(5) - 1) / 2)
    while remaining:
        angle = (angle + angle_increment) % interval
        # Locate the remaining angles bracketing the target angle
        # (indices wrap around the sorted list).
        pos = np.searchsorted(theta[remaining], angle)
        below = pos - 1
        above = pos % len(remaining)
        if angle_distance(angle, theta[remaining[below]]) < \
                angle_distance(angle, theta[remaining[above]]):
            yield remaining.pop(below)
        else:
            yield remaining.pop(above)
def iradon_sart(radon_image, theta=None, image=None, projection_shifts=None,
                clip=None, relaxation=0.15):
    """
    Inverse radon transform
    Reconstruct an image from the radon transform, using a single iteration of
    the Simultaneous Algebraic Reconstruction Technique (SART) algorithm.
    Parameters
    ----------
    radon_image : 2D array, dtype=float
        Image containing radon transform (sinogram). Each column of
        the image corresponds to a projection along a different angle. The
        tomography rotation axis should lie at the pixel index
        ``radon_image.shape[0] // 2`` along the 0th dimension of
        ``radon_image``.
    theta : 1D array, dtype=float, optional
        Reconstruction angles (in degrees). Default: m angles evenly spaced
        between 0 and 180 (if the shape of `radon_image` is (N, M)).
    image : 2D array, dtype=float, optional
        Image containing an initial reconstruction estimate. Shape of this
        array should be ``(radon_image.shape[0], radon_image.shape[0])``. The
        default is an array of zeros.
    projection_shifts : 1D array, dtype=float
        Shift the projections contained in ``radon_image`` (the sinogram) by
        this many pixels before reconstructing the image. The i'th value
        defines the shift of the i'th column of ``radon_image``.
    clip : length-2 sequence of floats
        Force all values in the reconstructed tomogram to lie in the range
        ``[clip[0], clip[1]]``
    relaxation : float
        Relaxation parameter for the update step. A higher value can
        improve the convergence rate, but one runs the risk of instabilities.
        Values close to or higher than 1 are not recommended.
    Returns
    -------
    reconstructed : ndarray
        Reconstructed image. The rotation axis will be located in the pixel
        with indices
        ``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.
    Notes
    -----
    Algebraic Reconstruction Techniques are based on formulating the tomography
    reconstruction problem as a set of linear equations. Along each ray,
    the projected value is the sum of all the values of the cross section along
    the ray. A typical feature of SART (and a few other variants of algebraic
    techniques) is that it samples the cross section at equidistant points
    along the ray, using linear interpolation between the pixel values of the
    cross section. The resulting set of linear equations are then solved using
    a slightly modified Kaczmarz method.
    When using SART, a single iteration is usually sufficient to obtain a good
    reconstruction. Further iterations will tend to enhance high-frequency
    information, but will also often increase the noise.
    References
    ----------
    .. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
           Imaging", IEEE Press 1988.
    .. [2] AH Andersen, AC Kak, "Simultaneous algebraic reconstruction
           technique (SART): a superior implementation of the ART algorithm",
           Ultrasonic Imaging 6 pp 81--94 (1984)
    .. [3] S Kaczmarz, "Angenäherte auflösung von systemen linearer
           gleichungen", Bulletin International de l’Academie Polonaise des
           Sciences et des Lettres 35 pp 355--357 (1937)
    .. [4] Kohler, T. "A projection access scheme for iterative
           reconstruction based on the golden section." Nuclear Science
           Symposium Conference Record, 2004 IEEE. Vol. 6. IEEE, 2004.
    .. [5] Kaczmarz' method, Wikipedia,
           http://en.wikipedia.org/wiki/Kaczmarz_method
    """
    if radon_image.ndim != 2:
        raise ValueError('radon_image must be two dimensional')
    reconstructed_shape = (radon_image.shape[0], radon_image.shape[0])
    if theta is None:
        theta = np.linspace(0, 180, radon_image.shape[1], endpoint=False)
    elif theta.shape != (radon_image.shape[1],):
        # BUG FIX: this message previously interpolated
        # ``projection_shifts.shape``, which is usually still None here and
        # turned a helpful ValueError into an AttributeError.
        raise ValueError('Shape of theta (%s) does not match the '
                         'number of projections (%d)'
                         % (theta.shape, radon_image.shape[1]))
    if image is None:
        # ``np.float`` was a deprecated (and later removed) alias of the
        # builtin ``float``; the builtin gives the same float64 dtype.
        image = np.zeros(reconstructed_shape, dtype=float)
    elif image.shape != reconstructed_shape:
        raise ValueError('Shape of image (%s) does not match first dimension '
                         'of radon_image (%s)'
                         % (image.shape, reconstructed_shape))
    if projection_shifts is None:
        projection_shifts = np.zeros((radon_image.shape[1],), dtype=float)
    elif projection_shifts.shape != (radon_image.shape[1],):
        raise ValueError('Shape of projection_shifts (%s) does not match the '
                         'number of projections (%d)'
                         % (projection_shifts.shape, radon_image.shape[1]))
    if clip is not None:
        if len(clip) != 2:
            raise ValueError('clip must be a length-2 sequence')
        clip = (float(clip[0]), float(clip[1]))
    relaxation = float(relaxation)
    # Process projections in golden-ratio order to decorrelate successive
    # updates (see order_angles_golden_ratio).
    for angle_index in order_angles_golden_ratio(theta):
        image_update = sart_projection_update(image, theta[angle_index],
                                              radon_image[:, angle_index],
                                              projection_shifts[angle_index])
        image += relaxation * image_update
        if clip is not None:
            image = np.clip(image, clip[0], clip[1])
    return image
| Hiyorimi/scikit-image | skimage/transform/radon_transform.py | Python | bsd-3-clause | 17,914 |
# convolutional autoencoder
# Based on
# https://github.com/ageron/handson-ml2/blob/master/17_autoencoders_and_gans.ipynb
import superimport
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow import keras
import pyprobml_utils as pml
# Grow GPU memory on demand instead of reserving it all up front.
# NOTE(review): assumes at least one GPU is visible -- ``devices[0]``
# raises IndexError on CPU-only machines; confirm intended audience.
devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(devices[0], True)
# Fashion-MNIST scaled to [0, 1]; the last 5000 training images are
# held out for validation.
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
X_train_full = X_train_full.astype(np.float32) / 255
X_test = X_test.astype(np.float32) / 255
X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]
y_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]
# Fix seeds so weight init and shuffling are reproducible.
tf.random.set_seed(42)
np.random.seed(42)
# Encoder: 28x28 image -> 3x3x64 feature map via three conv+maxpool stages.
conv_encoder = keras.models.Sequential([
    keras.layers.Reshape([28, 28, 1], input_shape=[28, 28]),
    keras.layers.Conv2D(16, kernel_size=3, padding="SAME", activation="selu"),
    keras.layers.MaxPool2D(pool_size=2),
    keras.layers.Conv2D(32, kernel_size=3, padding="SAME", activation="selu"),
    keras.layers.MaxPool2D(pool_size=2),
    keras.layers.Conv2D(64, kernel_size=3, padding="SAME", activation="selu"),
    keras.layers.MaxPool2D(pool_size=2)
])
# Decoder: mirrors the encoder with transposed convolutions back to 28x28;
# the final sigmoid keeps pixel values in [0, 1].
conv_decoder = keras.models.Sequential([
    keras.layers.Conv2DTranspose(32, kernel_size=3, strides=2, padding="VALID", activation="selu",
                                 input_shape=[3, 3, 64]),
    keras.layers.Conv2DTranspose(16, kernel_size=3, strides=2, padding="SAME", activation="selu"),
    keras.layers.Conv2DTranspose(1, kernel_size=3, strides=2, padding="SAME", activation="sigmoid"),
    keras.layers.Reshape([28, 28])
])
# Full autoencoder: encoder followed by decoder.
conv_ae = keras.models.Sequential([conv_encoder, conv_decoder])
def rounded_accuracy(y_true, y_pred):
    """Binary accuracy after rounding targets and predictions to 0/1."""
    rounded_true = tf.round(y_true)
    rounded_pred = tf.round(y_pred)
    return keras.metrics.binary_accuracy(rounded_true, rounded_pred)
# Pixel-wise binary cross-entropy treats each output pixel as a Bernoulli
# probability. NOTE(review): ``lr`` is the legacy keyword (newer Keras
# spells it ``learning_rate``) -- confirm the targeted Keras version.
conv_ae.compile(loss="binary_crossentropy", optimizer=keras.optimizers.SGD(lr=1.0),
                metrics=[rounded_accuracy])
# Train to reconstruct the inputs themselves (targets == inputs).
history = conv_ae.fit(X_train, X_train, epochs=5,
                      validation_data=[X_valid, X_valid])
def plot_image(image):
    """Render one grayscale image in the current subplot, axes hidden."""
    plt.imshow(image, cmap="binary")
    plt.axis("off")
def show_reconstructions(model, images=X_valid, n_images=5):
    """Plot ``n_images`` originals (top row) above their reconstructions
    (bottom row) produced by ``model``."""
    reconstructions = model.predict(images[:n_images])
    plt.figure(figsize=(n_images * 1.5, 3))
    for col in range(n_images):
        plt.subplot(2, n_images, 1 + col)
        plot_image(images[col])
        plt.subplot(2, n_images, 1 + n_images + col)
        plot_image(reconstructions[col])
# Visual sanity check: originals on top, reconstructions below.
show_reconstructions(conv_ae)
pml.savefig('ae-cnn-fashion-recon.pdf')
plt.show()
| probml/pyprobml | scripts/conv_ae_tf.py | Python | mit | 2,692 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
import numpy as np
# Toy training set: six 2-D points forming two mirrored clusters.
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
# Class labels: first three samples are class 1, last three class 2.
y = np.array([1, 1, 1, 2, 2, 2])
clf = QuadraticDiscriminantAnalysis()
clf.fit(X, y)
# Query point near the class-1 cluster; prints the predicted label array.
print(clf.predict([[-0.8, -1]]))
| davidam/python-examples | scikit/qda.py | Python | gpl-3.0 | 1,179 |
import pytest
import requests
import requests_mock
from pybib import utils
from hamcrest import *
class MockRequest:
    """Tiny stand-in for ``requests.Response`` exposing only a status code."""

    def __init__(self, code):
        # Kept under the same public attribute name the tests may rely on.
        self.code = code

    @property
    def status_code(self):
        """Read-only view of the wrapped status code."""
        return self.code
def test_handle_status_code_200():
    # A 200 response must pass through without exiting.
    response = MockRequest(200)
    utils.handle_status_code(response)
def test_handle_status_code_404():
    # A 404 must abort the program via SystemExit.
    response = MockRequest(404)
    with pytest.raises(SystemExit):
        utils.handle_status_code(response)
def test_handle_status_code_unknown():
    # Any unrecognized status code must also abort via SystemExit.
    response = MockRequest(1)
    with pytest.raises(SystemExit):
        utils.handle_status_code(response)
def test_search():
    """utils.search should unpack the CrossRef ``items`` list from the
    works endpoint's JSON response."""
    # Canned CrossRef /works response containing exactly one item.
    search_json_response = """
    {
      "message-version": "1.0.0",
      "message": {
        "facets": {},
        "query": {
          "start-index": 0,
          "search-terms": "test"
        },
        "total-results": 1,
        "items": [{
          "source": "CrossRef",
          "title": ["Test Citation"],
          "type": "dissertation", "URL": "http://dx.doi.org/test.doi",
          "deposited": {"timestamp": 1000, "date-parts": [[2015, 1, 1]]},
          "container-title": [],
          "author": [{"given": "Test", "affiliation": [], "family": "Test"}],
          "reference-count": 0,
          "member": "http://id.crossref.org/member/xxx",
          "subtitle": [],
          "indexed": { "timestamp": 1000, "date-parts": [[2015, 1, 1]] },
          "prefix": "http://id.crossref.org/prefix/test",
          "publisher": "Test Publisher",
          "score": 1.0,
          "DOI": "test.doi",
          "issued": { "date-parts": [[]] }
        }]
      }
    }
    """
    with requests_mock.mock() as m:
        m.get('http://api.crossref.org/works', text=search_json_response)
        entries = utils.search('test.doi')
        # Removed a leftover debug print(entries) that polluted test output.
        assert_that(len(entries), equal_to(1))
        entry = entries[0]
        assert_that(entry["title"], equal_to(["Test Citation"]))
def test_get_bibtex():
    # get_bibtex should return the raw body served for the DOI URL.
    with requests_mock.mock() as mocked:
        mocked.get('http://dx.doi.org/test.doi', text='abc')
        bibtex = utils.get_bibtex('test.doi')
        assert_that(bibtex, equal_to('abc'))
| jgilchrist/pybib | tests/test_utils.py | Python | bsd-3-clause | 2,228 |
"""
Support for NX584 alarm control panels.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel.nx584/
"""
import logging
import requests
import voluptuous as vol
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED)
import homeassistant.helpers.config_validation as cv
# Runtime requirement installed by Home Assistant for this platform.
REQUIREMENTS = ['pynx584==0.4']
_LOGGER = logging.getLogger(__name__)
# Connection defaults for a locally running nx584 server.
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'NX584'
DEFAULT_PORT = 5007
# Extend the shared platform schema with host/name/port options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the NX584 platform."""
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    name = config.get(CONF_NAME)
    url = 'http://{}:{}'.format(host, port)
    try:
        # The panel constructor probes the API, so connection failures
        # surface here rather than later during polling.
        add_entities([NX584Alarm(hass, url, name)])
    except requests.exceptions.ConnectionError as ex:
        _LOGGER.error("Unable to connect to NX584: %s", str(ex))
        return
class NX584Alarm(alarm.AlarmControlPanel):
    """Representation of a NX584-based alarm panel."""

    def __init__(self, hass, url, name):
        """Init the nx584 alarm panel."""
        from nx584 import client
        self._hass = hass
        self._name = name
        self._url = url
        self._alarm = client.Client(self._url)
        # Do an initial list operation so that we will try to actually
        # talk to the API and trigger a requests exception for
        # setup_platform() to catch
        self._alarm.list_zones()
        self._state = None

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def code_format(self):
        """Return one or more digits/characters."""
        return alarm.FORMAT_NUMBER

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    def update(self):
        """Process new events from panel."""
        try:
            part = self._alarm.list_partitions()[0]
            zones = self._alarm.list_zones()
        except requests.exceptions.ConnectionError as ex:
            _LOGGER.error("Unable to connect to %(host)s: %(reason)s",
                          dict(host=self._url, reason=ex))
            self._state = None
            # BUG FIX: bail out here; falling through used to dereference
            # the unbound ``part`` below and raise a NameError.
            return
        except IndexError:
            _LOGGER.error("NX584 reports no partitions")
            self._state = None
            return
        # Any bypassed zone means the panel was armed in "stay" mode.
        bypassed = False
        for zone in zones:
            if zone['bypassed']:
                _LOGGER.debug("Zone %(zone)s is bypassed, assuming HOME",
                              dict(zone=zone['number']))
                bypassed = True
                break
        if not part['armed']:
            self._state = STATE_ALARM_DISARMED
        elif bypassed:
            self._state = STATE_ALARM_ARMED_HOME
        else:
            self._state = STATE_ALARM_ARMED_AWAY
        # An active siren overrides any armed/disarmed state.
        for flag in part['condition_flags']:
            if flag == "Siren on":
                self._state = STATE_ALARM_TRIGGERED

    def alarm_disarm(self, code=None):
        """Send disarm command."""
        self._alarm.disarm(code)

    def alarm_arm_home(self, code=None):
        """Send arm home command."""
        self._alarm.arm('stay')

    def alarm_arm_away(self, code=None):
        """Send arm away command."""
        self._alarm.arm('exit')
| jamespcole/home-assistant | homeassistant/components/nx584/alarm_control_panel.py | Python | apache-2.0 | 3,842 |
# Copyright Ralf W. Grosse-Kunstleve 2006. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import std_pair_ext
# Exercise the Boost.Python std::pair converter: foo() must come back as a
# native Python tuple.
assert std_pair_ext.foo() == (3, 5)
# Python 2 print statement -- this example predates Python 3.
print "OK"
| alexa-infra/negine | thirdparty/boost-python/libs/python/example/test_std_pair.py | Python | mit | 267 |
from .Exporter import Exporter
from pyqtgraph.parametertree import Parameter
from pyqtgraph.Qt import QtGui, QtCore, QtSvg, USE_PYSIDE
import pyqtgraph as pg
import numpy as np
__all__ = ['ImageExporter']
class ImageExporter(Exporter):
    """Export a pyqtgraph item or scene to a raster image file, raw QImage
    bytes, or the system clipboard."""
    Name = "Image File (PNG, TIF, JPG, ...)"
    allowCopy = True
    def __init__(self, item):
        """Build the export parameter tree; the default size mirrors the
        item's current target rectangle."""
        Exporter.__init__(self, item)
        tr = self.getTargetRect()
        # The export source may be a single graphics item or a whole scene.
        if isinstance(item, QtGui.QGraphicsItem):
            scene = item.scene()
        else:
            scene = item
        bgbrush = scene.views()[0].backgroundBrush()
        bg = bgbrush.color()
        if bgbrush.style() == QtCore.Qt.NoBrush:
            # No background brush -> export with a transparent background.
            bg.setAlpha(0)
        self.params = Parameter(name='params', type='group', children=[
            {'name': 'width', 'type': 'int', 'value': tr.width(), 'limits': (0, None)},
            {'name': 'height', 'type': 'int', 'value': tr.height(), 'limits': (0, None)},
            {'name': 'antialias', 'type': 'bool', 'value': True},
            {'name': 'background', 'type': 'color', 'value': bg},
        ])
        # Keep width and height locked to the source aspect ratio.
        self.params.param('width').sigValueChanged.connect(self.widthChanged)
        self.params.param('height').sigValueChanged.connect(self.heightChanged)
    def widthChanged(self):
        # Recompute height from the new width; blockSignal prevents the
        # reciprocal handler from firing and recursing.
        sr = self.getSourceRect()
        ar = float(sr.height()) / sr.width()
        self.params.param('height').setValue(self.params['width'] * ar, blockSignal=self.heightChanged)
    def heightChanged(self):
        # Mirror image of widthChanged.
        sr = self.getSourceRect()
        ar = float(sr.width()) / sr.height()
        self.params.param('width').setValue(self.params['height'] * ar, blockSignal=self.widthChanged)
    def parameters(self):
        """Return the user-editable export parameters."""
        return self.params
    def export(self, fileName=None, toBytes=False, copy=False):
        """Render the scene into a QImage, then save it to ``fileName``,
        return it (``toBytes``), or place it on the clipboard (``copy``).
        With no destination given, a save dialog is shown instead and this
        method returns early (the dialog re-invokes export).
        """
        if fileName is None and not toBytes and not copy:
            # Build the file-type filter list, preferred formats first.
            if USE_PYSIDE:
                filter = ["*."+str(f) for f in QtGui.QImageWriter.supportedImageFormats()]
            else:
                filter = ["*."+bytes(f).decode('utf-8') for f in QtGui.QImageWriter.supportedImageFormats()]
            preferred = ['*.png', '*.tif', '*.jpg']
            for p in preferred[::-1]:
                if p in filter:
                    filter.remove(p)
                    filter.insert(0, p)
            self.fileSaveDialog(filter=filter)
            return
        targetRect = QtCore.QRect(0, 0, self.params['width'], self.params['height'])
        sourceRect = self.getSourceRect()
        #self.png = QtGui.QImage(targetRect.size(), QtGui.QImage.Format_ARGB32)
        #self.png.fill(pyqtgraph.mkColor(self.params['background']))
        w, h = self.params['width'], self.params['height']
        if w == 0 or h == 0:
            raise Exception("Cannot export image with size=0 (requested export size is %dx%d)" % (w,h))
        # Pre-fill a 4-channel buffer with the background color.
        # NOTE(review): channels are written blue, green, red, alpha --
        # presumably to match QImage's little-endian ARGB32 byte order;
        # confirm before changing.
        bg = np.empty((self.params['width'], self.params['height'], 4), dtype=np.ubyte)
        color = self.params['background']
        bg[:,:,0] = color.blue()
        bg[:,:,1] = color.green()
        bg[:,:,2] = color.red()
        bg[:,:,3] = color.alpha()
        self.png = pg.makeQImage(bg, alpha=True)
        ## set resolution of image:
        origTargetRect = self.getTargetRect()
        resolutionScale = targetRect.width() / origTargetRect.width()
        #self.png.setDotsPerMeterX(self.png.dotsPerMeterX() * resolutionScale)
        #self.png.setDotsPerMeterY(self.png.dotsPerMeterY() * resolutionScale)
        painter = QtGui.QPainter(self.png)
        #dtr = painter.deviceTransform()
        try:
            # Let scene items adapt to export mode (e.g. resolution-aware
            # text) before rendering with the requested antialiasing.
            self.setExportMode(True, {'antialias': self.params['antialias'], 'background': self.params['background'], 'painter': painter, 'resolutionScale': resolutionScale})
            painter.setRenderHint(QtGui.QPainter.Antialiasing, self.params['antialias'])
            self.getScene().render(painter, QtCore.QRectF(targetRect), QtCore.QRectF(sourceRect))
        finally:
            # Always restore normal (non-export) mode, even if render fails.
            self.setExportMode(False)
        painter.end()
        if copy:
            QtGui.QApplication.clipboard().setImage(self.png)
        elif toBytes:
            return self.png
        else:
            self.png.save(fileName)
| pixlra/HARP-fork | Various/ThirdParty/pyqtgraph/exporters/ImageExporter.py | Python | gpl-3.0 | 4,274 |
import sys
import unittest
import webbrowser
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5 import QtTest
from PyQt5 import QtCore
from unittest.mock import MagicMock
# set up import paths
import sas.qtgui.path_prepare
# Local
import sas.qtgui.Utilities.GuiUtils as GuiUtils
from sas.qtgui.Perspectives.Fitting.Constraint import Constraint
from sas.qtgui.Plotting.PlotterData import Data1D
from sas.qtgui.Perspectives.Fitting.FittingPerspective import FittingWindow
# Qt widgets require a QApplication; create one only if the test runner
# has not already made one.
if not QtWidgets.QApplication.instance():
    app = QtWidgets.QApplication(sys.argv)
class FittingPerspectiveTest(unittest.TestCase):
    '''Test the Fitting Perspective (FittingWindow) widget.'''

    def setUp(self):
        '''Create the perspective'''
        # Minimal stand-in for the application manager expected by FittingWindow.
        class dummy_manager(object):
            def communicator(self):
                return GuiUtils.Communicate()
            communicate = GuiUtils.Communicate()
        self.widget = FittingWindow(dummy_manager())

    def tearDown(self):
        '''Destroy the perspective'''
        self.widget.close()
        self.widget = None

    def testDefaults(self):
        '''Test the GUI in its default state'''
        self.assertIsInstance(self.widget, QtWidgets.QWidget)
        self.assertIn("Fit panel", self.widget.windowTitle())
        self.assertEqual(self.widget.optimizer, "Levenberg-Marquardt")
        self.assertEqual(len(self.widget.tabs), 1)
        self.assertEqual(self.widget.maxIndex, 2)
        self.assertEqual(self.widget.getTabName(), "FitPage2")

    def testAddTab(self):
        '''Add a tab and test it'''
        # Add an empty tab
        self.widget.addFit(None)
        self.assertEqual(len(self.widget.tabs), 2)
        self.assertEqual(self.widget.getTabName(), "FitPage3")
        self.assertEqual(self.widget.maxIndex, 3)
        # Add an empty batch tab
        self.widget.addFit(None, is_batch=True)
        self.assertEqual(len(self.widget.tabs), 3)
        self.assertEqual(self.widget.getTabName(2), "BatchPage4")
        self.assertEqual(self.widget.maxIndex, 4)

    def testAddCSTab(self):
        ''' Add a constraint/simult tab'''
        self.widget.addConstraintTab()
        self.assertEqual(len(self.widget.tabs), 2)
        self.assertEqual(self.widget.getCSTabName(), "Const. & Simul. Fit")

    def testResetTab(self):
        ''' Remove data from last tab'''
        self.assertEqual(len(self.widget.tabs), 1)
        self.assertEqual(self.widget.getTabName(), "FitPage2")
        self.assertEqual(self.widget.maxIndex, 2)
        # Attempt to remove the last tab
        self.widget.resetTab(0)
        # see that the tab didn't disappear, just changed the name/id
        self.assertEqual(len(self.widget.tabs), 1)
        self.assertEqual(self.widget.getTabName(), "FitPage3")
        self.assertEqual(self.widget.maxIndex, 3)
        # Now, add data
        data = Data1D(x=[1,2], y=[1,2])
        GuiUtils.dataFromItem = MagicMock(return_value=data)
        item = QtGui.QStandardItem("test")
        self.widget.setData([item])
        # Assert data is on widget
        self.assertEqual(len(self.widget.tabs[0].all_data), 1)
        # Reset the tab
        self.widget.resetTab(0)
        # See that the tab contains data no more
        self.assertEqual(len(self.widget.tabs[0].all_data), 0)

    def testCloseTab(self):
        '''Delete a tab and test'''
        # Add an empty tab
        self.widget.addFit(None)
        # Remove the original tab
        self.widget.tabCloses(1)
        self.assertEqual(len(self.widget.tabs), 1)
        self.assertEqual(self.widget.maxIndex, 3)
        self.assertEqual(self.widget.getTabName(), "FitPage3")
        # Attempt to remove the last tab
        self.widget.tabCloses(1)
        # The tab should still be there
        self.assertEqual(len(self.widget.tabs), 1)
        self.assertEqual(self.widget.maxIndex, 4)
        self.assertEqual(self.widget.getTabName(), "FitPage4")

    def testAllowBatch(self):
        '''Assure the perspective allows multiple datasets'''
        self.assertTrue(self.widget.allowBatch())

    def testSetData(self):
        ''' Assure that setting data is correct'''
        with self.assertRaises(AssertionError):
            self.widget.setData(None)
        with self.assertRaises(AttributeError):
            self.widget.setData("BOOP")
        # Mock the datafromitem() call from FittingWidget
        data = Data1D(x=[1,2], y=[1,2])
        GuiUtils.dataFromItem = MagicMock(return_value=data)
        item = QtGui.QStandardItem("test")
        self.widget.setData([item])
        # First tab should accept data
        self.assertEqual(len(self.widget.tabs), 1)
        # Add another set of data
        self.widget.setData([item])
        # Now we should have two tabs
        self.assertEqual(len(self.widget.tabs), 2)
        # Add two more items in a list
        self.widget.setData([item, item])
        # Check for 4 tabs
        self.assertEqual(len(self.widget.tabs), 4)

    def testSwapData(self):
        '''Assure that data swapping is correct'''
        # Mock the datafromitem() call from FittingWidget
        data1 = Data1D(x=[3,4], y=[3,4])
        GuiUtils.dataFromItem = MagicMock(return_value=data1)
        # Add a new tab
        item = QtGui.QStandardItem("test")
        self.widget.setData([item])
        # Create a new dataset and mock the datafromitemcall()
        data2 = Data1D(x=[1,2], y=[1,2])
        GuiUtils.dataFromItem = MagicMock(return_value=data2)
        # Swap the data
        self.widget.swapData(item)
        # Check that data has been swapped
        self.assertEqual(self.widget.tabs[0].data, data2)
        # We should only have one tab
        self.assertEqual(len(self.widget.tabs), 1)
        # send something stupid as data
        item = "foo"
        # It should raise an AttributeError
        self.assertRaises(AttributeError, self.widget.swapData, item)
        # Create a batch tab
        item = QtGui.QStandardItem("test")
        self.widget.addFit(None, is_batch=True)
        # It should raise an exception
        self.assertRaises(RuntimeError, self.widget.swapData, item)
        # Create a non valid tab
        self.widget.addConstraintTab()
        # It should raise a TypeError
        self.assertRaises(TypeError, self.widget.swapData, item)

    def testSetBatchData(self):
        ''' Assure that setting batch data is correct'''
        # Mock the datafromitem() call from FittingWidget
        data1 = Data1D(x=[1,2], y=[1,2])
        data2 = Data1D(x=[1,2], y=[1,2])
        data_batch = [data1, data2]
        GuiUtils.dataFromItem = MagicMock(return_value=data1)
        item = QtGui.QStandardItem("test")
        self.widget.setData([item, item], is_batch=True)
        # First tab should not accept data
        self.assertEqual(len(self.widget.tabs), 2)
        # Add another set of data
        self.widget.setData([item, item], is_batch=True)
        # Now we should have two batch tabs
        self.assertEqual(len(self.widget.tabs), 3)
        # Check the names of the new tabs
        self.assertEqual(self.widget.tabText(1), "BatchPage2")
        self.assertEqual(self.widget.tabText(2), "BatchPage3")

    def testGetFitTabs(self):
        '''test the fit tab getter method'''
        # Add an empty tab
        self.widget.addFit(None)
        # Get the tabs
        tabs = self.widget.getFitTabs()
        self.assertTrue(isinstance(tabs, list))
        self.assertEqual(len(tabs), 2)

    def testGetActiveConstraintList(self):
        '''test the active constraint getter'''
        # Add an empty tab
        self.widget.addFit(None)
        # mock the getConstraintsForModel method of the FittingWidget tab of
        # the first tab
        tab = self.widget.tabs[0]
        tab.getConstraintsForModel = MagicMock(return_value=[("scale",
                                                             "M2.scale +2")])
        # mock the getConstraintsForModel method of the FittingWidget tab of
        # the second tab
        tab = self.widget.tabs[1]
        tab.getConstraintsForModel = MagicMock(return_value=[("scale",
                                                              "M2.background "
                                                              "+2")])
        constraints = self.widget.getActiveConstraintList()
        # we should have 2 constraints
        self.assertEqual(len(constraints), 2)
        self.assertEqual(constraints, [("M1.scale", "M2.scale +2"),
                                       ('M2.scale', 'M2.background +2')])

    def testGetSymbolDictForConstraints(self):
        '''test the symbol dict getter'''
        # Add an empty tab
        self.widget.addFit(None)
        # mock the getSymbolDict method of the first tab
        tab = self.widget.tabs[0]
        tab.getSymbolDict = MagicMock(return_value={"M1.scale": 1})
        # mock the getSymbolDict method of the second tab
        tab = self.widget.tabs[1]
        tab.getSymbolDict = MagicMock(return_value={"M2.scale": 1})
        symbols = self.widget.getSymbolDictForConstraints()
        # we should have 2 symbols
        self.assertEqual(len(symbols), 2)
        self.assertEqual(list(symbols.keys()), ["M1.scale", "M2.scale"])

    def testGetConstraintTab(self):
        '''test the constraint tab getter'''
        # no constraint tab is present, should return None
        constraint_tab = self.widget.getConstraintTab()
        self.assertEqual(constraint_tab, None)
        # add a constraint tab
        self.widget.addConstraintTab()
        constraint_tab = self.widget.getConstraintTab()
        # NOTE: was the deprecated assertEquals alias (removed in Python 3.12)
        self.assertEqual(constraint_tab, self.widget.tabs[1])

    def testSerialization(self):
        ''' Serialize fit pages and check data '''
        self.assertTrue(hasattr(self.widget, 'isSerializable'))
        self.assertTrue(self.widget.isSerializable())
        data = Data1D(x=[1,2], y=[1,2])
        GuiUtils.dataFromItem = MagicMock(return_value=data)
        item = QtGui.QStandardItem("test")
        self.widget.setData([item])
        tab = self.widget.tabs[0]
        cbCat = tab.cbCategory
        cbModel = tab.cbModel
        cbCat.setCurrentIndex(cbCat.findText("Cylinder"))
        cbModel.setCurrentIndex(cbModel.findText("barbell"))
        data_id = str(self.widget.currentTabDataId()[0])
        # check values - disabled control, present weights
        rowcount = tab._model_model.rowCount()
        self.assertEqual(rowcount, 8)
        state_default = self.widget.serializeAll()
        state_all = self.widget.serializeAllFitpage()
        state_cp = self.widget.serializeCurrentPage()
        page = self.widget.getSerializedFitpage(self.widget.currentTab)
        # Pull out params from state
        params = state_all[data_id]['fit_params'][0]
        # Tests
        self.assertEqual(len(state_all), len(state_default))
        self.assertEqual(len(state_cp), len(page))
        self.assertEqual(len(state_all), 1)
        # getPage should include an extra param 'data_id' removed by serialize
        self.assertNotEqual(len(params), len(page))
        self.assertEqual(len(params), 28)
        self.assertEqual(page.get('data_id', None), None)

    def testUpdateFromConstraints(self):
        '''tests the method that parses the loaded project dict and returns a
        dict with constraints across all fit pages'''
        # create a constraint dict with one constraint for fit pages 1 and 2
        constraint_dict = {'M1': [['scale', 'scale', 'M1.scale', True,
                                  'M2.scale']],
                           'M2': [['background', 'background',
                                   'M2.background', True, 'M1.background']]}
        # add a second tab
        self.widget.addFit(None)
        tab1 = self.widget.tabs[0]
        tab2 = self.widget.tabs[1]
        # mock the getRowFromName methods from both tabs
        tab1.getRowFromName = MagicMock(return_value=0)
        tab2.getRowFromName = MagicMock(return_value=1)
        # mock the addConstraintToRow method of both tabs
        tab1.addConstraintToRow = MagicMock()
        tab2.addConstraintToRow = MagicMock()
        # add the constraints
        self.widget.updateFromConstraints(constraint_dict)
        # check that getRowFromName was called correctly on both tabs
        tab1.getRowFromName.assert_called_with("scale")
        tab2.getRowFromName.assert_called_with("background")
        # check that addConstraintToRow was called correctly
        constraint1 = Constraint(param='scale',
                                 value='scale',
                                 value_ex="M1.scale",
                                 func="M2.scale")
        constraint2 = Constraint(param='background',
                                 value='background',
                                 value_ex="M2.background",
                                 func="M1.background")
        tab1_call_dict = tab1.addConstraintToRow.call_args[1]
        tab2_call_dict = tab2.addConstraintToRow.call_args[1]
        self.assertEqual(vars(tab1_call_dict['constraint']), vars(constraint1))
        self.assertEqual(vars(tab2_call_dict['constraint']), vars(constraint2))
        self.assertEqual(tab1_call_dict['row'], 0)
        self.assertEqual(tab2_call_dict['row'], 1)

    def testGetTabByName(self):
        '''test getting a tab by its name'''
        # add a second tab
        self.widget.addFit(None)
        # get the second tab
        tab = self.widget.getTabByName('M2')
        self.assertEqual(tab, self.widget.tabs[1])
        # get some nonexistent tab
        tab = self.widget.getTabByName('foo')
        self.assertFalse(tab)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
| SasView/sasview | src/sas/qtgui/Perspectives/Fitting/UnitTesting/FittingPerspectiveTest.py | Python | bsd-3-clause | 13,718 |
# -*- coding:utf8 -*-
import os
import dsf
def clone(user, repo, target_name=None, cwd=None, branch='master'):
    """Clone a GitHub repository over HTTPS and switch to the given branch.

    :param user: GitHub account or organisation name
    :param repo: repository name
    :param target_name: directory name to clone into (defaults to ``repo``)
    :param cwd: parent directory to clone under (defaults to the current dir)
    :param branch: branch to check out after cloning
    :raises RuntimeError: if ``git clone`` exits with a non-zero status
    """
    # make sure we know where we are cloning to
    if cwd is None:
        cwd = os.getcwd()
    if target_name is None:
        target_name = repo

    # this is where we are cloning to
    target_dir = os.path.join(cwd, target_name)

    # make sure that the parent directory exists
    dsf.core.fs.ensure_folder_exists(cwd)

    # do we already have a repository in there?
    if not dsf.core.git.is_repository(cwd=target_dir):
        url = 'https://github.com/{0}/{1}.git'.format(user, repo)
        cmd = ['git', 'clone', url, target_name]
        retval = dsf.core.shell.run(cmd, cwd=cwd)
        if retval != 0:
            # include the URL and exit code so failures are diagnosable
            raise RuntimeError(
                "git clone of {0} failed (exit code {1})".format(url, retval))

    # are we on the right branch?
    if dsf.core.git.get_current_branch(cwd=target_dir) != branch:
        dsf.core.git.change_branch(branch, cwd=target_dir)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
import pep8
class UtilTestCase(unittest.TestCase):
    """Unit tests for pep8 utility helpers."""

    def test_normalize_paths(self):
        """normalize_paths handles None, lists, and comma-separated strings."""
        cwd = os.getcwd()
        # (input, expected) pairs, checked in order.
        cases = [
            ('', []),
            ([], []),
            (None, []),
            (['foo'], ['foo']),
            ('foo', ['foo']),
            ('foo,bar', ['foo', 'bar']),
            ('foo, bar ', ['foo', 'bar']),
            ('/foo/bar,baz/../bat', ['/foo/bar', cwd + '/bat']),
            (".pyc,\n build/*", ['.pyc', cwd + '/build/*']),
        ]
        for value, expected in cases:
            self.assertEqual(pep8.normalize_paths(value), expected)
| SpectraLogic/samba | third_party/pep8/testsuite/test_util.py | Python | gpl-3.0 | 880 |
# Copyright (c) 2015, Nordic Semiconductor
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Python standard library
import os
import tempfile
import shutil
# 3rd party libraries
from zipfile import ZipFile
#from cryptography.hazmat.backends import default_backend
#from cryptography.hazmat.primitives import hashes
import hashlib
# Nordic libraries
from nordicsemi.exceptions import NotImplementedException, NordicSemiException
from nordicsemi.dfu.nrfhex import *
from nordicsemi.dfu.init_packet import *
from nordicsemi.dfu.manifest import ManifestGenerator, Manifest
from nordicsemi.dfu.model import HexType, FirmwareKeys
from nordicsemi.dfu.crc16 import *
class Package(object):
    """
    Packages and unpacks Nordic DFU packages. Nordic DFU packages are zip files that contains firmware and meta-information
    necessary for utilities to perform a DFU on nRF5X devices.

    The internal data model used in Package is a dictionary. The dictionary is expressed like this in
    json format:

    {
        "manifest": {
            "bootloader": {
                "bin_file": "asdf.bin",
                "dat_file": "asdf.dat",
                "init_packet_data": {
                    "application_version": null,
                    "compression_type": 0,
                    "device_revision": null,
                    "device_type": 5,
                    "firmware_hash": "asdfasdkfjhasdkfjashfkjasfhaskjfhkjsdfhasjkhf",
                    "packet_version": 1,
                    "softdevice_req": [
                        17,
                        18
                    ]
                }
            }
        }
    }

    Attributes application, bootloader, softdevice, softdevice_bootloader shall not be put into the manifest if they are null
    """

    MANIFEST_FILENAME = "manifest.json"

    def __init__(self,
                 dev_type=None,
                 dev_rev=None,
                 app_version=None,
                 sd_req=None,
                 app_fw=None,
                 bootloader_fw=None,
                 softdevice_fw=None,
                 dfu_ver=0.7,
                 key_file=None):
        """
        Constructor that requires values used for generating a Nordic DFU package.

        :param int dev_type: Device type init-packet field
        :param int dev_rev: Device revision init-packet field
        :param int app_version: App version init-packet field
        :param list sd_req: Softdevice Requirement init-packet field
        :param str app_fw: Path to application firmware file
        :param str bootloader_fw: Path to bootloader firmware file
        :param str softdevice_fw: Path to softdevice firmware file
        :param float dfu_ver: DFU version to use when generating init-packet
        :param str key_file Path to Signing key file (PEM)
        :return: None
        """
        self.dfu_ver = dfu_ver

        # Only add init-packet fields that were actually supplied.
        init_packet_vars = {}
        if dev_type is not None:
            init_packet_vars[PacketField.DEVICE_TYPE] = dev_type
        if dev_rev is not None:
            init_packet_vars[PacketField.DEVICE_REVISION] = dev_rev
        if app_version is not None:
            init_packet_vars[PacketField.APP_VERSION] = app_version
        if sd_req is not None:
            init_packet_vars[PacketField.REQUIRED_SOFTDEVICES_ARRAY] = sd_req

        self.firmwares_data = {}
        if app_fw:
            self.__add_firmware_info(HexType.APPLICATION,
                                     app_fw,
                                     init_packet_vars)
        if bootloader_fw:
            self.__add_firmware_info(HexType.BOOTLOADER,
                                     bootloader_fw,
                                     init_packet_vars)
        if softdevice_fw:
            self.__add_firmware_info(HexType.SOFTDEVICE,
                                     softdevice_fw,
                                     init_packet_vars)

        # A signing key implies the signed (ECDS) init-packet format.
        if key_file:
            self.dfu_ver = 0.8
            self.key_file = key_file

    def generate_package(self, filename, preserve_work_directory=False):
        """
        Generates a Nordic DFU package. The package is a zip file containing firmware(s) and metadata required
        for Nordic DFU applications to perform DFU on nRF5X devices.

        :param str filename: Filename for generated package.
        :param bool preserve_work_directory: True to preserve the temporary working directory.
        Useful for debugging of a package, and if the user wants to look at the generated package without having to
        unzip it.
        :return: None
        """
        work_directory = self.__create_temp_workspace()

        if Package._is_bootloader_softdevice_combination(self.firmwares_data):
            # Removing softdevice and bootloader data from dictionary and adding the combined later
            softdevice_fw_data = self.firmwares_data.pop(HexType.SOFTDEVICE)
            bootloader_fw_data = self.firmwares_data.pop(HexType.BOOTLOADER)

            softdevice_fw_name = softdevice_fw_data[FirmwareKeys.FIRMWARE_FILENAME]
            bootloader_fw_name = bootloader_fw_data[FirmwareKeys.FIRMWARE_FILENAME]

            new_filename = "sd_bl.bin"
            sd_bl_file_path = os.path.join(work_directory, new_filename)

            nrf_hex = nRFHex(softdevice_fw_name, bootloader_fw_name)
            nrf_hex.tobinfile(sd_bl_file_path)

            softdevice_size = nrf_hex.size()
            bootloader_size = nrf_hex.bootloadersize()

            self.__add_firmware_info(HexType.SD_BL,
                                     sd_bl_file_path,
                                     softdevice_fw_data[FirmwareKeys.INIT_PACKET_DATA],
                                     softdevice_size,
                                     bootloader_size)

        for key in self.firmwares_data:
            firmware = self.firmwares_data[key]

            # Normalize the firmware file and store it in the work directory
            firmware[FirmwareKeys.BIN_FILENAME] = \
                Package.normalize_firmware_to_bin(work_directory, firmware[FirmwareKeys.FIRMWARE_FILENAME])

            # Calculate the hash for the .bin file located in the work directory
            bin_file_path = os.path.join(work_directory, firmware[FirmwareKeys.BIN_FILENAME])
            init_packet_data = firmware[FirmwareKeys.INIT_PACKET_DATA]

            # The integrity-check fields depend on the targeted DFU version.
            if self.dfu_ver < 0.7:
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID] = INIT_PACKET_USES_CRC16
                firmware_hash = Package.calculate_crc16(bin_file_path)
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_CRC16] = firmware_hash
            elif self.dfu_ver == 0.7:
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID] = INIT_PACKET_USES_HASH
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_LENGTH] = int(Package.calculate_file_size(bin_file_path))
                firmware_hash = Package.calculate_sha256_hash(bin_file_path)
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_HASH] = firmware_hash
            elif self.dfu_ver == 0.8:
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID] = INIT_PACKET_EXT_USES_ECDS
                firmware_hash = Package.calculate_sha256_hash(bin_file_path)
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_LENGTH] = int(Package.calculate_file_size(bin_file_path))
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_HASH] = firmware_hash

            # Store the .dat file in the work directory.
            # (A redundant duplicate _create_init_packet call was removed here.)
            init_packet = self._create_init_packet(firmware)
            init_packet_filename = firmware[FirmwareKeys.BIN_FILENAME].replace(".bin", ".dat")

            with open(os.path.join(work_directory, init_packet_filename), 'wb') as init_packet_file:
                init_packet_file.write(init_packet)

            firmware[FirmwareKeys.DAT_FILENAME] = \
                init_packet_filename

        # Store the manifest to manifest.json
        manifest = self.create_manifest()

        with open(os.path.join(work_directory, Package.MANIFEST_FILENAME), "w") as manifest_file:
            manifest_file.write(manifest)

        # Package the work_directory to a zip file
        Package.create_zip_package(work_directory, filename)

        # Delete the temporary directory
        if not preserve_work_directory:
            shutil.rmtree(work_directory)

    @staticmethod
    def __create_temp_workspace():
        """Create and return a unique temporary working directory."""
        return tempfile.mkdtemp(prefix="nrf_dfu_")

    @staticmethod
    def create_zip_package(work_directory, filename):
        """Zip every file in work_directory into the archive at filename."""
        files = os.listdir(work_directory)

        with ZipFile(filename, 'w') as package:
            for _file in files:
                file_path = os.path.join(work_directory, _file)
                package.write(file_path, _file)

    @staticmethod
    def calculate_file_size(firmware_filename):
        """Return the size of the given file in bytes."""
        return os.path.getsize(firmware_filename)

    @staticmethod
    def calculate_sha256_hash(firmware_filename):
        """Return the SHA-256 digest (raw bytes) of the given file."""
        read_buffer = 4096

        digest = hashlib.sha256()

        with open(firmware_filename, 'rb') as firmware_file:
            while True:
                data = firmware_file.read(read_buffer)

                if data:
                    digest.update(data)
                else:
                    break

        return digest.digest()

    @staticmethod
    def calculate_crc16(firmware_filename):
        """
        Calculates CRC16 hash on provided firmware filename

        :type str firmware_filename:
        """
        data_buffer = b''
        read_size = 4096

        with open(firmware_filename, 'rb') as firmware_file:
            while True:
                data = firmware_file.read(read_size)

                if data:
                    data_buffer += data
                else:
                    break

        return calc_crc16(data_buffer, 0xffff)

    def create_manifest(self):
        """Build and return the JSON manifest for the current firmware set."""
        manifest = ManifestGenerator(self.dfu_ver, self.firmwares_data)
        return manifest.generate_manifest()

    @staticmethod
    def _is_bootloader_softdevice_combination(firmwares):
        """True when both a bootloader and a softdevice are being packaged."""
        return (HexType.BOOTLOADER in firmwares) and (HexType.SOFTDEVICE in firmwares)

    def __add_firmware_info(self, firmware_type, filename, init_packet_data, sd_size=None, bl_size=None):
        """Register a firmware image and a private copy of its init-packet data."""
        self.firmwares_data[firmware_type] = {
            FirmwareKeys.FIRMWARE_FILENAME: filename,
            FirmwareKeys.INIT_PACKET_DATA: init_packet_data.copy(),
            # Copying init packet to avoid using the same for all firmware
        }

        if firmware_type == HexType.SD_BL:
            self.firmwares_data[firmware_type][FirmwareKeys.SD_SIZE] = sd_size
            self.firmwares_data[firmware_type][FirmwareKeys.BL_SIZE] = bl_size

    @staticmethod
    def _create_init_packet(firmware_data):
        """Serialize the init packet for the given firmware entry."""
        p = Packet(firmware_data[FirmwareKeys.INIT_PACKET_DATA])
        return p.generate_packet()

    @staticmethod
    def normalize_firmware_to_bin(work_directory, firmware_path):
        """Convert a .hex firmware to .bin inside work_directory; return the new path."""
        firmware_filename = os.path.basename(firmware_path)
        new_filename = firmware_filename.replace(".hex", ".bin")
        new_filepath = os.path.join(work_directory, new_filename)

        if not os.path.exists(new_filepath):
            temp = nRFHex(firmware_path)
            temp.tobinfile(new_filepath)

        return new_filepath

    @staticmethod
    def unpack_package(package_path, target_dir):
        """
        Unpacks a Nordic DFU package.

        :param str package_path: Path to the package
        :param str target_dir: Target directory to unpack the package to
        :return: Manifest Manifest: Returns a manifest back to the user. The manifest is a parse datamodel
        of the manifest found in the Nordic DFU package.
        """
        if not os.path.isfile(package_path):
            raise NordicSemiException("Package {0} not found.".format(package_path))

        target_dir = os.path.abspath(target_dir)
        target_base_path = os.path.dirname(target_dir)

        if not os.path.exists(target_base_path):
            raise NordicSemiException("Base path to target directory {0} does not exist.".format(target_base_path))

        if not os.path.isdir(target_base_path):
            raise NordicSemiException("Base path to target directory {0} is not a directory.".format(target_base_path))

        if os.path.exists(target_dir):
            # BUGFIX: the message was never formatted; target_dir was passed
            # as a stray extra argument to the exception constructor.
            raise NordicSemiException(
                "Target directory {0} exists, not able to unpack to that directory.".format(target_dir))

        with ZipFile(package_path, 'r') as pkg:
            pkg.extractall(target_dir)

            with open(os.path.join(target_dir, Package.MANIFEST_FILENAME), 'r') as f:
                _json = f.read()
                """:type :str """

                return Manifest.from_json(_json)
| dptechnics/Espruino | targetlibs/nrf5x/nrf51_sdk/examples/dfu/experimental/master_control_panel_patch/package.py | Python | mpl-2.0 | 14,818 |
import bitcoin
from bitcoin.core import x, b2x, CMutableOutPoint, CMutableTxIn, CMutableTxOut
from bitcoin.core.script import SIGHASH_ALL, SIGHASH_ANYONECANPAY
from bitcoin.core.scripteval import VerifyScript, SCRIPT_VERIFY_P2SH
from bitcoin.wallet import CBitcoinSecret
from PyQt4.QtGui import *
from PyQt4 import QtCore
from PyQt4.QtCore import QAbstractTableModel, QModelIndex, Qt, QVariant
from hashmal_lib.core.script import Script
from hashmal_lib.core import chainparams
from hashmal_lib.core.transaction import Transaction, sig_hash_name, sig_hash_explanation, sighash_types, sighash_types_by_value
from hashmal_lib.core.utils import is_hex, format_hex_string
from hashmal_lib.widgets.tx import TxWidget, InputsTree, OutputsTree, TimestampWidget
from hashmal_lib.widgets.script import ScriptEditor
from hashmal_lib.gui_utils import Separator, floated_buttons, AmountEdit, HBox, monospace_font, OutputAmountEdit, RawRole
from base import BaseDock, Plugin, Category, augmenter
from item_types import ItemAction
def make_plugin():
    """Entry point for the plugin system: wrap the TxBuilder dock in a Plugin."""
    return Plugin(TxBuilder)
class TxBuilder(BaseDock):
tool_name = 'Transaction Builder'
description = 'Transaction Builder helps you create transactions.'
is_large = True
category = Category.Tx
def __init__(self, handler):
super(TxBuilder, self).__init__(handler)
self.raw_tx.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.raw_tx.customContextMenuRequested.connect(self.context_menu)
@augmenter
def item_actions(self, *args):
return [ItemAction(self.tool_name, 'Transaction', 'Edit', self.deserialize_item)]
def init_data(self):
self.tx = None
def create_layout(self):
vbox = QVBoxLayout()
self.tabs = tabs = QTabWidget()
tabs.addTab(self.create_version_locktime_tab(), '&Version/Locktime')
tabs.addTab(self.create_inputs_tab(), '&Inputs')
tabs.addTab(self.create_outputs_tab(), '&Outputs')
tabs.addTab(self.create_review_tab(), '&Review')
tabs.addTab(self.create_sign_tab(), 'Sig&n')
self.setFocusProxy(self.tabs)
self.tx_field_widgets = []
tabs.insertTab(3, self.create_other_tab(), 'Ot&her')
self.adjust_tx_fields()
# Build the tx if the Review tab is selected.
def maybe_build(i):
if str(tabs.tabText(i)) == '&Review' or str(tabs.tabText(i)) == 'Sig&n':
self.build_transaction()
tabs.currentChanged.connect(maybe_build)
vbox.addWidget(tabs)
return vbox
def context_menu(self, position):
menu = self.raw_tx.createStandardContextMenu(position)
txt = str(self.raw_tx.toPlainText())
if txt:
self.handler.add_plugin_actions(self, menu, txt)
menu.exec_(self.raw_tx.viewport().mapToGlobal(position))
def create_version_locktime_tab(self):
form = QFormLayout()
self.version_edit = AmountEdit()
self.version_edit.setText('1')
self.version_edit.setWhatsThis('Use this field to specify the version of your transaction. In Bitcoin, transactions are currently version 1.')
self.locktime_edit = AmountEdit()
self.locktime_edit.setText('0')
self.locktime_edit.setWhatsThis('Use this field to specify the locktime of your transaction. For most common transactions, locktime is zero.')
version_desc = QLabel('A transaction\'s version determines how it is interpreted.\n\nBitcoin transactions are currently version 1.')
locktime_desc = QLabel('A transaction\'s locktime defines the earliest time or block that it may be added to the blockchain.\n\nLocktime only applies if it\'s non-zero and at least one input has a Sequence that\'s not the maximum possible value.')
for i in [version_desc, locktime_desc]:
i.setWordWrap(True)
for i in [self.version_edit, self.locktime_edit]:
i.setFont(monospace_font)
form.addRow(version_desc)
form.addRow('Version:', self.version_edit)
form.addRow(Separator())
form.addRow(locktime_desc)
form.addRow('Locktime:', self.locktime_edit)
w = QWidget()
w.setLayout(form)
return w
def create_inputs_tab(self):
form = QFormLayout()
self.inputs_tree = InputsTree()
self.inputs_tree.view.setWhatsThis('The inputs of your transaction are displayed here.')
self.inputs_editor = InputsEditor(self.handler.gui, self.inputs_tree)
self.inputs_editor.setEnabled(False)
def update_enabled_widgets():
num_inputs = len(self.inputs_tree.get_inputs())
self.inputs_editor.setEnabled(num_inputs > 0)
def add_input():
outpoint = CMutableOutPoint(n=0)
new_input = CMutableTxIn(prevout=outpoint)
self.inputs_tree.add_input(new_input)
update_enabled_widgets()
if len(self.inputs_tree.get_inputs()) > 0:
self.inputs_tree.view.selectRow(self.inputs_tree.model.rowCount() - 1)
update_enabled_widgets()
add_input_button = QPushButton('New input')
add_input_button.setToolTip('Add a new input')
add_input_button.setWhatsThis('Clicking this button will add a new input to your transaction.')
add_input_button.clicked.connect(add_input)
form.addRow(self.inputs_tree)
form.addRow(Separator())
form.addRow(self.inputs_editor)
form.addRow(Separator())
form.addRow(floated_buttons([add_input_button]))
w = QWidget()
w.setLayout(form)
return w
def create_outputs_tab(self):
form = QFormLayout()
self.outputs_tree = OutputsTree()
self.outputs_tree.view.setWhatsThis('The outputs of your transaction are displayed here.')
self.outputs_editor = OutputsEditor(self.handler.gui, self.outputs_tree)
self.outputs_editor.setEnabled(False)
def update_enabled_widgets():
num_outputs = len(self.outputs_tree.get_outputs())
self.outputs_editor.setEnabled(num_outputs > 0)
def add_output():
new_output = CMutableTxOut(0)
self.outputs_tree.add_output(new_output)
update_enabled_widgets()
if len(self.outputs_tree.get_outputs()) > 0:
self.outputs_tree.view.selectRow(self.outputs_tree.model.rowCount() - 1)
update_enabled_widgets()
add_output_button = QPushButton('New output')
add_output_button.setToolTip('Add a new output')
add_output_button.setWhatsThis('Clicking this button will add a new output to your transaction.')
add_output_button.clicked.connect(add_output)
form.addRow(self.outputs_tree)
form.addRow(Separator())
form.addRow(self.outputs_editor)
form.addRow(Separator())
form.addRow(floated_buttons([add_output_button]))
w = QWidget()
w.setLayout(form)
return w
def create_review_tab(self):
form = QFormLayout()
self.raw_tx = QTextEdit()
self.raw_tx.setWhatsThis('The transaction you build is displayed here.')
self.raw_tx.setReadOnly(True)
self.tx_widget = TxWidget()
form.addRow('Raw Tx:', self.raw_tx)
form.addRow(self.tx_widget)
w = QWidget()
w.setLayout(form)
return w
def create_other_tab(self):
self.tx_fields_layout = QFormLayout()
w = QWidget()
w.setLayout(self.tx_fields_layout)
return w
def create_sign_tab(self):
self.sighash_widget = SigHashWidget(self)
return self.sighash_widget
def deserialize_item(self, item):
self.deserialize_raw(item.raw())
def deserialize_raw(self, rawtx):
"""Update editor widgets with rawtx's data."""
self.needsFocus.emit()
try:
tx = Transaction.deserialize(x(rawtx))
except Exception:
return
else:
self.version_edit.set_amount(tx.nVersion)
self.inputs_tree.model.set_tx(tx)
self.outputs_tree.model.set_tx(tx)
self.locktime_edit.set_amount(tx.nLockTime)
for name, w in self.tx_field_widgets:
if name in ['nVersion', 'vin', 'vout', 'nLockTime']:
continue
try:
value = getattr(tx, name)
except AttributeError:
continue
if isinstance(w, AmountEdit):
w.set_amount(value)
else:
w.setText(str(value))
self.build_transaction()
def build_transaction(self):
self.tx_widget.clear()
self.sighash_widget.clear()
self.tx = tx = Transaction()
tx.nVersion = self.version_edit.get_amount()
tx.vin = self.inputs_tree.get_inputs()
tx.vout = self.outputs_tree.get_outputs()
tx.nLockTime = self.locktime_edit.get_amount()
for name, w in self.tx_field_widgets:
if not name in [field[0] for field in tx.fields]:
continue
value = str(w.text())
default = getattr(tx, name)
if isinstance(default, int):
value = w.get_amount()
setattr(tx, name, value)
self.raw_tx.setText(bitcoin.core.b2x(tx.serialize()))
self.tx_widget.set_tx(tx)
self.sighash_widget.set_tx(tx)
def on_option_changed(self, key):
if key == 'chainparams':
self.needsUpdate.emit()
    def adjust_tx_fields(self):
        """Show or hide tx field widgets.

        Ensures a widget exists for every chain-specific field of the active
        chain, shows widgets for fields the chain uses, hides the rest, and
        disables the "Other" tab when the chain has only the four standard
        fields.
        """
        tx_fields = chainparams.get_tx_fields()
        for field in tx_fields:
            name = field[0]
            if name in ['nVersion', 'vin', 'vout', 'nLockTime']:
                continue
            default_value = field[3]
            # Create a widget for this field if one doesn't exist yet.
            if name not in [j[0] for j in self.tx_field_widgets]:
                widget = QLineEdit()
                if isinstance(default_value, int):
                    # Special case for timestamp fields.
                    if name == 'Timestamp':
                        widget = TimestampWidget()
                        widget.timestamp_raw.setReadOnly(False)
                    else:
                        widget = AmountEdit()
                widget.setText(str(default_value))
                label = QLabel(''.join([name, ':']))
                self.tx_field_widgets.append((name, widget))
                self.tx_fields_layout.addRow(label, widget)
        # Show widgets for the current chain's fields; hide all others.
        tx_field_names = [i[0] for i in tx_fields]
        for name, w in self.tx_field_widgets:
            l = self.tx_fields_layout.labelForField(w)
            if name in tx_field_names:
                w.show()
                l.show()
            else:
                w.hide()
                l.hide()
        # Tab index 3 is the "Other" tab holding the chain-specific fields.
        if tx_field_names == ['nVersion', 'vin', 'vout', 'nLockTime']:
            self.tabs.setTabEnabled(3, False)
        else:
            self.tabs.setTabEnabled(3, True)
    def refresh_data(self):
        """Re-sync field widgets for the active chain and rebuild the tx."""
        self.adjust_tx_fields()
        self.build_transaction()
class BaseEditor(QWidget):
    """Common machinery for the input and output item editors.

    Maps editor widgets onto the tree's model through a QDataWidgetMapper
    and keeps the mapper pointed at the tree view's current selection.
    Subclasses register their widgets via ``self.mapper.addMapping``.
    """
    def __init__(self, tree, parent=None):
        super(BaseEditor, self).__init__(parent)
        self.tree = tree
        mapper = QDataWidgetMapper()
        mapper.setModel(self.tree.model)
        # Widgets write back only when do_submit() is called explicitly.
        mapper.setSubmitPolicy(QDataWidgetMapper.ManualSubmit)
        self.mapper = mapper
        selection_model = self.tree.view.selectionModel()
        selection_model.selectionChanged.connect(self.selection_changed)

    def selection_changed(self, selected, deselected):
        """Point the mapper at the newly selected row, if any."""
        indexes = selected.indexes()
        if not indexes:
            # Nothing selected: grey out the editor.
            self.setEnabled(False)
            return
        self.setEnabled(True)
        self.mapper.setCurrentIndex(indexes[0].row())

    def do_delete(self):
        """Remove the currently mapped row from the model."""
        self.tree.model.removeRow(self.mapper.currentIndex())

    def do_submit(self):
        """Write the editor widgets' values back into the model."""
        self.mapper.submit()
class InputsEditor(BaseEditor):
    """Editor for a single transaction input: previous outpoint, input
    script, and sequence number."""
    def __init__(self, main_window, tree, parent=None):
        super(InputsEditor, self).__init__(tree, parent)
        # Widgets for the input's fields.
        self.prev_tx = QLineEdit()
        self.prev_tx.setToolTip('Transaction ID of the tx with the output being spent')
        self.prev_tx.setWhatsThis('Use this field to specify the transaction that contains the output you\'re spending.')
        self.prev_vout = AmountEdit()
        self.prev_vout.setToolTip('Output index of the previous transaction')
        self.prev_vout.setWhatsThis('Use this field to specify the output you are spending of the previous transaction.')
        self.script = ScriptEditor(main_window)
        self.script.setToolTip('Script that will be put on the stack before the previous output\'s script.')
        self.script.setWhatsThis('Enter a script here. This script will be evaluated directly before the script of the output you are spending. Any values that are pushed onto the stack when this script finishes its execution are present when the output script is evaluated afterward.')
        self.sequence = AmountEdit()
        self.sequence.setText('4294967295')
        self.sequence.setWhatsThis('Use this field to specify the sequence value. It\'s likely that you should leave this as its default (maximum) value.')
        maxify_input_sequence = QPushButton('Max')
        maxify_input_sequence.clicked.connect(lambda: self.sequence.setText('0xffffffff'))
        maxify_input_sequence.setWhatsThis('This button will set the sequence to its default value.')
        for i in [self.prev_tx, self.prev_vout, self.script, self.sequence]:
            i.setFont(monospace_font)
        # Map widgets onto the input model's columns (see BaseEditor.mapper).
        self.mapper.addMapping(self.prev_tx, 0)
        self.mapper.addMapping(self.prev_vout, 1, 'amount')
        self.mapper.addMapping(self.script, 2, 'humanText')
        self.mapper.addMapping(self.sequence, 3, 'amount')
        delete_button = QPushButton('Remove Input')
        delete_button.setToolTip('Remove this input from the transaction')
        delete_button.clicked.connect(self.do_delete)
        submit_button = QPushButton('Save')
        submit_button.setToolTip('Update input with the above data')
        submit_button.clicked.connect(self.do_submit)
        # Lay out the fields and action buttons.
        form = QFormLayout()
        form.setContentsMargins(0, 0, 0, 0)
        form.addRow('Previous Transaction: ', self.prev_tx)
        form.addRow('Previous Tx Output: ', self.prev_vout)
        form.addRow('Input script: ', self.script)
        seq_desc = QLabel('Sequence is mostly deprecated.\nIf an input has a sequence that\'s not the maximum value, the transaction\'s locktime will apply.')
        seq_desc.setWordWrap(True)
        form.addRow(seq_desc)
        form.addRow('Sequence: ', HBox(self.sequence, maxify_input_sequence))
        form.addRow(floated_buttons([delete_button, submit_button]))
        self.setLayout(form)
class OutputsEditor(BaseEditor):
    """Editor for a single transaction output: value and output script."""
    def __init__(self, main_window, tree, parent=None):
        super(OutputsEditor, self).__init__(tree, parent)
        self.out_value = OutputAmountEdit()
        self.out_value.setToolTip('Output amount')
        self.out_value.setWhatsThis('Use this field to specify the value of this output. Depending on your settings, the value may be in satoshis (no decimals), or coins (1 coin = 100000000 satoshis).')
        self.script = ScriptEditor(main_window)
        self.script.setToolTip('Script that will be put on the stack after the input that spends it.')
        self.script.setWhatsThis('Enter a script here. This script will be evaluated directly after the script of the input that spends it in the future. This script will have access to the values that are on the stack after the input script that spends it has executed.')
        for i in [self.out_value, self.script]:
            i.setFont(monospace_font)
        # Map widgets onto the output model's columns (see BaseEditor.mapper).
        self.mapper.addMapping(self.out_value, 0, 'satoshis')
        self.mapper.addMapping(self.script, 1, 'humanText')
        submit_button = QPushButton('Save')
        submit_button.setToolTip('Update input with the above data')
        submit_button.clicked.connect(self.do_submit)
        delete_button = QPushButton('Remove Output')
        delete_button.setToolTip('Remove this output from the transaction')
        delete_button.clicked.connect(self.do_delete)
        form = QFormLayout()
        form.setContentsMargins(0, 0, 0, 0)
        form.addRow('Amount: ', self.out_value)
        form.addRow('Output script: ', self.script)
        form.addRow(floated_buttons([delete_button, submit_button]))
        self.setLayout(form)
# Widgets for signing transactions.
class SigHashModel(QAbstractTableModel):
    """Models a transaction's signature hash.

    A single-row model whose columns hold everything needed to compute a
    signature hash:

      0: unspent output's script (human-readable)
      1: spending transaction (hex)
      2: input index being signed
      3: sighash flag name (SIGHASH_ALL / NONE / SINGLE)
      4: SIGHASH_ANYONECANPAY modifier (bool)
      5: full sighash name (read-only, derived)
      6: sighash explanation (read-only, derived)
    """
    SigHashName = 5
    SigHashExplanation = 6
    def __init__(self, parent=None):
        super(SigHashModel, self).__init__(parent)
        self.clear()
    def clear(self):
        """Reset all fields to their defaults."""
        self.beginResetModel()
        self.utxo_script = None
        self.tx = None
        self.inIdx = 0
        self.sighash_type = SIGHASH_ALL
        self.anyone_can_pay = False
        self.endResetModel()
    def rowCount(self, parent=QModelIndex()):
        # Always a single row.
        return 1
    def columnCount(self, parent=QModelIndex()):
        return 7
    def data(self, index, role = Qt.DisplayRole):
        if not index.isValid():
            return None
        if role not in [Qt.DisplayRole, Qt.ToolTipRole, Qt.EditRole]:
            return None
        data = None
        c = index.column()
        if c == 0:
            if self.utxo_script:
                data = self.utxo_script.get_human()
        elif c == 1:
            if self.tx:
                data = b2x(self.tx.serialize())
        elif c == 2:
            data = self.inIdx
        elif c == 3:
            data = sighash_types_by_value[self.sighash_type]
        elif c == 4:
            if role == Qt.CheckStateRole:
                data = Qt.Checked if self.anyone_can_pay else Qt.Unchecked
            else:
                data = self.anyone_can_pay
        elif c == self.SigHashName:
            # Combine the base flag with ANYONECANPAY for display purposes.
            data = sig_hash_name(self.sighash_type | SIGHASH_ANYONECANPAY if self.anyone_can_pay else self.sighash_type)
        elif c == self.SigHashExplanation:
            data = sig_hash_explanation(self.sighash_type | SIGHASH_ANYONECANPAY if self.anyone_can_pay else self.sighash_type)
        return data
    def setData(self, index, value, role = Qt.EditRole):
        # NOTE: values arrive as PyQt4 QVariant instances (toString()/toInt()).
        if not index.isValid():
            return False
        c = index.column()
        if c == 0:
            try:
                self.utxo_script = Script.from_human(str(value.toString()))
            except Exception:
                return False
            self.dataChanged.emit(self.index(index.row(), c), self.index(index.row(), c))
        elif c == 1:
            try:
                self.tx = Transaction.deserialize(x(str(value.toString())))
            except Exception:
                return False
            self.dataChanged.emit(self.index(index.row(), c), self.index(index.row(), c))
        elif c == 2:
            tmpIdx, ok = value.toInt()
            if not ok:
                return False
            self.inIdx = tmpIdx
            self.dataChanged.emit(self.index(index.row(), c), self.index(index.row(), c))
        elif c == 3:
            # The flag can be set by name (EditRole) or by raw value (RawRole).
            if role == Qt.EditRole:
                val = str(value.toString())
                sighash_type = sighash_types.get(val)
                if not sighash_type:
                    return False
                self.sighash_type = sighash_type
            elif role == RawRole:
                tmpType, ok = value.toInt()
                if not ok:
                    return False
                self.sighash_type = tmpType
            # Derived name/explanation columns change too.
            self.dataChanged.emit(self.index(index.row(), c), self.index(index.row(), self.SigHashExplanation))
        elif c == 4:
            self.anyone_can_pay = value.toBool()
            self.dataChanged.emit(self.index(index.row(), c), self.index(index.row(), self.SigHashExplanation))
        return True
    def flags(self, index):
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable
    def set_tx(self, tx):
        """Set the spending transaction."""
        self.setData(self.index(0, 1), QVariant(b2x(tx.serialize())))
    def get_fields(self):
        """Returns the fields necessary to sign the transaction."""
        hash_type = self.sighash_type
        if self.anyone_can_pay:
            hash_type = hash_type | SIGHASH_ANYONECANPAY
        return (self.utxo_script, self.tx, self.inIdx, hash_type)
    def set_fields(self, script=None, txTo=None, inIdx=None, hashType=None):
        """Populate model.

        Args:
            script (str): Human-readable script.
            txTo (Transaction): Transaction.
            inIdx (int): Input index.
            hashType (int): SigHash type.
        """
        if script is not None:
            self.setData(self.index(0, 0), QVariant(script))
        if txTo is not None:
            self.setData(self.index(0, 1), QVariant(b2x(txTo.serialize())))
        if inIdx is not None:
            self.setData(self.index(0, 2), QVariant(inIdx))
        if hashType is not None:
            # Split the combined value into the base flag (low bits) and the
            # ANYONECANPAY modifier.
            self.setData(self.index(0, 3), QVariant(hashType & 0x1f), RawRole)
            self.setData(self.index(0, 4), QVariant(hashType & SIGHASH_ANYONECANPAY))
class SigHashWidget(QWidget):
    """Model and view of a transaction's signature hash.

    Lets the user pick the sighash flag, enter the unspent output's script
    and a private key, then signs the selected input of the transaction
    held by the dock.
    """
    def __init__(self, dock, parent=None):
        super(SigHashWidget, self).__init__(parent)
        self.dock = dock
        self.model = SigHashModel()
        self.mapper = QDataWidgetMapper()
        self.mapper.setModel(self.model)
        # --- widgets describing what is being signed ---
        self.utxo_script = ScriptEditor(self.dock.handler.gui)
        self.utxo_script.setToolTip('Script from the unspent output')
        self.utxo_script.setWhatsThis('Enter the output script from the unspent output you are spending here.')
        self.utxo_script.setFixedHeight(42)
        self.inIdx = QSpinBox()
        self.inIdx.setRange(0, 0)
        self.inIdx.setToolTip('Input to sign')
        self.inIdx.setWhatsThis('This specifies the input that will be signed.')
        self.sighash_type = QComboBox()
        self.sighash_type.setToolTip('Signature hash type')
        self.sighash_type.setWhatsThis('Use this to specify the signature hash flag you want to use. The flags have different effects and are explained in the box to the right.')
        self.sighash_type.addItems(['SIGHASH_ALL', 'SIGHASH_NONE', 'SIGHASH_SINGLE'])
        self.anyone_can_pay = QCheckBox('SIGHASH_ANYONECANPAY')
        self.anyone_can_pay.setWhatsThis('Use this to add the ANYONECANPAY flag to your signature hash type. Its effect is explained in the box to the right.')
        self.sighash_name = QLineEdit()
        self.sighash_name.setToolTip('Signature hash name')
        self.sighash_name.setWhatsThis('The full name of your current signature hash type is shown here.')
        self.sighash_explanation = QTextEdit()
        self.sighash_explanation.setToolTip('Signature hash explanation')
        self.sighash_explanation.setWhatsThis('A description of your current signature hash type is shown here.')
        for i in [self.sighash_name, self.sighash_explanation]:
            i.setReadOnly(True)
        # Map widgets onto the SigHashModel columns.
        self.mapper.addMapping(self.utxo_script, 0, 'humanText')
        self.mapper.addMapping(self.inIdx, 2)
        self.mapper.addMapping(self.sighash_type, 3)
        self.mapper.addMapping(self.anyone_can_pay, 4)
        self.mapper.addMapping(self.sighash_name, SigHashModel.SigHashName)
        self.mapper.addMapping(self.sighash_explanation, SigHashModel.SigHashExplanation)
        # --- signing controls ---
        self.privkey_edit = QLineEdit()
        self.privkey_edit.setWhatsThis('Use this to enter a private key with which to sign the transaction.')
        self.privkey_edit.setPlaceholderText('Enter a private key')
        self.sign_button = QPushButton('Sign')
        self.sign_button.setToolTip('Sign transaction')
        self.sign_button.setWhatsThis('Clicking this button will attempt to sign the transaction with your private key.')
        self.sign_button.clicked.connect(self.sign_transaction)
        self.verify_script = QCheckBox('Verify script')
        self.verify_script.setToolTip('Verify input script')
        self.verify_script.setWhatsThis('If this is checked, Hashmal will attempt to verify the completed script.')
        # --- layout ---
        signing_form = QFormLayout()
        privkey_hbox = QHBoxLayout()
        privkey_hbox.addWidget(self.privkey_edit, stretch=1)
        privkey_hbox.addWidget(self.sign_button)
        privkey_hbox.addWidget(self.verify_script)
        self.result_edit = QLineEdit()
        self.result_edit.setReadOnly(True)
        self.result_edit.setPlaceholderText('Result of signing')
        self.result_edit.setWhatsThis('The result of signing the transaction will be shown here.')
        signing_form.addRow('Private Key:', privkey_hbox)
        signing_form.addRow('Result:', self.result_edit)
        tx_form = QFormLayout()
        tx_form.addRow('Unspent Output Script:', self.utxo_script)
        tx_form.addRow('Input To Sign:', self.inIdx)
        sighash_controls = QFormLayout()
        sighash_controls.addRow('SigHash flag:', self.sighash_type)
        sighash_controls.addRow(self.anyone_can_pay)
        sighash_info = QVBoxLayout()
        sighash_info.addWidget(self.sighash_name)
        sighash_info.addWidget(self.sighash_explanation)
        sighash_layout = QHBoxLayout()
        sighash_layout.addLayout(sighash_controls)
        sighash_layout.addLayout(sighash_info)
        vbox = QVBoxLayout()
        vbox.addLayout(signing_form)
        vbox.addWidget(Separator())
        vbox.addLayout(tx_form)
        vbox.addWidget(Separator())
        vbox.addLayout(sighash_layout)
        self.setLayout(vbox)
        self.mapper.toFirst()
    def set_tx(self, tx):
        """Set the transaction to sign and bound the input index spinner."""
        self.inIdx.setRange(0, len(tx.vin) - 1)
        self.model.set_tx(tx)
        self.mapper.toFirst()
    def clear(self):
        """Reset the result display and the underlying model."""
        self.result_edit.clear()
        self.result_edit.setProperty('hasError', False)
        self.style().polish(self.result_edit)
        self.model.clear()
    def set_result_message(self, text, error=False):
        """Show a result message and forward it to the dock's logger."""
        self.result_edit.setText(text)
        self.result_edit.setProperty('hasError', error)
        self.style().polish(self.result_edit)
        if error:
            self.dock.error(text)
        else:
            self.dock.info(text)
    def sign_transaction(self):
        """Sign the transaction."""
        script, txTo, inIdx, hash_type = self.model.get_fields()
        if inIdx >= len(txTo.vin):
            self.set_result_message('Nonexistent input specified for signing.', error=True)
            return
        if not script:
            self.set_result_message('Invalid output script.', error=True)
            return
        privkey = self.get_private_key()
        if not privkey:
            self.set_result_message('Could not parse private key.', error=True)
            return
        sig_hash = chainparams.signature_hash(script, txTo, inIdx, hash_type)
        sig = privkey.sign(sig_hash)
        # Append the sighash type byte to the DER signature.
        # NOTE: str.decode('hex') is Python 2 only.
        hash_type_hex = format_hex_string(hex(hash_type), with_prefix=False).decode('hex')
        sig = sig + hash_type_hex
        txTo.vin[inIdx].scriptSig = Script([sig, privkey.pub])
        if self.verify_script.isChecked():
            # Try verify
            try:
                VerifyScript(txTo.vin[inIdx].scriptSig, script, txTo, inIdx, (SCRIPT_VERIFY_P2SH,))
            except Exception as e:
                self.set_result_message('Error when verifying: %s' % str(e), error=True)
                return
        self.dock.deserialize_raw(b2x(txTo.serialize()))
        # Deserializing a tx clears the model, so re-populate.
        self.model.set_fields(script=script.get_human(), inIdx=inIdx, hashType=hash_type)
        self.set_result_message('Successfully set scriptSig for input %d (SigHash type: %s).' % (inIdx, sig_hash_name(hash_type)))
    def get_private_key(self):
        """Attempt to parse the private key that was input.

        Accepts hex or (via CBitcoinSecret) WIF; returns None on failure.
        """
        txt = str(self.privkey_edit.text())
        privkey = None
        if is_hex(txt):
            txt = format_hex_string(txt, with_prefix=False)
        try:
            privkey = CBitcoinSecret.from_secret_bytes(x(txt))
        except Exception:
            pass
        return privkey
| mazaclub/hashmal | hashmal_lib/plugins/tx_builder.py | Python | gpl-3.0 | 28,425 |
import calendar
import time
import hdbfs.db
# Object type tags stored in the legacy ``objl.type`` column.
TYPE_NILL = 0
TYPE_FILE = 1000        # primary file object
TYPE_FILE_DUP = 1001    # duplicate of another file (points at it via ``dup``)
TYPE_FILE_VAR = 1002    # variant of another file
TYPE_GROUP = 2000
TYPE_ALBUM = 2001
TYPE_CLASSIFIER = 2002
class LinkedDuplicateIterator:
    """Iterate over duplicate objects (TYPE_FILE_DUP) that are still linked.

    A duplicate is "linked" if anything else still references it: another
    object's ``dup`` column points at it, or it appears as a parent or a
    child in the ``rel2`` table.  These are the rows that
    ``correct_linked_duplicates`` must repair.

    NOTE: uses the Python 2 iterator protocol (``next`` method).
    """
    def __init__( self, session ):
        self.__session = session
        self.__iter = self.__session.execute(
                'SELECT id FROM objl WHERE type = :type',
                { 'type' : TYPE_FILE_DUP, } ).__iter__()
    def __iter__( self ):
        return self
    def next( self ):
        # Skip unlinked duplicates; StopIteration from the outer cursor
        # propagates and terminates the iteration.
        while True:
            ( obj_id, ) = self.__iter.next()
            # Linked as the dup target of another object?
            try:
                self.__session.execute( 'SELECT id FROM objl WHERE dup = :obj',
                                        { 'obj' : obj_id } ).__iter__().next()
                return obj_id
            except StopIteration:
                pass
            # Linked as a child in rel2?
            try:
                self.__session.execute( 'SELECT parent FROM rel2 WHERE child = :obj',
                                        { 'obj' : obj_id } ).__iter__().next()
                return obj_id
            except StopIteration:
                pass
            # Linked as a parent in rel2?
            try:
                self.__session.execute( 'SELECT child FROM rel2 WHERE parent = :obj',
                                        { 'obj' : obj_id } ).__iter__().next()
                return obj_id
            except StopIteration:
                pass
def determine_duplicate_parent( session, obj_id ):
    """Follow the ``dup`` chain from *obj_id* to the first non-duplicate.

    Returns the id of the first object in the chain whose type is not
    TYPE_FILE_DUP, or None if the chain dead-ends on a missing row.
    """
    current = obj_id
    while True:
        row = session.execute( 'SELECT type, dup FROM objl WHERE id = :obj',
                               { 'obj' : current } ).first()
        if row is None:
            return None
        if row['type'] != TYPE_FILE_DUP:
            return current
        # Still a duplicate: hop to the object it points at.
        current = row['dup']
def correct_linked_duplicates( session ):
    """Re-point all references from linked duplicate objects to their
    resolved non-duplicate parent.

    For every linked duplicate, moves its dup/var pointers and its rel2
    parent/child links onto the parent, deleting rows that would become
    self-references or duplicates of existing links.
    """
    for obj_id in LinkedDuplicateIterator( session ):
        parent_id = determine_duplicate_parent( session, obj_id )
        mapping = { 'obj' : obj_id, 'par' : parent_id }
        # Move all dup/vars
        session.execute( 'UPDATE objl SET dup = :par WHERE dup = :obj',
                         mapping )
        # Move parents
        for result in session.execute( 'SELECT parent FROM rel2 WHERE child = :par',
                                       mapping ):
            # Drop links that already exist on the parent to avoid duplicates.
            session.execute( 'DELETE FROM rel2 WHERE child = :obj and parent = :oth',
                             { 'obj' : obj_id,
                               'oth' : result['parent'] } )
        session.execute( 'DELETE FROM rel2 WHERE child = :obj and parent = :par',
                         mapping )
        session.execute( 'UPDATE rel2 SET child = :par WHERE child = :obj',
                         mapping )
        # Move children
        for result in session.execute( 'SELECT child FROM rel2 WHERE parent = :par',
                                       mapping ):
            session.execute( 'DELETE FROM rel2 WHERE parent = :obj and child = :oth',
                             { 'obj' : obj_id,
                               'oth' : result['child'] } )
        session.execute( 'DELETE FROM rel2 WHERE parent = :obj and child = :par',
                         mapping )
        session.execute( 'UPDATE rel2 SET parent = :par WHERE parent = :obj',
                         mapping )
def upgrade_from_8_to_8_1( log, session ):
    """Upgrade VER 8 -> VER 8.1: repair duplicates that still have links."""
    log.info( 'Database upgrade from VER 8 -> VER 8.1' )
    correct_linked_duplicates( session )
    session.execute( 'UPDATE dbi SET ver = 8, rev = 1' )
    return 8, 1
def upgrade_from_8_1_to_9( log, session ):
    """Upgrade VER 8.1 -> VER 9.

    Purely a version bump; no schema changes are performed here.
    """
    log.info( 'Database upgrade from VER 8.1 -> VER 9' )
    return ( 9, 0 )
def upgrade_from_9_to_10( log, session ):
    """Migrate the legacy schema (objl/fchk/mtda/rel2) to the VER 10 schema
    (objects/streams/object_metadata/stream_metadata/relations/stream_log).

    Duplicate file objects (type 1001) become extra streams on their parent
    object; variant objects (type 1002) become plain files related to their
    former parent.  Old tables are dropped at the end.
    """
    log.info( 'Database upgrade from VER 9 -> VER 10' )
    # Single timestamp used for all synthesized log entries below.
    now = calendar.timegm(time.gmtime())
    # --- create the new schema ---
    session.execute( 'CREATE TABLE objects (\n'
                     'object_id INTEGER PRIMARY KEY,\n'
                     'object_type INTEGER NOT NULL,\n'
                     'create_ts INTEGER NOT NULL,\n'
                     'name TEXT,\n'
                     'root_stream_id INTEGER )' )
    session.execute( 'CREATE TABLE streams (\n'
                     'stream_id INTEGER PRIMARY KEY,\n'
                     'object_id INTEGER NOT NULL,\n'
                     'name TEXT NOT NULL,\n'
                     'priority INTEGER NOT NULL,\n'
                     'origin_stream_id INTEGER,\n'
                     'extension TEXT,\n'
                     'mime_type TEXT,\n'
                     'stream_length INTEGER,\n'
                     'hash_crc32 TEXT,\n'
                     'hash_md5 TEXT,\n'
                     'hash_sha1 TEXT,\n'
                     'UNIQUE ( object_id, name ),\n'
                     'FOREIGN KEY ( object_id ) '
                     'REFERENCES objects( object_id ),\n'
                     'FOREIGN KEY ( origin_stream_id ) '
                     'REFERENCES streams( stream_id ) )\n' )
    session.execute( 'CREATE TABLE object_metadata (\n'
                     'object_id INTEGER NOT NULL,\n'
                     'key TEXT NOT NULL,\n'
                     'value TEXT,\n'
                     'numeric INTEGER,\n'
                     'PRIMARY KEY ( object_id, key ),\n'
                     'FOREIGN KEY ( object_id ) '
                     'REFERENCES objects( object_id ) )\n' )
    session.execute( 'CREATE TABLE stream_metadata (\n'
                     'stream_id INTEGER NOT NULL,\n'
                     'key TEXT NOT NULL,\n'
                     'value TEXT,\n'
                     'numeric INTEGER,\n'
                     'PRIMARY KEY ( stream_id, key ),\n'
                     'FOREIGN KEY ( stream_id ) '
                     'REFERENCES objects( object_id ) )\n' )
    session.execute( 'CREATE TABLE relations (\n'
                     'child_id INTEGER NOT NULL,\n'
                     'parent_id INTEGER NOT NULL,\n'
                     'sort INTEGER,\n'
                     'PRIMARY KEY ( child_id, parent_id ),\n'
                     'FOREIGN KEY ( child_id ) '
                     'REFERENCES objects( object_id ),\n'
                     'FOREIGN KEY ( parent_id ) '
                     'REFERENCES objects( object_id ) )\n' )
    session.execute( 'CREATE TABLE stream_log(\n'
                     'log_id INTEGER PRIMARY KEY,\n'
                     'stream_id INTEGER NOT NULL,\n'
                     'timestamp INTEGER NOT NULL,\n'
                     'origin_method TEXT NOT NULL,\n'
                     'origin_stream_id INTEGER,\n'
                     'origin_name TEXT,\n'
                     'FOREIGN KEY ( stream_id ) '
                     'REFERENCES streams( stream_id ),\n'
                     'FOREIGN KEY ( origin_stream_id ) '
                     'REFERENCES streams( stream_id ) )\n' )
    # --- migrate the data ---
    # Copy objl
    session.execute( 'INSERT INTO objects ( object_id, '
                                          'object_type, '
                                          'create_ts, '
                                          'name ) '
                     'SELECT id, type, create_ts, name '
                     'FROM objl '
                     'WHERE type != 1001' )
    # Bugs in earlier versions of hdbfs can lead to duplicate rows in the rel2
    # table. Delete the duplicate rows now
    session.execute( 'DELETE FROM rel2 '
                     'WHERE rowid NOT IN ( '
                        'SELECT MIN( rowid ) '
                        'FROM rel2 GROUP BY parent, child )' )
    # Copy rel2
    session.execute( 'INSERT INTO relations '
                     'SELECT r.child, r.parent, r.sort '
                     'FROM rel2 r '
                     'INNER JOIN objl a ON a.id = r.child '
                     'INNER JOIN objl b ON b.id = r.parent '
                     'WHERE a.type != 1001 '
                     'AND b.type != 1001' )
    # Remove the variant type
    session.execute( 'UPDATE objects SET object_type = 1000 '
                     'WHERE object_type = 1002' )
    # Copy primary streams
    session.execute( 'INSERT INTO streams ( stream_id, '
                                           'object_id, '
                                           'name, '
                                           'priority, '
                                           'stream_length, '
                                           'hash_crc32, '
                                           'hash_md5, '
                                           'hash_sha1 ) '
                     'SELECT f.id, f.id, ".", 2000, f.len, '
                     'f.crc32, f.md5, f.sha1 '
                     'FROM fchk f '
                     'INNER JOIN objl o ON o.id = f.id '
                     'WHERE o.type != 1001' )
    # Copy primary metadata (except altname, original-width, original-height)
    session.execute( 'INSERT INTO object_metadata ( object_id, '
                                                   'key, '
                                                   'value, '
                                                   'numeric ) '
                     'SELECT id, key, value, num '
                     'FROM mtda '
                     'WHERE id IN (SELECT object_id FROM objects) '
                     'AND key NOT IN ( "altname", '
                                      '"original-width", '
                                      '"original-height", '
                                      '"rotation", '
                                      '"thumb-gen" ) ' )
    # Copy original-width/height from primaries
    session.execute( 'INSERT INTO stream_metadata ( stream_id, '
                                                   'key, '
                                                   'value, '
                                                   'numeric ) '
                     'SELECT id, "width", value, num '
                     'FROM mtda '
                     'WHERE id IN (SELECT object_id FROM objects) '
                     'AND key = "original-width"' )
    session.execute( 'INSERT INTO stream_metadata ( stream_id, '
                                                   'key, '
                                                   'value, '
                                                   'numeric ) '
                     'SELECT id, "height", value, num '
                     'FROM mtda '
                     'WHERE id IN (SELECT object_id FROM objects) '
                     'AND key = "original-height"' )
    # Copy rotation from primaries
    session.execute( 'INSERT INTO stream_metadata ( stream_id, '
                                                   'key, '
                                                   'value, '
                                                   'numeric ) '
                     'SELECT id, key, value, num '
                     'FROM mtda '
                     'WHERE id IN (SELECT object_id FROM objects) '
                     'AND key = "rotation"' )
    # Resolve variants
    session.execute( 'INSERT INTO relations ( child_id, parent_id ) '
                     'SELECT id, dup '
                     'FROM objl '
                     'WHERE type = 1002' )
    # Copy duplicate streams
    session.execute( 'INSERT INTO streams ( stream_id, '
                                           'object_id, '
                                           'name, '
                                           'priority, '
                                           'stream_length, '
                                           'hash_crc32, '
                                           'hash_md5, '
                                           'hash_sha1 ) '
                     'SELECT o.id, o.dup, "dup:" || f.sha1, '
                     '2000, f.len, f.crc32, f.md5, f.sha1 '
                     'FROM objl o '
                     'INNER JOIN fchk f ON f.id = o.id '
                     'WHERE o.type = 1001' )
    # Copy original-width/height from duplicates
    session.execute( 'INSERT INTO stream_metadata ( stream_id, '
                                                   'key, '
                                                   'value, '
                                                   'numeric ) '
                     'SELECT o.id, "width", m.value, m.num '
                     'FROM objl o '
                     'INNER JOIN mtda m ON m.id = o.id '
                     'WHERE o.type = 1001 '
                     'AND m.key = "original-width"' )
    session.execute( 'INSERT INTO stream_metadata ( stream_id, '
                                                   'key, '
                                                   'value, '
                                                   'numeric ) '
                     'SELECT o.id, "height", m.value, m.num '
                     'FROM objl o '
                     'INNER JOIN mtda m ON m.id = o.id '
                     'WHERE o.type = 1001 '
                     'AND m.key = "original-height"' )
    # Copy rotation from duplicates
    session.execute( 'INSERT INTO stream_metadata ( stream_id, '
                                                   'key, '
                                                   'value, '
                                                   'numeric ) '
                     'SELECT o.id, m.key, m.value, m.num '
                     'FROM objl o '
                     'INNER JOIN mtda m ON m.id = o.id '
                     'WHERE o.type = 1001 '
                     'AND m.key = "rotation"' )
    # Add the log entries for creation
    session.execute( 'INSERT INTO stream_log ( stream_id, '
                                              'timestamp, '
                                              'origin_method ) '
                     'SELECT f.id, o.create_ts, '
                     '"hdbfs:legacy_create" '
                     'FROM fchk f '
                     'INNER JOIN objl o ON o.id = f.id' )
    # We don't know what time the streams were named, so name them now
    session.execute( 'INSERT INTO stream_log ( stream_id, '
                                              'timestamp, '
                                              'origin_method, '
                                              'origin_name ) '
                     'SELECT f.id, :now, '
                     '"hdbfs:legacy_name", '
                     'o.name '
                     'FROM fchk f '
                     'INNER JOIN objl o ON o.id = f.id '
                     'WHERE o.name IS NOT NULL',
                     { 'now' : now, } )
    # Now add all the alt-names. This gets a bit dicey
    for stream_id, altnames \
     in session.execute( 'SELECT f.id, m.value '
                         'FROM fchk f '
                         'INNER JOIN mtda m ON m.id = f.id '
                         'WHERE m.key = "altname"' ):
        # altname is stored as a single colon-separated string.
        altnames = altnames.split( ':' )
        for name in altnames:
            session.execute( 'INSERT INTO stream_log ( stream_id, '
                                                      'timestamp, '
                                                      'origin_method, '
                                                      'origin_name ) '
                             'VALUES ( :stream_id, :now, '
                             '"hdbfs:legacy_altname", '
                             ':name )',
                             { 'stream_id' : stream_id,
                               'now' : now,
                               'name' : name, } )
    # Assign root streams
    session.execute( 'UPDATE objects SET root_stream_id = object_id '
                     'WHERE object_id IN (SELECT stream_id FROM streams)' )
    # Create indexes
    session.execute( 'CREATE UNIQUE INDEX streams_object_id_name_index '
                     'ON streams ( object_id, name )' )
    session.execute( 'CREATE UNIQUE INDEX object_metadata_object_id_key_index '
                     'ON object_metadata( object_id, key )' )
    session.execute( 'CREATE UNIQUE INDEX stream_metadata_stream_id_key_index '
                     'ON stream_metadata( stream_id, key )' )
    session.execute( 'CREATE INDEX stream_log_stream_id_index '
                     'ON stream_log ( stream_id )' )
    # --- drop the legacy tables ---
    session.execute( 'DROP TABLE objl' )
    session.execute( 'DROP TABLE fchk' )
    session.execute( 'DROP TABLE mtda' )
    session.execute( 'DROP TABLE rel2' )
    return 10, 0
| hakuya/higu | lib/hdbfs/legacy/ver8rules.py | Python | bsd-2-clause | 16,841 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2022 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`ToroEtAl1997MblgNSHMP2008`,
:class:`ToroEtAl1997MwNSHMP2008`
"""
import numpy as np
from openquake.hazardlib.gsim.base import CoeffsTable, GMPE
from openquake.hazardlib.gsim.utils import (
mblg_to_mw_johnston_96, mblg_to_mw_atkinson_boore_87, clip_mean)
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, SA
from openquake.baselib.general import CallableDict
# Finite-fault correction, dispatched on the magnitude scale ("Mblg" or "Mw").
_compute_finite_fault_correction = CallableDict()
@_compute_finite_fault_correction.add("Mblg")
def _compute_finite_fault_correction_Mblg(kind, mag):
    """
    Compute finite fault correction term as geometric mean of correction
    terms obtained from Mw values calculated with Johnston 1996 and
    Atkinson and Boore 1987 conversion equations.

    Implement equations as in lines 1653 - 1658 in hazgridXnga2.f
    """
    mw_j96 = mblg_to_mw_johnston_96(mag)
    mw_ab87 = mblg_to_mw_atkinson_boore_87(mag)
    # Correction term exp(-1.25 + 0.227 * Mw) for each Mw conversion,
    # combined as a geometric mean.
    t1 = np.exp(-1.25 + 0.227 * mw_j96)
    t2 = np.exp(-1.25 + 0.227 * mw_ab87)
    return np.sqrt(t1 * t2)
@_compute_finite_fault_correction.add("Mw")
def _compute_finite_fault_correction_Mw(kind, mag):
    """
    Compute finite fault correction term directly from Mw
    (no magnitude-scale conversion required).
    """
    return np.exp(-1.25 + 0.227 * mag)
def _compute_mean(kind, C, mag, rjb):
    """
    Compute the mean ground motion value (natural log), including the
    finite-fault distance correction and the extra attenuation term
    applied beyond 100 km.
    """
    # Effective distance including the finite-fault correction
    # (line 1686 in hazgridXnga2.f).
    ffc = _compute_finite_fault_correction(kind, mag)
    dist = np.sqrt(rjb ** 2 + (C['c7'] ** 2) * (ffc ** 2))
    # Base scaling; lines 1663, 1694-1696 in hazgridXnga2.f.
    dmag = mag - 6.
    mean = (C['c1'] + C['c2'] * dmag + C['c3'] * (dmag ** 2) -
            C['c4'] * np.log(dist) - C['c6'] * dist)
    # Additional attenuation for distances beyond 100 km.
    far_term = np.log(rjb / 100.)
    return np.where(far_term > 0,
                    mean - (C['c5'] - C['c4']) * far_term,
                    mean)
class ToroEtAl1997MblgNSHMP2008(GMPE):
    """
    Implements GMPE developed by G. R. Toro, N. A. Abrahamson, J. F. Schneider
    and published in "Model of Strong Ground Motions from Earthquakes in
    Central and Eastern North America: Best Estimates and Uncertainties"
    (Seismological Research Letters, Volume 68, Number 1, 1997) as utilized
    by the National Seismic Hazard Mapping Project (NSHMP) for the 2008 US
    hazard model.

    This class replicates the algorithm for the Toro et. al. 1997 GMPE as
    coded in the subroutine ``getToro`` in the ``hazgridXnga2.f``
    Fortran code available at:
    http://earthquake.usgs.gov/hazards/products/conterminous/2008/software/

    The class assumes rupture magnitude to be in Mblg scale (given that
    MFDs for central and eastern US are given in this scale).

    The equation implements also the finite-fault correction as given in
    "Modification of the Toro et al. 1997 Attenuation Equations for Large
    Magnitudes and Short Distances" (available at:
    http://www.riskeng.com/downloads/attenuation_equations). The correction
    uses Mw. Therefore Mblg is converted to Mw using both the Atkinson & Boore
    1987 and Johnston 1996 conversion equations and an average correction term
    is computed.

    Coefficients are given for the B/C site conditions.
    """
    # Selects the Mblg branch of _compute_finite_fault_correction.
    kind = "Mblg"

    #: Supported tectonic region type is stable continental crust,
    #: given that the equations have been derived for central and eastern
    #: north America
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.STABLE_CONTINENTAL

    #: Supported intensity measure types are spectral acceleration,
    #: and peak ground acceleration
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, SA}

    #: Supported intensity measure component is the geometric mean of
    #: two horizontal components
    #: :attr:`~openquake.hazardlib.const.IMC.GEOMETRIC_MEAN`,
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.GEOMETRIC_MEAN

    #: Supported standard deviation type is only total.
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = {const.StdDev.TOTAL}

    #: No site parameters required
    REQUIRES_SITES_PARAMETERS = set()

    #: Required rupture parameter is only magnitude (Mblg).
    REQUIRES_RUPTURE_PARAMETERS = {'mag'}

    #: Required distance measure is rjb
    REQUIRES_DISTANCES = {'rjb'}

    #: Shear-wave velocity for reference soil conditions in [m s-1]
    DEFINED_FOR_REFERENCE_VELOCITY = 760.

    def compute(self, ctx: np.recarray, imts, mean, sig, tau, phi):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.compute>`
        for spec of input and result values.
        """
        for m, imt in enumerate(imts):
            C = self.COEFFS[imt]
            # Mean is clipped to the NSHMP 2008 bounds for this IMT.
            mean[m] = clip_mean(
                imt, _compute_mean(self.kind, C, ctx.mag, ctx.rjb))
            sig[m] = C['sigma']

    #: Coefficient table obtained from coefficient arrays (tb1, tb2, tb3, tb4,
    #: tb5, tb6, tbh) defined from line 1596 - 1614 in hazgridXnga2.f
    COEFFS = CoeffsTable(sa_damping=5, table="""\
    IMT    c1      c2     c3       c4    c5      c6       c7    sigma
    pga    2.489   1.20   0.0      1.28  1.23    0.0018   9.3   0.7506
    0.1    2.91    1.23   0.0      1.12  1.05    0.0043   8.5   0.7506
    0.2    2.165   1.24   0.0      0.98  0.74    0.0039   7.5   0.7506
    0.3    1.7323  1.51  -0.11     0.96  0.6881  0.0034   7.35  0.7506
    0.5    1.109   1.785 -0.2795   0.93  0.6354  0.002732 7.05  0.7506
    1.0    0.173   2.05  -0.34     0.90  0.59    0.0019   6.8   0.799
    2.0   -0.788   2.52  -0.47     0.93  0.6     0.0012   7.0   0.799
    """)
class ToroEtAl1997MwNSHMP2008(ToroEtAl1997MblgNSHMP2008):
    """
    Extend :class:`ToroEtAl1997MblgNSHMP2008` but assumes magnitude to be in
    Mw scale.

    Coefficients are Mw-specific and no magnitude conversion is considered to
    take into account finite-fault correction.
    """
    # Selects the Mw branch of _compute_finite_fault_correction.
    kind = "Mw"

    #: Coefficient table obtained from coefficient arrays (tc1, tc2, tc3, tc4,
    #: tc5, tc6, th) defined in subroutine getToro in hazgridXnga2.f
    COEFFS = CoeffsTable(sa_damping=5, table="""\
    IMT    c1      c2     c3       c4      c5      c6      c7     sigma
    pga    2.619   0.81   0.0      1.27    1.16    0.0021  9.3    0.7506
    0.1    2.92    0.81   0.0      1.1     1.02    0.004   8.3    0.7506
    0.2    2.295   0.84   0.0      0.98    0.66    0.0042  7.5    0.7506
    0.3    1.8823  0.964 -0.059    0.951   0.601   0.00367 7.26   0.7506
    0.5    1.2887  1.14  -0.1244   0.9227  0.5429  0.00306 7.027  0.7506
    1.0    0.383   1.42  -0.2      0.90    0.49    0.0023  6.8    0.799
    2.0   -0.558   1.86  -0.31     0.92    0.46    0.0017  6.9    0.799
    """)
| gem/oq-engine | openquake/hazardlib/gsim/toro_1997.py | Python | agpl-3.0 | 7,312 |
# -*- coding: utf-8 -*-
class ResourceOptions(object):
    """
    A configuration class for ``Resource``.
    Provides sane defaults and the logic needed to augment these settings with
    the internal ``class Meta`` used on ``Resource`` subclasses.
    """
    allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
    list_allowed_methods = None
    detail_allowed_methods = None
    # limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
    urlconf_namespace = None
    default_format = 'application/json'
    filtering = {}
    ordering = []
    object_class = None
    queryset = None
    fields = []
    excludes = []
    include_resource_uri = True
    include_absolute_url = False
    always_return_data = False
    api_name = None
    resource_name = None
    resp_message = 'Good!'
    resp_script = None
    resp_success = True
    resp_template = 'adminpanel/ap-test.html'
    resp_type = 'tpl'
    resp_render_data = None
    make_function = None

    def __new__(cls, meta=None):
        """Build an options object overlaying attributes from ``meta``.

        Returns an instance of a freshly created subclass of ``cls`` whose
        class dict holds the overrides, so attribute lookup falls back to
        the defaults declared above.

        :param meta: the inner ``class Meta`` of a resource (or ``None``).
        """
        overrides = {}
        # Handle overrides.
        if meta:
            for override_name in dir(meta):
                # No internals please.
                if not override_name.startswith('_'):
                    overrides[override_name] = getattr(meta, override_name)
        # Fall back to the class-level default rather than repeating the
        # literal, so subclasses that override ``allowed_methods`` are
        # honored (previously the default list was hard-coded here twice).
        allowed_methods = overrides.get('allowed_methods', cls.allowed_methods)
        # list/detail method lists default to the combined list when unset.
        if overrides.get('list_allowed_methods', None) is None:
            overrides['list_allowed_methods'] = allowed_methods
        if overrides.get('detail_allowed_methods', None) is None:
            overrides['detail_allowed_methods'] = allowed_methods
        return object.__new__(type('ResourceOptions', (cls,), overrides))
| codeboy/coddy-sitetools | sitetools/coddy_api/api_resource.py | Python | bsd-3-clause | 1,729 |
# -*- coding: utf-8 -*-
"""Module, which has routines for scd CLI."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
import os.path
import sys
import pkg_resources
import six
import scd.config
import scd.files
import scd.utils
import scd.version
# colorama is optional: it is used only to colorize error output and the
# debug log format; everything degrades to plain text when it is absent.
try:
    import colorama
except ImportError:
    colorama = None
DESCRIPTION = """
scd is a tool to manage version strings within your project files.
"""
"""Description of the tool."""
EPILOG = """
Please check GH of the SCD for issues and updates:
https://github.com/9seconds/scd
"""
"""Epilog for the argparser."""
# Module-global on purpose: main() assigns it and catch_exceptions()
# inspects it to decide how verbosely to report failures.
OPTIONS = None
"""Commandline parameters."""
def catch_exceptions(func):
    """Decorator which makes function more CLI friendly.

    If everything is ok, it returns :py:data:`os.EX_OK` (code 0), if not
    - :py:data:`os.EX_SOFTWARE` (code 70). Also, it is smart enough to
    differ verbose and debug mode and print accordingly.
    """
    @six.wraps(func)
    def decorator():
        try:
            func()
        except Exception as exc:
            # Report the failure even when it happened before the CLI
            # options were parsed (OPTIONS is still None then); previously
            # such early errors were swallowed silently.
            if OPTIONS and OPTIONS.debug:
                logging.exception(exc)
            elif OPTIONS and OPTIONS.verbose:
                logging.error(exc)
            elif colorama is not None:
                print(colorama.Fore.RED + six.text_type(exc),
                      file=sys.stderr)
            else:
                print(six.text_type(exc), file=sys.stderr)
            return os.EX_SOFTWARE
        return os.EX_OK
    return decorator
@catch_exceptions
def main():
    """Main function.
    Basically, it parses CLI, creates config, traverse files and does
    modifications. All that scd does is happening with this function.
    """
    global OPTIONS
    OPTIONS = get_options()
    configure_logging()
    logging.debug("Options: %s", OPTIONS)
    # -V/--own-version: print scd's own package version and exit early.
    if OPTIONS.own_version:
        dist = pkg_resources.get_distribution("scd")
        print(dist.version)
        return
    config = scd.config.parse(
        guess_configfile(),
        OPTIONS.version_scheme,
        dict(OPTIONS.extra_context))
    logging.info("Version is %s", config.version.full)
    # -p/--replace-version: print the computed target version and exit early.
    if OPTIONS.replace_version:
        print(config.version.full)
        return
    # argparse opened the file arguments just to validate them; only the
    # paths are needed from here on, so release the handles.
    for fobj in OPTIONS.files:
        fobj.close()
    all_files = config.filter_files(OPTIONS.group, OPTIONS.files)
    # NOTE(review): this branch only logs and then processing continues
    # below -- the message suggests an early ``return`` was intended here;
    # confirm before changing.
    if not scd.files.validate_access(all_files):
        logging.error("Cannot process all files, so nothing to do.")
    for fileobj in all_files:
        logging.info("Start to process %s", fileobj.path)
        logging.debug("File object: %s", fileobj)
        process_file(fileobj, config)
def get_options():
    """Return parsed commandline arguments.
    :return: Parsed commandline arguments
    :rtype: :py:class:`argparse.Namespace`
    """
    # NOTE: argument order below defines the --help output; keep it stable.
    parser = argparse.ArgumentParser(
        description=DESCRIPTION,
        epilog=EPILOG)
    parser.add_argument(
        "-V", "--own-version",
        default=False,
        action="store_true",
        help="print version only.")
    parser.add_argument(
        "-p", "--replace-version",
        default=False,
        action="store_true",
        help="print version to replace to.")
    parser.add_argument(
        "-n", "--dry-run",
        action="store_true",
        default=False,
        help="make dry run, do not change anything.")
    parser.add_argument(
        "-c", "--config",
        metavar="CONFIG_PATH",
        default=None,
        help="path to the config. By default autodiscovery will be performed.")
    parser.add_argument(
        "-x", "--extra-context",
        metavar="CONTEXT_VAR",
        default=[],
        nargs=argparse.ZERO_OR_MORE,
        # Each token is validated and split into a [key, value] pair.
        type=argparse_extra_context_var,
        help="Additional context variables. Format is key=value.")
    parser.add_argument(
        "-g", "--group",
        nargs=argparse.ZERO_OR_MORE,
        default=[],
        help="groups to use for additional filtering.")
    parser.add_argument(
        "-s", "--version-scheme",
        default=None,
        choices=sorted(scd.utils.get_version_plugins()),
        help="override version-scheme from config.")
    # -d and -v cannot be combined; debug is the most verbose mode.
    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument(
        "-d", "--debug",
        action="store_true",
        default=False,
        help="run in debug mode")
    verbosity.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="run tool in verbose mode")
    parser.add_argument(
        "files",
        metavar="FILE_PATH",
        nargs=argparse.ZERO_OR_MORE,
        # Files are opened here only to validate they exist and are
        # readable; main() closes the handles immediately.
        type=argparse.FileType("rt"),
        help=(
            "Path to the files where to make version bumping. "
            "If nothing is set, all filenames in config will be used."))
    return parser.parse_args()
def argparse_extra_context_var(arg):
    """argparse ``type=`` validator for ``-x`` items: a ``key=value`` token.

    Returns the token split into a two-element ``[key, value]`` list (the
    value part may itself contain ``=``); raises
    :py:class:`argparse.ArgumentTypeError` when no separator is present.
    """
    key, separator, value = arg.partition("=")
    if not separator:
        raise argparse.ArgumentTypeError(
            "context var definition should be in form key=value.")
    return [key, value]
def process_file(fileobj, config):
    """Function, which is responsible for processing of file.
    :param fileobj: File to process.
    :type fileobj: :py:class:`scd.files.File`
    :param config: Parsed configuration.
    :type config: :py:class:`scd.config.Config`
    """
    need_to_save = False
    file_result = []
    with open(fileobj.path, "rt") as filefp:
        for line in filefp:
            original_line = line
            # Apply every search/replace pattern of this file to the line.
            for sr in fileobj.patterns:
                line = sr.process(config.version, line)
            if original_line != line:
                need_to_save = True
            # In dry-run mode nothing will be written, so do not buffer.
            if not OPTIONS.dry_run:
                file_result.append(line)
    if not OPTIONS.dry_run and need_to_save:
        logging.debug("Need to save %s", fileobj.path)
        # Rewrite the whole file in place with the substituted lines.
        with open(fileobj.path, "wt") as filefp:
            filefp.writelines(file_result)
    else:
        logging.debug("No need to save %s", fileobj.path)
def guess_configfile():
    """Return file-like object, guessing where the hell if config file.
    :return: Open config.
    :rtype: file-like object
    :raises ValueError: if cannot find config file.
    """
    # An explicit -c/--config path always wins over autodiscovery.
    if OPTIONS.config:
        return open(OPTIONS.config, "rt")
    config = search_config_in_directory(os.getcwd())
    if not config:
        # Fall back to the root of the enclosing git repository, if any.
        result = scd.utils.execute(["git", "rev-parse", "--show-toplevel"])
        if result["code"] == os.EX_OK:
            config = search_config_in_directory(result["stdout"][0])
    if not config:
        raise ValueError("Cannot find configfile.")
    return open(config, "rt")
def search_config_in_directory(directory):
    """Return config file name if it is found in directory.

    :param str directory: Path to the directory where to search config files.
    :return: Path to the config file (absolute) or ``None`` if nothing is
        found
    :rtype: str or None
    """
    logging.debug("Search configfile in %s", directory)
    # Candidate names in priority order: dotted variants win over plain ones.
    candidates = (".scd.json", "scd.json", ".scd.yaml", "scd.yaml",
                  ".scd.toml", "scd.toml")
    present = set(os.listdir(directory))
    found = next((cand for cand in candidates if cand in present), None)
    if found is None:
        logging.debug("No suitable configfile in %s", directory)
        return None
    path = os.path.join(directory, found)
    logging.info("Use %s as config file", path)
    return path
# Two variants of configure_logging are bound at import time: a colorized
# one when colorama imported successfully, and a plain-text fallback.
# Both obey the same contract: debug > verbose > errors-only.
if colorama:
    def configure_logging():
        """Configure logging based on :py:data:`OPTIONS`."""
        if OPTIONS.debug:
            logging.basicConfig(
                level=logging.DEBUG,
                format=(
                    colorama.Style.DIM +
                    "%(relativeCreated)d " +
                    colorama.Style.RESET_ALL + "[" +
                    colorama.Fore.RED + "%(levelname)-7s" +
                    colorama.Style.RESET_ALL + "] (" +
                    colorama.Fore.GREEN + "%(module)10s" +
                    colorama.Style.RESET_ALL + ":" +
                    colorama.Fore.BLUE + "%(lineno)-3d" +
                    colorama.Style.RESET_ALL + ") %(message)s"
                )
            )
        elif OPTIONS.verbose:
            logging.basicConfig(
                level=logging.INFO,
                format=(
                    colorama.Style.DIM +
                    ">>> " +
                    colorama.Style.RESET_ALL +
                    "%(message)s"
                )
            )
        else:
            logging.basicConfig(level=logging.ERROR, format="%(message)s")
else:
    def configure_logging():
        """Configure logging based on :py:data:`OPTIONS`."""
        if OPTIONS.debug:
            logging.basicConfig(
                level=logging.DEBUG,
                format=(
                    "%(relativeCreated)d [%(levelname)-7s] (%(module)10s"
                    ":%(lineno)-3d) %(message)s"
                )
            )
        elif OPTIONS.verbose:
            logging.basicConfig(level=logging.INFO, format=">>> %(message)s")
        else:
            logging.basicConfig(level=logging.ERROR, format="%(message)s")
if __name__ == "__main__":
    sys.exit(main())
| 9seconds/scd | scd/main.py | Python | mit | 9,197 |
# Configuration for a single GPSS kernel-structure search run.
# NOTE(review): ``Experiment`` is not imported in this file -- presumably
# the experiment runner exec's this config with the name in scope; confirm.
Experiment(description='PL2 empiricism',
           data_dir='../data/tsdlr-renamed/',
           max_depth=10,
           random_order=False,
           k=1,
           debug=False,
           local_computation=False,
           n_rand=9,
           sd=2,
           jitter_sd=0.1,
           max_jobs=400,
           verbose=False,
           make_predictions=False,
           skip_complete=True,
           results_dir='../results/2014-02-18-GPSS-add-pl2/',
           iters=250,
           base_kernels='SE,Per,Lin,Const,Noise',
           random_seed=2,
           period_heuristic=3,
           max_period_heuristic=5,
           period_heuristic_type='min',
           subset=True,
           subset_size=250,
           full_iters=10,
           bundle_size=5,
           additive_form=True,
           mean='ff.MeanZero()', # Starting mean
           kernel='ff.NoiseKernel()', # Starting kernel
           lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood
           score='pl2',
           # Rewrite rules applied to candidate kernel expressions during
           # the search ('A'/'B' are kernel placeholders, 'd' a dimension).
           search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'}),
                             ('A', ('*', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
                             ('A', ('*-const', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
                             ('A', 'B', {'A': 'kernel', 'B': 'base'}),
                             ('A', ('CP', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
                             ('A', ('CW', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
                             ('A', ('B', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
                             ('A', ('BL', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
                             ('A', ('None',), {'A': 'kernel'})])
#versa.query.ast
from versa.util import column
class query(object):
    'Versa query language abstract syntax tree expression instance'
    def traverse(self):
        # Default traversal for binary nodes: yield the two children.
        # Subclasses with a different shape override this method.
        yield self.left
        yield self.right
    def evaluate(self, ctx):
        # Walk the tree and prepare the nodes before actual evaluation.
        # (A leftover debug ``print`` was removed from this loop.)
        if hasattr(self, 'traverse'):
            for node in self.traverse():
                if hasattr(node, 'prepare'):
                    node.prepare(ctx)
class conjunction(query):
    # Logical AND of two subexpressions.
    def __init__(self, left, right):
        self.left = left
        self.right = right
    def traverse(self):
        # Post-order: children (recursively) first, then this node.
        if hasattr(self.left, 'traverse'):
            yield from self.left.traverse()
        if hasattr(self.right, 'traverse'):
            yield from self.right.traverse()
        yield self
    def _evaluate(self, ctx):
        # BUG FIX: a conjunction must AND its operands; the original used
        # ``or`` (copy/paste from disjunction), making both classes behave
        # identically.
        return bool(self.left._evaluate(ctx) and self.right._evaluate(ctx))
    def evaluate(self, ctx):
        query.evaluate(self, ctx)
        self._evaluate(ctx)
class disjunction(query):
    # Logical OR of two subexpressions.
    def __init__(self, left, right):
        self.left = left
        self.right = right
    def traverse(self):
        # Post-order: children (recursively) first, then this node.
        for child in (self.left, self.right):
            if hasattr(child, 'traverse'):
                yield from child.traverse()
        yield self
    def _evaluate(self, ctx):
        # Short-circuit: the right operand is evaluated only when the left
        # one is falsy (same as ``bool(left or right)``).
        if self.left._evaluate(ctx):
            return True
        return bool(self.right._evaluate(ctx))
    def evaluate(self, ctx):
        query.evaluate(self, ctx)
        self._evaluate(ctx)
class stringseq(query):
    # Concatenation of literal strings and evaluable subexpressions.
    def __init__(self, items):
        self.items = items
    def traverse(self):
        # Flat traversal: every item, then this node.
        yield from self.items
        yield self
    def _evaluate(self, ctx):
        pieces = []
        for item in self.items:
            # Literal strings pass through; anything else is evaluated.
            pieces.append(item if isinstance(item, str) else item._evaluate(ctx))
        return ''.join(pieces)
    def evaluate(self, ctx):
        query.evaluate(self, ctx)
        self._evaluate(ctx)
class variable(query):
    # AST node for a query variable; candidate values are narrowed against
    # the input model via the "match hints" machinery below.
    def __init__(self, name):
        self.name = name
        self.cached_possible_values = None
    def update_match_hints__premature_optimization_edition(self, ctx, role):
        # All values appearing in the given role (column) of the model are
        # candidates for this variable.
        possible_values = set(column(ctx.model, role))
        #Match_hints are used to narrow down possible variable values based on the contents of the input model
        match_roles = ctx.extras.setdefault('match_roles', {})
        match_possible_values = ctx.extras.setdefault('match_possible_values', {})
        if self.name not in match_roles:
            # First sighting of this variable: record the role and seed the
            # candidate set.
            match_roles.setdefault(self.name, []).append(role)
            match_possible_values[self.name] = possible_values
        elif role not in match_roles[self.name]:
            # Seen again in a new role: intersect the candidate sets.
            match_roles.setdefault(self.name, []).append(role)
            match_possible_values[self.name].intersection_update(possible_values)
        # NOTE(review): leftover debug prints below; consider removing or
        # switching to the logging module.
        print(match_roles)
        print(match_possible_values)
        return
    def _evaluate(self, ctx):
        # Evaluation yields the narrowed candidate-value set, not a scalar.
        return ctx.extras['match_possible_values'][self.name]
    def evaluate(self, ctx):
        query.evaluate(self, ctx)
        self._evaluate(ctx)
class constant(query):
    # Named constant resolved from the context's variable table at
    # evaluation time.
    def __init__(self, name):
        self.name = name
    def _evaluate(self, ctx):
        # Straight lookup; raises KeyError for an unknown name.
        return ctx.variables[self.name]
    def evaluate(self, ctx):
        query.evaluate(self, ctx)
        self._evaluate(ctx)
class negation(query):
    # Logical NOT of a single subexpression.
    def __init__(self, right):
        self.right = right
    def traverse(self):
        # Only one child: yield it, then this node (post-order).
        yield self.right
        yield self
    def _evaluate(self, ctx):
        # ``not`` already yields a bool, so no explicit bool() is needed.
        return not self.right._evaluate(ctx)
    def evaluate(self, ctx):
        query.evaluate(self, ctx)
        self._evaluate(ctx)
class funccall(query):
    # AST node for a function-style call; currently only the '?' (match)
    # request has any implementation.
    def __init__(self, name, arglist):
        self.name = name
        self.arglist = arglist
    def traverse(self):
        for arg in self.arglist:
            yield arg
        yield self
    #def prepare(self, ctx):
        #if self.name == '?':
            #Match request
            #for ix, arg in enumerate(self.arglist):
                #if isinstance(arg, variable):
                    #arg.update_match_hints(ctx, ix)
    def _evaluate(self, ctx):
        # NOTE(review): this method looks unfinished -- ``a.evaluate(ctx)``
        # returns None (it does not propagate ``_evaluate``'s value), the
        # ``result`` object is built but never returned, and only the first
        # argument's variable name is recorded.
        if self.name == '?':
            passed_args = [ None if isinstance(a, variable) else a.evaluate(ctx) for a in self.arglist ]
            #Match request
            result = match_result(ctx)
            for ix, link in ctx.model.match(*passed_args):
                if isinstance(self.arglist[0], variable):
                    result.variables = self.arglist[0].name
            # NOTE(review): this ``else`` attaches to the ``for`` above
            # (for/else), so NotImplementedError fires whenever the loop
            # finishes without ``break`` -- it was probably meant to pair
            # with ``if self.name == '?'``. Preserved as-is; confirm intent.
            else:
                raise NotImplementedError
    def evaluate(self, ctx):
        query.evaluate(self, ctx)
        self._evaluate(ctx)
class match_result(object):
    """Accumulates the output of a match: a result model plus variable
    bindings."""
    def __init__(self, context):
        # Fresh output model created through the context's factory.
        self.model = context.transform_factory()
        # Mapping of query-variable name -> matched value(s).
        self.variables = {}
    def conjoin(self, other):
        # BUG FIX: ``self`` was missing from the signature, so calling
        # ``result.conjoin(x)`` raised TypeError. The combination logic
        # itself is still unimplemented.
        pass
| uogbuji/versa | tools/py/query/ast.py | Python | apache-2.0 | 4,818 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.compat.six import string_types
from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound
# Prefer the live Display instance created by the ansible CLI entry point;
# fall back to a fresh one when running outside the CLI (e.g. unit tests).
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
    '''
    Given a list of mixed task/block data (parsed from YAML),
    return a list of Block() objects, where implicit blocks
    are created for each bare Task.

    :param ds: list of block/task datastructures, or None.
    :returns: list of Block objects (possibly empty).
    '''
    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.task_include import TaskInclude
    from ansible.playbook.role_include import IncludeRole
    assert isinstance(ds, (list, type(None)))
    block_list = []
    if ds:
        for block_ds in ds:
            b = Block.load(
                block_ds,
                play=play,
                parent_block=parent_block,
                role=role,
                task_include=task_include,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader
            )
            # Implicit blocks are created by bare tasks listed in a play without
            # an explicit block statement. If we have two implicit blocks in a row,
            # squash them down to a single block to save processing time later.
            if b._implicit and len(block_list) > 0 and block_list[-1]._implicit:
                for t in b.block:
                    # Re-parent each task (or its include chain) onto the
                    # block being merged into, so lookups stay consistent.
                    if isinstance(t._parent, (TaskInclude, IncludeRole)):
                        t._parent._parent = block_list[-1]
                    else:
                        t._parent = block_list[-1]
                block_list[-1].block.extend(b.block)
            else:
                block_list.append(b)
    return block_list
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
    '''
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.

    Static includes ('include'/'include_role' resolved at parse time) are
    expanded in place here; dynamic ones are appended as include objects to
    be resolved later by the strategy.
    '''
    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.handler import Handler
    from ansible.playbook.task import Task
    from ansible.playbook.task_include import TaskInclude
    from ansible.playbook.role_include import IncludeRole
    from ansible.playbook.handler_task_include import HandlerTaskInclude
    from ansible.template import Templar
    assert isinstance(ds, list)
    task_list = []
    for task_ds in ds:
        assert isinstance(task_ds, dict)
        # A 'block' entry recurses into Block.load; everything else below
        # is a task, an include, or an include_role.
        if 'block' in task_ds:
            t = Block.load(
                task_ds,
                play=play,
                parent_block=block,
                role=role,
                task_include=task_include,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader,
            )
            task_list.append(t)
        else:
            if 'include' in task_ds:
                if use_handlers:
                    include_class = HandlerTaskInclude
                else:
                    include_class = TaskInclude
                t = include_class.load(
                    task_ds,
                    block=block,
                    role=role,
                    task_include=None,
                    variable_manager=variable_manager,
                    loader=loader
                )
                all_vars = variable_manager.get_vars(loader=loader, play=play, task=t)
                templar = Templar(loader=loader, variables=all_vars)
                # check to see if this include is dynamic or static:
                # 1. the user has set the 'static' option to false or true
                # 2. one of the appropriate config options was set
                if t.static is not None:
                    is_static = t.static
                else:
                    is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \
                                (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
                                (not templar._contains_vars(t.args['_raw_params']) and t.all_parents_static() and not t.loop)
                if is_static:
                    if t.loop is not None:
                        raise AnsibleParserError("You cannot use 'static' on an include with a loop", obj=task_ds)
                    # we set a flag to indicate this include was static
                    t.statically_loaded = True
                    # handle relative includes by walking up the list of parent include
                    # tasks and checking the relative result to see if it exists
                    parent_include = block
                    cumulative_path = None
                    found = False
                    subdir = 'tasks'
                    if use_handlers:
                        subdir = 'handlers'
                    while parent_include is not None:
                        if not isinstance(parent_include, TaskInclude):
                            parent_include = parent_include._parent
                            continue
                        parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params')))
                        if cumulative_path is None:
                            cumulative_path = parent_include_dir
                        elif not os.path.isabs(cumulative_path):
                            cumulative_path = os.path.join(parent_include_dir, cumulative_path)
                        include_target = templar.template(t.args['_raw_params'])
                        if t._role:
                            new_basedir = os.path.join(t._role._role_path, subdir, cumulative_path)
                            include_file = loader.path_dwim_relative(new_basedir, subdir, include_target)
                        else:
                            include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
                        if os.path.exists(include_file):
                            found = True
                            break
                        else:
                            parent_include = parent_include._parent
                    if not found:
                        # No parent include resolved the path; fall back to
                        # role- or playbook-relative resolution.
                        try:
                            include_target = templar.template(t.args['_raw_params'])
                        except AnsibleUndefinedVariable:
                            raise AnsibleParserError(
                                      "Error when evaluating variable in include name: %s.\n\n" \
                                      "When using static includes, ensure that any variables used in their names are defined in vars/vars_files\n" \
                                      "or extra-vars passed in from the command line. Static includes cannot use variables from inventory\n" \
                                      "sources like group or host vars." % t.args['_raw_params'],
                                      obj=task_ds,
                                      suppress_extended_error=True,
                                  )
                        if t._role:
                            include_file = loader.path_dwim_relative(t._role._role_path, subdir, include_target)
                        else:
                            include_file = loader.path_dwim(include_target)
                    try:
                        data = loader.load_from_file(include_file)
                        if data is None:
                            return []
                        elif not isinstance(data, list):
                            raise AnsibleParserError("included task files must contain a list of tasks", obj=data)
                        # since we can't send callbacks here, we display a message directly in
                        # the same fashion used by the on_include callback. We also do it here,
                        # because the recursive nature of helper methods means we may be loading
                        # nested includes, and we want the include order printed correctly
                        display.vv("statically included: %s" % include_file)
                    except AnsibleFileNotFound:
                        if t.static or \
                           C.DEFAULT_TASK_INCLUDES_STATIC or \
                           C.DEFAULT_HANDLER_INCLUDES_STATIC and use_handlers:
                            raise
                        display.deprecated(
                            "Included file '%s' not found, however since this include is not " \
                            "explicitly marked as 'static: yes', we will try and include it dynamically " \
                            "later. In the future, this will be an error unless 'static: no' is used " \
                            "on the include task. If you do not want missing includes to be considered " \
                            "dynamic, use 'static: yes' on the include or set the global ansible.cfg " \
                            "options to make all inclues static for tasks and/or handlers" % include_file,
                        )
                        task_list.append(t)
                        continue
                    included_blocks = load_list_of_blocks(
                        data,
                        play=play,
                        parent_block=None,
                        task_include=t.copy(),
                        role=role,
                        use_handlers=use_handlers,
                        loader=loader,
                        variable_manager=variable_manager,
                    )
                    # pop tags out of the include args, if they were specified there, and assign
                    # them to the include. If the include already had tags specified, we raise an
                    # error so that users know not to specify them both ways
                    tags = t.vars.pop('tags', [])
                    if isinstance(tags, string_types):
                        tags = tags.split(',')
                    if len(tags) > 0:
                        if len(t.tags) > 0:
                            raise AnsibleParserError(
                                "Include tasks should not specify tags in more than one way (both via args and directly on the task). " \
                                "Mixing styles in which tags are specified is prohibited for whole import hierarchy, not only for single import statement",
                                obj=task_ds,
                                suppress_extended_error=True,
                            )
                        display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
                    else:
                        tags = t.tags[:]
                    # now we extend the tags on each of the included blocks
                    for b in included_blocks:
                        b.tags = list(set(b.tags).union(tags))
                    # END FIXME
                    # FIXME: handlers shouldn't need this special handling, but do
                    # right now because they don't iterate blocks correctly
                    if use_handlers:
                        for b in included_blocks:
                            task_list.extend(b.block)
                    else:
                        task_list.extend(included_blocks)
                else:
                    # Dynamic include: keep the include object for later.
                    task_list.append(t)
            elif 'include_role' in task_ds:
                ir = IncludeRole.load(
                    task_ds,
                    block=block,
                    role=role,
                    task_include=None,
                    variable_manager=variable_manager,
                    loader=loader
                )
                # 1. the user has set the 'static' option to false or true
                # 2. one of the appropriate config options was set
                if ir.static is not None:
                    is_static = ir.static
                else:
                    display.debug('Determine if include_role is static')
                    # Check to see if this include is dynamic or static:
                    all_vars = variable_manager.get_vars(loader=loader, play=play, task=ir)
                    templar = Templar(loader=loader, variables=all_vars)
                    needs_templating = False
                    for param in ir.args:
                        if templar._contains_vars(ir.args[param]):
                            if not templar.templatable(ir.args[param]):
                                needs_templating = True
                                break
                    is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \
                                (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
                                (not needs_templating and ir.all_parents_static() and not ir.loop)
                    display.debug('Determined that if include_role static is %s' % str(is_static))
                if is_static:
                    # uses compiled list from object
                    t = task_list.extend(ir.get_block_list(variable_manager=variable_manager, loader=loader))
                else:
                    # passes task object itself for latter generation of list
                    t = task_list.append(ir)
            else:
                # Plain task (or handler) entry.
                if use_handlers:
                    t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
                else:
                    t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
                task_list.append(t)
    return task_list
def load_list_of_roles(ds, play, current_role_path=None, variable_manager=None, loader=None):
    '''
    Loads and returns a list of RoleInclude objects from the datastructure
    list of role definitions
    '''
    # we import here to prevent a circular dependency with imports
    from ansible.playbook.role.include import RoleInclude
    assert isinstance(ds, list)
    # One RoleInclude per definition, preserving the declared order.
    return [
        RoleInclude.load(role_def, play=play,
                         current_role_path=current_role_path,
                         variable_manager=variable_manager,
                         loader=loader)
        for role_def in ds
    ]
return roles
| mckerrj/ansible | lib/ansible/playbook/helpers.py | Python | gpl-3.0 | 15,590 |
# Generated by h2py from d:\mssdk\include\winnetwk.h
# Values below are machine-translated from winnetwk.h (see header comment);
# do not edit by hand -- regenerate with h2py instead.
# Network provider identifiers (WNNC_NET_*).
WNNC_NET_MSNET = 0x00010000
WNNC_NET_LANMAN = 0x00020000
WNNC_NET_NETWARE = 0x00030000
WNNC_NET_VINES = 0x00040000
WNNC_NET_10NET = 0x00050000
WNNC_NET_LOCUS = 0x00060000
WNNC_NET_SUN_PC_NFS = 0x00070000
WNNC_NET_LANSTEP = 0x00080000
WNNC_NET_9TILES = 0x00090000
WNNC_NET_LANTASTIC = 0x000A0000
WNNC_NET_AS400 = 0x000B0000
WNNC_NET_FTP_NFS = 0x000C0000
WNNC_NET_PATHWORKS = 0x000D0000
WNNC_NET_LIFENET = 0x000E0000
WNNC_NET_POWERLAN = 0x000F0000
WNNC_NET_BWNFS = 0x00100000
WNNC_NET_COGENT = 0x00110000
WNNC_NET_FARALLON = 0x00120000
WNNC_NET_APPLETALK = 0x00130000
WNNC_NET_INTERGRAPH = 0x00140000
WNNC_NET_SYMFONET = 0x00150000
WNNC_NET_CLEARCASE = 0x00160000
WNNC_NET_FRONTIER = 0x00170000
WNNC_NET_BMC = 0x00180000
WNNC_NET_DCE = 0x00190000
WNNC_NET_DECORB = 0x00200000
WNNC_NET_PROTSTOR = 0x00210000
WNNC_NET_FJ_REDIR = 0x00220000
WNNC_NET_DISTINCT = 0x00230000
WNNC_NET_TWINS = 0x00240000
WNNC_NET_RDR2SAMPLE = 0x00250000
# Resource enumeration scopes (RESOURCE_*).
RESOURCE_CONNECTED = 0x00000001
RESOURCE_GLOBALNET = 0x00000002
RESOURCE_REMEMBERED = 0x00000003
RESOURCE_RECENT = 0x00000004
RESOURCE_CONTEXT = 0x00000005
# Resource types (RESOURCETYPE_*).
RESOURCETYPE_ANY = 0x00000000
RESOURCETYPE_DISK = 0x00000001
RESOURCETYPE_PRINT = 0x00000002
RESOURCETYPE_RESERVED = 0x00000008
RESOURCETYPE_UNKNOWN = 0xFFFFFFFF
# Resource usage bit flags (RESOURCEUSAGE_*).
RESOURCEUSAGE_CONNECTABLE = 0x00000001
RESOURCEUSAGE_CONTAINER = 0x00000002
RESOURCEUSAGE_NOLOCALDEVICE = 0x00000004
RESOURCEUSAGE_SIBLING = 0x00000008
RESOURCEUSAGE_ATTACHED = 0x00000010
RESOURCEUSAGE_ALL = (RESOURCEUSAGE_CONNECTABLE | RESOURCEUSAGE_CONTAINER | RESOURCEUSAGE_ATTACHED)
RESOURCEUSAGE_RESERVED = 0x80000000
# Resource display types (RESOURCEDISPLAYTYPE_*).
RESOURCEDISPLAYTYPE_GENERIC = 0x00000000
RESOURCEDISPLAYTYPE_DOMAIN = 0x00000001
RESOURCEDISPLAYTYPE_SERVER = 0x00000002
RESOURCEDISPLAYTYPE_SHARE = 0x00000003
RESOURCEDISPLAYTYPE_FILE = 0x00000004
RESOURCEDISPLAYTYPE_GROUP = 0x00000005
RESOURCEDISPLAYTYPE_NETWORK = 0x00000006
RESOURCEDISPLAYTYPE_ROOT = 0x00000007
RESOURCEDISPLAYTYPE_SHAREADMIN = 0x00000008
RESOURCEDISPLAYTYPE_DIRECTORY = 0x00000009
RESOURCEDISPLAYTYPE_TREE = 0x0000000A
RESOURCEDISPLAYTYPE_NDSCONTAINER = 0x0000000B
NETPROPERTY_PERSISTENT = 1
# Connection flags (CONNECT_*) for WNetAddConnection-style calls.
CONNECT_UPDATE_PROFILE = 0x00000001
CONNECT_UPDATE_RECENT = 0x00000002
CONNECT_TEMPORARY = 0x00000004
CONNECT_INTERACTIVE = 0x00000008
CONNECT_PROMPT = 0x00000010
CONNECT_NEED_DRIVE = 0x00000020
CONNECT_REFCOUNT = 0x00000040
CONNECT_REDIRECT = 0x00000080
CONNECT_LOCALDRIVE = 0x00000100
CONNECT_CURRENT_MEDIA = 0x00000200
CONNECT_DEFERRED = 0x00000400
CONNECT_RESERVED = 0xFF000000
# Connection-dialog flags (CONNDLG_*).
CONNDLG_RO_PATH = 0x00000001
CONNDLG_CONN_POINT = 0x00000002
CONNDLG_USE_MRU = 0x00000004
CONNDLG_HIDE_BOX = 0x00000008
CONNDLG_PERSIST = 0x00000010
CONNDLG_NOT_PERSIST = 0x00000020
# Disconnect-dialog flags (DISC_*).
DISC_UPDATE_PROFILE = 0x00000001
DISC_NO_FORCE = 0x00000040
# Info levels for WNetGetUniversalName.
UNIVERSAL_NAME_INFO_LEVEL = 0x00000001
REMOTE_NAME_INFO_LEVEL = 0x00000002
# Formatting flags (WNFMT_*).
WNFMT_MULTILINE = 0x01
WNFMT_ABBREVIATED = 0x02
WNFMT_INENUM = 0x10
WNFMT_CONNECTION = 0x20
# Network info / redirection flags.
NETINFO_DLL16 = 0x00000001
NETINFO_DISKRED = 0x00000004
NETINFO_PRINTERRED = 0x00000008
RP_LOGON = 0x01
RP_INIFILE = 0x02
PP_DISPLAYERRORS = 0x01
# Connection performance flags (WNCON_*).
WNCON_FORNETCARD = 0x00000001
WNCON_NOTROUTED = 0x00000002
WNCON_SLOWLINK = 0x00000004
WNCON_DYNAMIC = 0x00000008
| JulienMcJay/eclock | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/Demos/win32wnet/winnetwk.py | Python | gpl-2.0 | 3,187 |
from __future__ import unicode_literals
import os
import sys
from .prod import *
# NOTE(review): DEBUG is enabled even though this file overlays the
# production settings -- confirm this module is never used in deployment.
DEBUG = True
TEMPLATE_DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (('Adi', 'adi@u.northwestern.edu'), )
# Secret key generator: https://djskgen.herokuapp.com/
# You should set your key as an environ variable
# SECURITY: the fallback literal below is committed to version control;
# rotate it and rely exclusively on the SECRET_KEY environment variable.
SECRET_KEY = os.environ.get("SECRET_KEY", "uxi*44khd4ao#f!8ux$+^1=f*7r6thl@14y-4#q2*14ci4%zre")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost']
# SECURITY: the database password is hard-coded in source control; move it
# to an environment variable or secrets store and rotate the credential.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'sighht_deploy',
        'USER': 'postgres',
        'PASSWORD': 'Ad!020687shEsh',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
| adiyengar/Spirit | example/project/settings/prod_local.py | Python | mit | 781 |
# -*- coding: utf-8 -*-
import unittest
import tests.envs as envs
import pyoxford
class TestTranslator(unittest.TestCase):
    """Integration tests for the pyoxford translator API.

    These tests hit the live service using the credentials referenced by
    ``envs.FILE_PATH``.
    """

    def test_detect(self):
        api = pyoxford.translator(envs.FILE_PATH)
        result = api.detect("I'm testing translator api.")
        self.assertEqual("en", result)

    def test_translate(self):
        api = pyoxford.translator(envs.FILE_PATH)
        result = api.translate("My name is John.", "ja")
        # BUG FIX: the original used assertTrue(expected, result), which
        # treats ``result`` as the failure *message* and therefore always
        # passed. Assert actual equality with the expected translation.
        self.assertEqual("私の名前はジョンです。", result)
| icoxfog417/pyoxford | tests/test_translator.py | Python | mit | 508 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Auto-generated by South's schemamigration: creates the initial
        # FingerprintsREST tables. Keep in lock-step with backwards().
        # Adding model 'Building'
        db.create_table(u'FingerprintsREST_building', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255, db_index=True)),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ))
        db.send_create_signal('FingerprintsREST', ['Building'])
        # Adding model 'Device'
        db.create_table(u'FingerprintsREST_device', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64, db_index=True)),
            ('version', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
            ('manufacturer', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
        ))
        db.send_create_signal('FingerprintsREST', ['Device'])
        # Adding model 'Location'
        db.create_table(u'FingerprintsREST_location', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('description', self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True)),
            ('room', self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True)),
            ('building', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['FingerprintsREST.Building'])),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ))
        db.send_create_signal('FingerprintsREST', ['Location'])
        # Adding unique constraint on 'Location', fields ['name', 'building']
        db.create_unique(u'FingerprintsREST_location', ['name', 'building_id'])
        # Adding model 'BaseStation'
        db.create_table(u'FingerprintsREST_basestation', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('bssid', self.gf('django.db.models.fields.CharField')(max_length=256, db_index=True)),
            ('ssid', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('frequency', self.gf('django.db.models.fields.IntegerField')()),
            ('manufacturer', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
            ('model', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ))
        db.send_create_signal('FingerprintsREST', ['BaseStation'])
        # Adding unique constraint on 'BaseStation', fields ['bssid', 'frequency']
        db.create_unique(u'FingerprintsREST_basestation', ['bssid', 'frequency'])
        # Adding model 'Fingerprint'
        db.create_table(u'FingerprintsREST_fingerprint', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('location', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['FingerprintsREST.Location'], blank=True)),
            ('timestamp', self.gf('django.db.models.fields.DateTimeField')()),
            ('device', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['FingerprintsREST.Device'], blank=True)),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('direction', self.gf('django.db.models.fields.FloatField')()),
            ('magnitude', self.gf('django.db.models.fields.FloatField')()),
            ('zaxis', self.gf('django.db.models.fields.FloatField')()),
            ('confirmed', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('FingerprintsREST', ['Fingerprint'])
        # Adding model 'Scan'
        db.create_table(u'FingerprintsREST_scan', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('level', self.gf('django.db.models.fields.IntegerField')()),
            ('base_station', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['FingerprintsREST.BaseStation'])),
            ('fingerprint', self.gf('django.db.models.fields.related.ForeignKey')(related_name='scans', to=orm['FingerprintsREST.Fingerprint'])),
        ))
        db.send_create_signal('FingerprintsREST', ['Scan'])
def backwards(self, orm):
    """Revert the initial migration: drop unique constraints first, then
    every table created by forwards() (auto-generated by South)."""
    # Constraints must be removed before the tables that carry them.
    # Removing unique constraint on 'BaseStation', fields ['bssid', 'frequency']
    db.delete_unique(u'FingerprintsREST_basestation', ['bssid', 'frequency'])
    # Removing unique constraint on 'Location', fields ['name', 'building']
    db.delete_unique(u'FingerprintsREST_location', ['name', 'building_id'])
    # Deleting model 'Building'
    db.delete_table(u'FingerprintsREST_building')
    # Deleting model 'Device'
    db.delete_table(u'FingerprintsREST_device')
    # Deleting model 'Location'
    db.delete_table(u'FingerprintsREST_location')
    # Deleting model 'BaseStation'
    db.delete_table(u'FingerprintsREST_basestation')
    # Deleting model 'Fingerprint'
    db.delete_table(u'FingerprintsREST_fingerprint')
    # Deleting model 'Scan'
    db.delete_table(u'FingerprintsREST_scan')
# South ORM freeze: auto-generated snapshot of every model (plus its
# auth/contenttypes dependencies) as of this migration. Do not edit by hand;
# South uses it to build the frozen `orm` passed to forwards()/backwards().
models = {
    'FingerprintsREST.basestation': {
        'Meta': {'unique_together': "(('bssid', 'frequency'),)", 'object_name': 'BaseStation'},
        'bssid': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
        'frequency': ('django.db.models.fields.IntegerField', [], {}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
        'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
        'ssid': ('django.db.models.fields.CharField', [], {'max_length': '256'})
    },
    'FingerprintsREST.building': {
        'Meta': {'object_name': 'Building'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
        'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
    },
    'FingerprintsREST.device': {
        'Meta': {'object_name': 'Device'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
        'version': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
    },
    'FingerprintsREST.fingerprint': {
        'Meta': {'object_name': 'Fingerprint'},
        'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'device': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['FingerprintsREST.Device']", 'blank': 'True'}),
        'direction': ('django.db.models.fields.FloatField', [], {}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'location': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['FingerprintsREST.Location']", 'blank': 'True'}),
        'magnitude': ('django.db.models.fields.FloatField', [], {}),
        'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
        'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
        'zaxis': ('django.db.models.fields.FloatField', [], {})
    },
    'FingerprintsREST.location': {
        'Meta': {'unique_together': "(('name', 'building'),)", 'object_name': 'Location'},
        'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['FingerprintsREST.Building']"}),
        'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
        'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
        'room': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'})
    },
    'FingerprintsREST.scan': {
        'Meta': {'object_name': 'Scan'},
        'base_station': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['FingerprintsREST.BaseStation']"}),
        'fingerprint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scans'", 'to': "orm['FingerprintsREST.Fingerprint']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'level': ('django.db.models.fields.IntegerField', [], {})
    },
    u'auth.group': {
        'Meta': {'object_name': 'Group'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
        'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
    },
    u'auth.permission': {
        'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
        'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
    u'auth.user': {
        'Meta': {'object_name': 'User'},
        'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
        'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
    },
    u'contenttypes.contenttype': {
        'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    }
}
# Only this app's models are created/dropped by this migration.
complete_apps = ['FingerprintsREST']
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming
import sys, os
import essentia.standard as ess
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
import stft as STFT
def f0Yin(x, N, H, minf0, maxf0):
    """Fundamental frequency detection using the Yin algorithm (FFT variant).

    x: input sound, N: window/FFT size, H: hop size,
    minf0: minimum f0 frequency in Hz, maxf0: maximum f0 frequency in Hz
    returns f0: array with one f0 estimate (Hz) per frame
    """
    spectrum = ess.Spectrum(size=N)
    window = ess.Windowing(size=N, type='hann')
    pitchYin = ess.PitchYinFFT(minFrequency = minf0, maxFrequency = maxf0)
    pin = 0
    pend = x.size-N
    f0 = []
    # frame-by-frame analysis: window, magnitude spectrum, Yin pitch
    while pin<pend:
        mX = spectrum(window(x[pin:pin+N]))
        f0t = pitchYin(mX)          # presumably (f0, confidence) pair -- only f0 is kept
        f0 = np.append(f0, f0t[0])
        pin += H
    return f0
if __name__ == '__main__':
    # Analyze an example sound and overlay the Yin f0 track on its spectrogram.
    (fs, x) = UF.wavread('../../../sounds/vignesh.wav')
    plt.figure(1, figsize=(9, 7))
    # STFT analysis for the background spectrogram
    N = 2048
    H = 256
    w = hamming(N)                      # was hamming(2048); keep tied to N
    mX, pX = STFT.stftAnal(x, fs, w, N, H)
    maxplotfreq = 2000.0
    # Number of spectrogram bins to display. Must be an int: float slice
    # indices (N*maxplotfreq/fs) raise TypeError on Python 3 / modern NumPy.
    numBins = int(N * maxplotfreq / fs) + 1
    frmTime = H*np.arange(mX[:,0].size)/float(fs)
    binFreq = fs*np.arange(numBins)/N
    plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:numBins]))
    # Yin f0 analysis over the same sound
    N = 2048
    minf0 = 130
    maxf0 = 300
    H = 256
    f0 = f0Yin(x, N, H, minf0, maxf0)
    yf0 = UF.sinewaveSynth(f0, .8, H, fs)   # sinusoid resynthesis of the f0 track
    frmTime = H*np.arange(f0.size)/float(fs)
    plt.plot(frmTime, f0, linewidth=2, color='k')
    plt.autoscale(tight=True)
    plt.title('mX + f0 (vignesh.wav), YIN: N=2048, H = 256 ')
    plt.tight_layout()
    plt.savefig('f0Yin.png')
    UF.wavwrite(yf0, fs, 'f0Yin.wav')
    plt.show()
| timqian/sms-tools | lectures/6-Harmonic-model/plots-code/f0Yin.py | Python | agpl-3.0 | 1,719 |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import stevedore
import testtools
import mock
from oslo_messaging._executors import impl_thread
try:
from oslo_messaging import opts
except ImportError:
opts = None
from oslo_messaging.tests import utils as test_utils
class OptsTestCase(test_utils.BaseTestCase):
    """Checks that oslo.messaging exposes its configuration options both via
    opts.list_opts() and through the 'oslo.config.opts' entry point."""

    @testtools.skipIf(opts is None, "Options not importable")
    def setUp(self):
        super(OptsTestCase, self).setUp()

    def _test_list_opts(self, result):
        """Shared assertions on a list of (group, options) pairs."""
        self.assertEqual(6, len(result))
        groups = [g for (g, l) in result]
        # None is the default (unnamed) option group
        self.assertIn(None, groups)
        self.assertIn('matchmaker_ring', groups)
        self.assertIn('matchmaker_redis', groups)
        self.assertIn('oslo_messaging_amqp', groups)
        self.assertIn('oslo_messaging_rabbit', groups)
        self.assertIn('oslo_messaging_qpid', groups)
        opt_names = [o.name for (g, l) in result for o in l]
        self.assertIn('rpc_backend', opt_names)

    def test_list_opts(self):
        self._test_list_opts(opts.list_opts())

    def test_entry_point(self):
        # The same option list must be reachable through the
        # 'oslo.config.opts' stevedore entry point.
        result = None
        for ext in stevedore.ExtensionManager('oslo.config.opts',
                                              invoke_on_load=True):
            if ext.name == "oslo.messaging":
                result = ext.obj
                break
        self.assertIsNotNone(result)
        self._test_list_opts(result)

    def test_defaults(self):
        # Instantiating the executor registers its options on self.conf.
        impl_thread.ThreadExecutor(self.conf, mock.Mock(), mock.Mock())
        opts.set_defaults(self.conf, executor_thread_pool_size=100)
        self.assertEqual(100, self.conf.executor_thread_pool_size)
| stevei101/oslo.messaging | oslo_messaging/tests/test_opts.py | Python | apache-2.0 | 2,216 |
# -*- coding: utf-8 -*-
"""
sphinx.pycode.nodes
~~~~~~~~~~~~~~~~~~~
Parse tree node implementations.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class BaseNode(object):
    """
    Node superclass for both terminal and nonterminal nodes.
    """
    # Set by the parent Node when this node is adopted as a child.
    parent = None

    def _eq(self, other):
        """Per-subclass equality test; called only for same-class operands."""
        raise NotImplementedError

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._eq(other)

    def __ne__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return not self._eq(other)

    # Nodes are mutable, so they are deliberately unhashable.
    __hash__ = None

    def get_prev_sibling(self):
        """Return previous child in parent's children, or None."""
        if self.parent is None:
            return None
        for i, child in enumerate(self.parent.children):
            if child is self:
                if i == 0:
                    return None
                return self.parent.children[i - 1]

    def get_next_sibling(self):
        """Return next child in parent's children, or None."""
        if self.parent is None:
            return None
        for i, child in enumerate(self.parent.children):
            if child is self:
                try:
                    return self.parent.children[i + 1]
                except IndexError:
                    return None

    def get_prev_leaf(self):
        """Return the leaf node that precedes this node in the parse tree."""
        def last_child(node):
            # Rightmost leaf under *node*, or None for an empty subtree.
            if isinstance(node, Leaf):
                return node
            elif not node.children:
                return None
            else:
                return last_child(node.children[-1])
        if self.parent is None:
            return None
        prev = self.get_prev_sibling()
        if isinstance(prev, Leaf):
            return prev
        elif prev is not None:
            return last_child(prev)
        # No previous sibling at this level: recurse upward.
        return self.parent.get_prev_leaf()

    def get_next_leaf(self):
        """Return self if leaf, otherwise the leaf node that succeeds this
        node in the parse tree.
        """
        # Descends to the leftmost leaf of this subtree.
        node = self
        while not isinstance(node, Leaf):
            assert node.children
            node = node.children[0]
        return node

    def get_lineno(self):
        """Return the line number which generated the invocant node."""
        return self.get_next_leaf().lineno

    def get_prefix(self):
        """Return the prefix of the next leaf node."""
        # only leaves carry a prefix
        return self.get_next_leaf().prefix
class Node(BaseNode):
    """Nonterminal parse-tree node: a typed container of child nodes."""

    def __init__(self, type, children, context=None):
        # Nonterminal type codes are >= 256 (terminals live below 256).
        self.type = type
        self.children = list(children)
        # Adopt every child so upward navigation works.
        for child in self.children:
            child.parent = self

    def __repr__(self):
        return '%s(%s, %r)' % (type(self).__name__, self.type, self.children)

    def __str__(self):
        """This reproduces the input source exactly."""
        return ''.join(str(child) for child in self.children)

    def _eq(self, other):
        # Same-class comparison guaranteed by BaseNode.__eq__.
        return self.type == other.type and self.children == other.children

    # Sequence-style access delegates straight to the children list.
    def __getitem__(self, index):
        return self.children[index]

    def __iter__(self):
        return iter(self.children)

    def __len__(self):
        return len(self.children)
class Leaf(BaseNode):
    """Terminal parse-tree node carrying a single token value."""

    prefix = ''  # whitespace and comments preceding this token in the input
    lineno = 0   # line where this token starts in the input
    column = 0   # column where this token starts in the input

    def __init__(self, type, value, context=None):
        # Terminal type codes live below 256.
        self.type = type
        self.value = value
        if context is not None:
            self.prefix, (self.lineno, self.column) = context

    def __repr__(self):
        return '%s(%r, %r, %r)' % (type(self).__name__,
                                   self.type, self.value, self.prefix)

    def __str__(self):
        """This reproduces the input source exactly."""
        return self.prefix + str(self.value)

    def _eq(self, other):
        """Compares two nodes for equality (prefix/position are ignored)."""
        return self.type == other.type and self.value == other.value
def convert(grammar, raw_node):
    """Convert a raw parser tuple into a Node or Leaf instance."""
    type, value, context, children = raw_node
    # Terminals have no children and no symbol entry in the grammar.
    if not children and type not in grammar.number2symbol:
        return Leaf(type, value, context=context)
    # Collapse single-child nonterminals: hand back the child itself
    # instead of wrapping it in a fresh node.
    if len(children) == 1:
        return children[0]
    return Node(type, children, context=context)
def nice_repr(node, number2name, prefix=False):
    """Return a readable repr of a parse tree with type codes mapped to names.

    number2name maps numeric node types to symbolic names; when *prefix* is
    true, leaf reprs also show their whitespace/comment prefix.
    """
    def _repr(node):
        if isinstance(node, Leaf):
            return "%s(%r)" % (number2name[node.type], node.value)
        else:
            return "%s(%s)" % (number2name[node.type],
                               ', '.join(map(_repr, node.children)))

    def _prepr(node):
        if isinstance(node, Leaf):
            return "%s(%r, %r)" % (number2name[node.type],
                                   node.prefix, node.value)
        else:
            return "%s(%s)" % (number2name[node.type],
                               ', '.join(map(_prepr, node.children)))

    # Conditional expression instead of the fragile `cond and a or b` idiom.
    return (_prepr if prefix else _repr)(node)
class NodeVisitor(object):
    """Dispatch-by-type tree visitor: subclasses define visit_<typename>
    methods, looked up via the number2name mapping."""

    def __init__(self, number2name, *args):
        # Maps numeric node types to the symbolic names used for dispatch.
        self.number2name = number2name
        self.init(*args)

    def init(self, *args):
        """Subclass hook; receives the extra constructor arguments."""
        pass

    def visit(self, node):
        """Visit a node."""
        method = 'visit_' + self.number2name[node.type]
        visitor = getattr(self, method, self.generic_visit)
        return visitor(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        # Descend into the children of nonterminals; leaves are ignored.
        if isinstance(node, Node):
            for child in node:
                self.visit(child)
| axbaretto/beam | sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/pycode/nodes.py | Python | apache-2.0 | 6,392 |
"""Support for fans through the SmartThings cloud API."""
from __future__ import annotations
from collections.abc import Sequence
import math
from pysmartthings import Capability
from homeassistant.components.fan import SUPPORT_SET_SPEED, FanEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
SPEED_RANGE = (1, 3) # off is not included
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Add fans for a config entry."""
    broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
    # One fan entity per device the broker assigned to the "fan" platform.
    async_add_entities(
        [
            SmartThingsFan(device)
            for device in broker.devices.values()
            if broker.any_assigned(device.device_id, "fan")
        ]
    )
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
    """Return all capabilities supported if minimum required are present."""
    required = [Capability.switch, Capability.fan_speed]
    # A fan needs both switch and fan_speed; anything less is unsupported.
    for capability in required:
        if capability not in capabilities:
            return None
    return required
class SmartThingsFan(SmartThingsEntity, FanEntity):
    """Define a SmartThings Fan."""

    async def async_set_percentage(self, percentage: int | None) -> None:
        """Set the speed percentage of the fan.

        None turns the fan on (previous speed), 0 turns it off, any other
        value is mapped onto the device's 1-3 speed range.
        """
        if percentage is None:
            await self._device.switch_on(set_status=True)
        elif percentage == 0:
            await self._device.switch_off(set_status=True)
        else:
            value = math.ceil(percentage_to_ranged_value(SPEED_RANGE, percentage))
            await self._device.set_fan_speed(value, set_status=True)
        # State is set optimistically in the command above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_write_ha_state()

    async def async_turn_on(
        self,
        speed: str | None = None,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs,
    ) -> None:
        """Turn the fan on."""
        # Delegates to async_set_percentage (None -> plain switch-on).
        await self.async_set_percentage(percentage)

    async def async_turn_off(self, **kwargs) -> None:
        """Turn the fan off."""
        await self._device.switch_off(set_status=True)
        # State is set optimistically in the command above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_write_ha_state()

    @property
    def is_on(self) -> bool:
        """Return true if fan is on."""
        return self._device.status.switch

    @property
    def percentage(self) -> int:
        """Return the current speed percentage."""
        return ranged_value_to_percentage(SPEED_RANGE, self._device.status.fan_speed)

    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        return int_states_in_range(SPEED_RANGE)

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return SUPPORT_SET_SPEED
| rohitranjan1991/home-assistant | homeassistant/components/smartthings/fan.py | Python | mit | 3,421 |
#
# Author: Henrique Pereira Coutada Miranda
# Tests for yambopy
# Si
#
import matplotlib
import unittest
import sys
import os
import shutil
import argparse
import subprocess
import filecmp
from yambopy import *
from qepy import *
class TestPW_Si(unittest.TestCase):
    """ This class creates the input files for Si and compares them to reference files
    """
    def get_inputfile(self):
        """Build the common pw.x input shared by the relax/scf/nscf tests."""
        qe = PwIn()
        # two-atom FCC silicon basis
        qe.atoms = [['Si',[0.125,0.125,0.125]],
                    ['Si',[-.125,-.125,-.125]]]
        qe.atypes = {'Si': [28.086,"Si.pbe-mt_fhi.UPF"]}
        qe.control['prefix'] = "'si'"
        qe.control['wf_collect'] = '.true.'
        qe.control['pseudo_dir'] = "'../pseudos'"
        qe.system['celldm(1)'] = 10.3
        qe.system['ecutwfc'] = 40
        qe.system['occupations'] = "'fixed'"
        qe.system['nat'] = 2
        qe.system['ntyp'] = 1
        qe.system['ibrav'] = 2
        qe.kpoints = [4, 4, 4]
        qe.electrons['conv_thr'] = 1e-8
        return qe

    def test_pw_input_relax(self):
        """ Generate a silicon pw.x input file for the relaxation cycle
        """
        if not os.path.isdir('relax'):
            os.mkdir('relax')
        qe = self.get_inputfile()
        qe.control['calculation'] = "'vc-relax'"
        qe.ions['ion_dynamics'] = "'bfgs'"
        qe.cell['cell_dynamics'] = "'bfgs'"
        qe.write('relax/si.scf')
        # generated input must match the stored reference byte for byte
        self.assertEqual(filecmp.cmp('relax/si.scf', 'reference_si/relax_si.scf'),True)

    def test_pw_input_scf(self):
        """ Generate a silicon pw.x input file for the self consistent cycle
        """
        if not os.path.isdir('scf'):
            os.mkdir('scf')
        qe = self.get_inputfile()
        qe.control['calculation'] = "'scf'"
        qe.write('scf/si.scf')
        self.assertEqual(filecmp.cmp('scf/si.scf', 'reference_si/scf_si.scf'),True)

    def test_pw_input_nscf(self):
        """ Generate a silicon pw.x input file for the non self consistent cycle
        """
        if not os.path.isdir('nscf'):
            os.mkdir('nscf')
        qe = self.get_inputfile()
        qe.control['calculation'] = "'nscf'"
        qe.electrons['diago_full_acc'] = ".true."
        qe.electrons['conv_thr'] = 1e-8
        # extra empty bands and symmorphic symmetries needed by yambo later
        qe.system['nbnd'] = 30
        qe.system['force_symmorphic'] = ".true."
        qe.kpoints = [2, 2, 2]
        qe.write('nscf/si.nscf')
        self.assertEqual(filecmp.cmp('nscf/si.nscf', 'reference_si/nscf_si.nscf'),True)
class TestPW_Si_Run(unittest.TestCase):
    """ This class creates the input files and runs the pw.x code
    """
    def test_pw_si(self):
        """ Run relaxation, self consistent cycle and non self consistent cycle
        """
        # BUGFIX: the instance parameter was misspelled `sef`.
        # Step 1: relax the structure and read the optimized cell/positions.
        print("\nstep 1: relax")
        os.system('cd relax; pw.x < si.scf > si.scf.log')
        e = PwXML('si',path='relax')
        pos = e.get_scaled_positions()
        # Propagate the relaxed lattice parameter and atomic positions
        # into the scf input file.
        q = PwIn('scf/si.scf')
        print("old celldm(1)", q.system['celldm(1)'])
        q.system['celldm(1)'] = e.cell[0][2]*2
        print("new celldm(1)", q.system['celldm(1)'])
        q.atoms = list(zip([a[0] for a in q.atoms],pos))
        q.write('scf/si.scf')
        # Step 2: self-consistent run on the relaxed structure.
        print("step 2: scf")
        os.system('cd scf; pw.x < si.scf > si.scf.log')
        os.system('cp -r scf/si.save nscf')
        # Step 3: non self-consistent run on top of the scf density.
        print("step 3: nscf")
        os.system('cd nscf; pw.x < si.nscf > si.nscf.log')
class TestYamboPrep_Si(unittest.TestCase):
    """Conversion of the QE output into a yambo SAVE database."""

    def test_yambo_preparation(self):
        """ Run p2y and yambo to prepare the database
        """
        if not os.path.isdir('database'):
            os.mkdir('database')
        # p2y converts the QE wavefunctions; yambo initializes the SAVE dir
        os.system('cd nscf/si.save; p2y 2> ../../database/p2y.log')
        os.system('cd nscf/si.save; yambo 2> ../../database/yambo.log')
        os.system('mv nscf/si.save/SAVE database')
class TestYamboIn_GW_Si(unittest.TestCase):
    """Creation of yambo GW input files (no calculation is run here)."""

    def setUp(self):
        """ Prepare the databases
        """
        if not os.path.isdir('gw/SAVE'):
            os.makedirs('gw/SAVE')
        if not os.path.isdir('gw_conv/SAVE'):
            os.makedirs('gw_conv/SAVE')
        if not os.path.isdir('database/SAVE'):
            os.makedirs('database')
        # unpack the pre-computed SAVE database and share it with both folders
        os.system('cd database; tar xfz ../reference_si/yambo_gw_conv/gw_conv.tar.gz')
        os.system('cp -r database/SAVE gw')
        os.system('cp -r database/SAVE gw_conv')

    def test_gw_input(self):
        """ Test if we can initialize the YamboIn class for a typical GW input file
        """
        y = YamboIn('yambo -p p -g n -V all',folder='gw')

    def test_gw_convergence(self):
        """ Test if we can generate multiple input files changing some variables
        """
        y = YamboIn('yambo -p p -g n -V all',folder='gw_conv')
        # one generated input file per value of each convergence parameter
        conv = { 'FFTGvecs': [[5,10,15],'Ry'],
                 'NGsBlkXp': [[1,2,5], 'Ry'],
                 'BndsRnXp': [[1,10],[1,20],[1,30]] }
        y.optimize(conv)
        return y
class TestYamboIn_GW_Si_Run(unittest.TestCase):
    """Runs the generated GW convergence inputs through yambo."""

    def test_yambo_gw_si(self):
        """ Run GW calculation with yambo
        """
        y = YamboIn('yambo -p p -g n -V all',folder='gw_conv')
        conv = { 'FFTGvecs': [[5,10,15],'Ry'],
                 'NGsBlkXp': [[1,2,5], 'Ry'],
                 'BndsRnXp': [[1,10],[1,20],[1,30]] }
        y.optimize(conv)
        print()
        def run(filename):
            # run yambo on one generated input; one job/output dir per file
            folder = filename.split('.')[0]
            print(filename, folder)
            os.system('cd gw_conv; yambo -F %s -J %s -C %s 2> %s.log'%(filename,folder,folder,folder))
        y.optimize(conv,run=run)
class TestYamboIn_BSE_Si(unittest.TestCase):
    """Creation of yambo BSE input files (no calculation is run here)."""

    def setUp(self):
        """ Prepare the databases
        """
        if not os.path.isdir('bse/SAVE'):
            os.makedirs('bse/SAVE')
        if not os.path.isdir('bse_conv/SAVE'):
            os.makedirs('bse_conv/SAVE')
        if not os.path.isdir('database/SAVE'):
            os.makedirs('database')
        # unpack the pre-computed SAVE database and share it with both folders
        os.system('cd database; tar xfz ../reference_si/yambo_bse_conv/bse_conv.tar.gz')
        os.system('cp -r database/SAVE bse')
        os.system('cp -r database/SAVE bse_conv')

    def test_bse_input(self):
        """ Test if we can initialize the YamboIn class for a typical BSE input file
        """
        y = YamboIn('yambo -b -o b -k sex -y h -V all',folder='bse')

    def test_bse_convergence(self):
        """ Test if we can generate multiple input files changing some variables
        """
        y = YamboIn('yambo -b -o b -k sex -y h -V all',folder='bse_conv')
        # one generated input file per value of each convergence parameter
        conv = { 'FFTGvecs': [[5,10,15],'Ry'],
                 'NGsBlkXs': [[1,2,5], 'Ry'],
                 'BndsRnXs': [[1,10],[1,20],[1,30]] }
        y.optimize(conv)
        return y
class TestYamboIn_BSE_Si_Run(unittest.TestCase):
    """Runs the generated BSE convergence inputs through yambo."""

    def test_yambo_bse_si(self):
        """ Run BSE calculation with yambo
        """
        y = YamboIn('yambo -b -o b -k sex -y h -V all',folder='bse_conv')
        conv = { 'FFTGvecs': [[5,10,15],'Ry'],
                 'NGsBlkXs': [[1,2,5], 'Ry'],
                 'BndsRnXs': [[1,10],[1,20],[1,30]] }
        print()
        def run(filename):
            # run yambo on one generated input; one job/output dir per file
            folder = filename.split('.')[0]
            print(filename, folder)
            os.system('cd bse_conv; yambo -F %s -J %s -C %s 2> %s.log'%(filename,folder,folder,folder))
        y.optimize(conv,run=run)
class TestYamboOut_BSE_Si(unittest.TestCase):
    """Packing and analysis of the BSE output files."""

    def test_yamboout_bse_si(self):
        """ Read the yambo BSE output files and write them as .json
        """
        for dirpath,dirnames,filenames in os.walk('bse_conv'):
            #check if there are some output files in the folder
            if ([ f for f in filenames if 'o-' in f ]):
                y = YamboOut(dirpath,save_folder='bse_conv')
                y.pack()

    def test_yamboanalyse_bse_si(self):
        """ Analyse the BSE .json output files
        """
        y = YamboAnalyser('bse_conv')
        y.plot_bse('eps')
class TestYamboOut_GW_Si(unittest.TestCase):
    """Packing and analysis of the GW output files."""

    def test_yamboout_gw_si(self):
        """ Read the yambo GW output files
        """
        for dirpath,dirnames,filenames in os.walk('gw_conv'):
            #check if there are some output files in the folder
            if ([ f for f in filenames if 'o-' in f ]):
                y = YamboOut(dirpath,save_folder='gw_conv')
                y.pack()

    def test_yamboanalyse_gw_si(self):
        """ Analyse the yambo GW .json output files
        """
        y = YamboAnalyser('gw_conv')
        y.plot_gw('qp')
if __name__ == '__main__':
    # Command-line driver: selects which test suites run and counts failures.
    #parse options
    parser = argparse.ArgumentParser(description='Test the yambopy script.')
    parser.add_argument('-i','--input', action="store_true",
                        help='Generate the input files and compare with the reference ones')
    parser.add_argument('-f','--full', action="store_true",
                        help='Generate the input files, run them and compare the results')
    parser.add_argument('-c','--clean', action="store_true",
                        help='Clean all the data from a previous run')
    args = parser.parse_args()
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    #first test if yambo is installed
    sp = subprocess.PIPE
    # subprocess.call returns a non-zero exit status when yambo is missing
    yambo_not_available = subprocess.call("yambo", shell=True, stdout=sp, stderr=sp)
    if yambo_not_available:
        print("yambo not found, please install it before running the tests")
        sys.exit(1)
    # Count the number of errors
    nerrors = 0
    ul = unittest.TestLoader()
    tr = unittest.TextTestRunner(verbosity=2)
    #
    # Test pw.x
    #
    suite = ul.loadTestsFromTestCase(TestPW_Si)
    nerrors += not tr.run(suite).wasSuccessful()
    if args.full:
        suite = ul.loadTestsFromTestCase(TestPW_Si_Run)
        nerrors += not tr.run(suite).wasSuccessful()
    #
    # Test p2y and yambo
    #
    if args.full:
        suite = ul.loadTestsFromTestCase(TestYamboPrep_Si)
        nerrors += not tr.run(suite).wasSuccessful()
    #
    # Test GW on yambo
    #
    suite = ul.loadTestsFromTestCase(TestYamboIn_GW_Si)
    nerrors += not tr.run(suite).wasSuccessful()
    if args.full:
        suite = ul.loadTestsFromTestCase(TestYamboIn_GW_Si_Run)
        nerrors += not tr.run(suite).wasSuccessful()
    #
    # Test BSE on yambo
    #
    suite = ul.loadTestsFromTestCase(TestYamboIn_BSE_Si)
    nerrors += not tr.run(suite).wasSuccessful()
    if args.full:
        suite = ul.loadTestsFromTestCase(TestYamboIn_BSE_Si_Run)
        nerrors += not tr.run(suite).wasSuccessful()
        suite = ul.loadTestsFromTestCase(TestYamboOut_GW_Si)
        nerrors += not tr.run(suite).wasSuccessful()
        suite = ul.loadTestsFromTestCase(TestYamboOut_BSE_Si)
        nerrors += not tr.run(suite).wasSuccessful()
    #clean tests
    if args.clean:
        print("cleaning...")
        os.system('rm -rf scf bse bse_conv gw gw_conv nscf relax database proj.in')
        print("done!")
    # exit status is the number of failed suites
    sys.exit(nerrors)
| henriquemiranda/yambopy | tests/test_si.py | Python | bsd-3-clause | 10,900 |
# Copyright 2019 Alfredo de la Fuente - AvanzOSC
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class ContractLine(models.Model):
    """Extend contract.line with education-related data and a payment
    percentage that scales the computed subtotal."""
    _inherit = 'contract.line'

    # Share of the line amount actually invoiced (100.0 = full amount).
    payment_percentage = fields.Float(string='Percentage', default=100.0)
    user_id = fields.Many2one(
        comodel_name='res.users', string='User')
    observations = fields.Text(string='Observations')
    # Related fields mirrored from the parent contract; stored so they can
    # be searched and grouped on.
    partner_id = fields.Many2one(
        comodel_name="res.partner", string="Partner",
        related="contract_id.partner_id", store=True)
    child_id = fields.Many2one(
        comodel_name="res.partner", string="Student",
        related="contract_id.child_id", store=True)
    course_id = fields.Many2one(
        comodel_name="education.course", string="Education Course",
        related="contract_id.course_id", store=True)
    school_id = fields.Many2one(
        comodel_name="res.partner", string="Education Center",
        related="contract_id.school_id", store=True)
    academic_year_id = fields.Many2one(
        comodel_name="education.academic_year", string="Academic Year",
        related="contract_id.academic_year_id", store=True)
    pricelist_id = fields.Many2one(
        comodel_name="product.pricelist", string="Pricelist",
        related="contract_id.pricelist_id", store=True)
    price_subtotal = fields.Float(store=True)
    price_unit = fields.Float(store=True)

    @api.multi
    @api.depends('quantity', 'price_unit', 'discount', 'payment_percentage')
    def _compute_price_subtotal(self):
        """Apply payment_percentage on top of the standard subtotal."""
        super(ContractLine, self)._compute_price_subtotal()
        # Only lines with a partial percentage need rescaling.
        for line in self.filtered(lambda x: x.payment_percentage != 100.0):
            line.price_subtotal = (
                line.price_subtotal * line.payment_percentage) / 100

    @api.model
    def _prepare_invoice_line(self, invoice_id=False, invoice_values=False):
        """Propagate the payment percentage to the generated invoice line."""
        self.ensure_one()
        res = super(ContractLine, self)._prepare_invoice_line(
            invoice_id=invoice_id, invoice_values=invoice_values)
        res['payment_percentage'] = self.payment_percentage
        return res
| oihane/odoo-addons | contract_school/models/contract_line.py | Python | agpl-3.0 | 2,130 |
# -*- coding: utf8 -*-
"""
일정 하중 아래 허용응력과 안전률을 고려한 봉의 최소 직경
"""
# 수학 관련 기능을 담은 모듈을 읽어들임
import math
# 1변수 방정식의 해법을 담은 root_finding.py 의 위치를 import 경로에 추가
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.pardir, 'lab_02_root_finding')))
# 1변수 방정식의 해법을 담은 root_finding.py 의 위치를 import 경로에 추가 끝
import root_finding
# class 정의 시작
# 객체의 모양틀이 되어 줌
# 객체를 사용하면, 문제에 관련된 다른 변수들을 지정, 변경하는 것이 쉬워짐
class Experiment(object):
    """Holds the load case and allowable-stress parameters for the
    minimum-diameter problem (allowable stress with a safety factor).

    Using an object makes it easy to change the problem parameters
    (max stress, safety factor) without editing the solver function.
    """

    def __init__(self):
        # safety factor
        self.safety_factor = 2.0
        # maximum allowable stress (Pa)
        self.stress_max_Pa = 207e6
        # applied force (N); must be assigned before calling problem_to_solve()
        self.force_N = None

    def problem_to_solve(self, radius_m):
        """Allowable-stress equation including the safety factor.

        Returns stress(radius_m) - stress_max / safety_factor; its root is
        the minimum admissible radius. Returns None if force_N is unset.
        """
        if self.force_N is None:
            # BUGFIX: the message used to reference a non-existent attribute
            # ".force_None"; the attribute is .force_N.
            print(".force_N 값이 지정되지 않았음")
            result = None
        else:
            result = circular_section_stress(radius_m, self.force_N) \
                - self.stress_max_Pa / self.safety_factor
        return result
def circular_section_stress(r_m, force_N):
    """Normal stress (Pa) in a circular cross-section of radius r_m [m]
    under an axial force force_N [N]: sigma = F / (pi * r**2)."""
    cross_section_m2 = math.pi * r_m ** 2
    return force_N / cross_section_m2
def main():
    """Read the applied force from the user and solve for the minimum
    radius satisfying the allowable stress with the safety factor."""
    # Create the experiment object (holds stress limit and safety factor).
    experiment = Experiment()
    # Ask the user for the applied force to set the .force_N attribute.
    # NOTE: raw_input returns a string; float() converts it (Python 2 code).
    experiment.force_N = float(raw_input("Enter force (N):"))
    # Changing e.g. the max stress or safety factor only requires editing
    # main(), not the Experiment class itself.
    # Find the minimum radius with the bisection method.
    # Lower initial bracket (slightly above zero to avoid division by zero).
    x_l_init = root_finding.epsilon_global * 2
    # Upper initial bracket.
    x_h_init = 1.0
    # Call the bisection solver with a 1e-9 tolerance.
    result = root_finding.bisection(experiment.problem_to_solve, x_l_init, x_h_init, 1e-9)
    # Display the result (Python 2 print statement).
    print "result =", result
if "__main__" == __name__:
    main()
| kangwonlee/ECA | lab_05_python_oop/yield.py | Python | apache-2.0 | 2,411 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
""" Modul zur Extrahierung von Text aus einer .doc-Datei """
import subprocess
import loggingModule
def doc_txt(filename):
    """Extract plain text from a .doc file using the external `catdoc` tool.

    Returns the extracted text as bytes, or None if extraction fails
    (failure is logged via loggingModule.logger4).
    """
    try:
        process = subprocess.Popen(
            ['catdoc', '-w', filename],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # communicate() drains BOTH pipes (the old stdout.read() left stderr
        # unread, risking a deadlock on large stderr output) and waits for
        # the child, so no zombie process is left behind.
        text, _ = process.communicate()
        return text
    except Exception as e:
        loggingModule.logger4.error(e)
        return None  # explicit: previously fell through returning None implicitly
| sikienzl/TextExtractor | converter/current/docTxt.py | Python | mit | 469 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and test an artificial time series: 32 daily points, a moving
# median trend, a cycle of length 5, an Anscombe transform, AR order 12,
# no noise (sigma = 0.0) and 20 exogenous variables.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 5, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 12);
"""No email uniqueness on database level
Revision ID: 266ab139e8ab
Revises: 1e296c7f5e8c
Create Date: 2014-07-18 14:55:17.188121
"""
# revision identifiers, used by Alembic.
revision = '266ab139e8ab'
down_revision = '1e296c7f5e8c'
from alembic import op
def upgrade():
    # Drop the unique constraint so that several accounts may share an
    # e-mail address at the database level.
    op.drop_constraint(u'user_email_key', 'user')
def downgrade():
    # Restore database-level uniqueness of the user e-mail column.
    op.create_unique_constraint(u'user_email_key', 'user', ['email'])
| phihag/adhocracy | src/adhocracy/alembic/versions/266ab139e8ab_no_email_uniqueness_on_database_level.py | Python | agpl-3.0 | 414 |
"""Tests for the resubmit_error_certificates management command. """
import ddt
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test.utils import override_settings
from mock import patch
from opaque_keys.edx.locator import CourseLocator
from six import text_type
from six.moves import range
from badges.events.course_complete import get_completion_badge
from badges.models import BadgeAssertion
from badges.tests.factories import BadgeAssertionFactory, CourseCompleteImageConfigurationFactory
from course_modes.models import CourseMode
from lms.djangoapps.certificates.models import CertificateStatuses, GeneratedCertificate
from lms.djangoapps.grades.tests.utils import mock_passing_grade
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
class CertificateManagementTest(ModuleStoreTestCase):
    """
    Base test class for Certificate Management command tests.
    """
    # Override with the command module you wish to test.
    command = 'resubmit_error_certificates'

    def setUp(self):
        # Create one user and three courses, each with a graded sequential,
        # so subclasses can generate certificates against real course runs.
        super(CertificateManagementTest, self).setUp()
        self.user = UserFactory.create()
        self.courses = [
            CourseFactory.create()
            for __ in range(3)
        ]
        for course in self.courses:
            chapter = ItemFactory.create(parent_location=course.location)
            ItemFactory.create(parent_location=chapter.location, category='sequential', graded=True)
        CourseCompleteImageConfigurationFactory.create()

    def _create_cert(self, course_key, user, status, mode=CourseMode.HONOR):
        """Create a certificate entry. """
        # Enroll the user in the course
        CourseEnrollmentFactory.create(
            user=user,
            course_id=course_key,
            mode=mode
        )
        # Create the certificate
        GeneratedCertificate.eligible_certificates.create(
            user=user,
            course_id=course_key,
            status=status
        )

    def _assert_cert_status(self, course_key, user, expected_status):
        """Check the status of a certificate. """
        cert = GeneratedCertificate.eligible_certificates.get(user=user, course_id=course_key)
        self.assertEqual(cert.status, expected_status)
@ddt.ddt
class ResubmitErrorCertificatesTest(CertificateManagementTest):
    """Tests for the resubmit_error_certificates management command. """

    ENABLED_SIGNALS = ['course_published']

    @ddt.data(CourseMode.HONOR, CourseMode.VERIFIED)
    def test_resubmit_error_certificate(self, mode):
        # Create a certificate with status 'error'
        self._create_cert(self.courses[0].id, self.user, CertificateStatuses.error, mode)
        # Re-submit all certificates with status 'error'
        with check_mongo_calls(1):
            call_command(self.command)
        # Expect that the certificate was re-submitted
        self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.notpassing)

    def test_resubmit_error_certificate_in_a_course(self):
        # Create a certificate with status 'error'
        # in three courses.
        for idx in range(3):
            self._create_cert(self.courses[idx].id, self.user, CertificateStatuses.error)
        # Re-submit certificates for two of the courses
        call_command(self.command, course_key_list=[
            text_type(self.courses[0].id),
            text_type(self.courses[1].id)
        ])
        # Expect that the first two courses have been re-submitted,
        # but not the third course.
        self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.notpassing)
        self._assert_cert_status(self.courses[1].id, self.user, CertificateStatuses.notpassing)
        self._assert_cert_status(self.courses[2].id, self.user, CertificateStatuses.error)

    @ddt.data(
        CertificateStatuses.deleted,
        CertificateStatuses.deleting,
        CertificateStatuses.downloadable,
        CertificateStatuses.generating,
        CertificateStatuses.notpassing,
        CertificateStatuses.restricted,
        CertificateStatuses.unavailable,
    )
    def test_resubmit_error_certificate_skips_non_error_certificates(self, other_status):
        # Create certificates with an error status and some other status
        self._create_cert(self.courses[0].id, self.user, CertificateStatuses.error)
        self._create_cert(self.courses[1].id, self.user, other_status)
        # Re-submit certificates for all courses
        call_command(self.command)
        # Only the certificate with status "error" should have been re-submitted
        self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.notpassing)
        self._assert_cert_status(self.courses[1].id, self.user, other_status)

    def test_resubmit_error_certificate_none_found(self):
        # No 'error' certificates exist: the command should be a no-op.
        self._create_cert(self.courses[0].id, self.user, CertificateStatuses.downloadable)
        call_command(self.command)
        self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.downloadable)

    def test_course_caching(self):
        # Create multiple certificates for the same course
        self._create_cert(self.courses[0].id, UserFactory.create(), CertificateStatuses.error)
        self._create_cert(self.courses[0].id, UserFactory.create(), CertificateStatuses.error)
        self._create_cert(self.courses[0].id, UserFactory.create(), CertificateStatuses.error)
        # Verify that we make only one Mongo query
        # because the course is cached.
        with check_mongo_calls(1):
            call_command(self.command)

    def test_invalid_course_key(self):
        # A malformed course key should abort with a CommandError naming the key.
        invalid_key = u"invalid/"
        with self.assertRaisesRegex(CommandError, invalid_key):
            call_command(self.command, course_key_list=[invalid_key])

    def test_course_does_not_exist(self):
        phantom_course = CourseLocator(org='phantom', course='phantom', run='phantom')
        self._create_cert(phantom_course, self.user, 'error')
        call_command(self.command)
        # Expect that the certificate was NOT resubmitted
        # since the course doesn't actually exist.
        self._assert_cert_status(phantom_course, self.user, CertificateStatuses.error)
@ddt.ddt
class RegenerateCertificatesTest(CertificateManagementTest):
    """
    Tests for regenerating certificates.
    """
    command = 'regenerate_user'

    def setUp(self):
        """
        We just need one course here.
        """
        super(RegenerateCertificatesTest, self).setUp()
        self.course = self.courses[0]

    @ddt.data(True, False)
    @override_settings(CERT_QUEUE='test-queue')
    @patch.dict('django.conf.settings.FEATURES', {'ENABLE_OPENBADGES': True})
    @patch('lms.djangoapps.certificates.api.XQueueCertInterface', spec=True)
    def test_clear_badge(self, issue_badges, xqueue):
        """
        Given that I have a user with a badge
        If I run regeneration for a user
        Then certificate generation will be requested
        And the badge will be deleted if badge issuing is enabled
        """
        key = self.course.location.course_key
        self._create_cert(key, self.user, CertificateStatuses.downloadable)
        badge_class = get_completion_badge(key, self.user)
        BadgeAssertionFactory(badge_class=badge_class, user=self.user)
        # Sanity check: the badge exists before regeneration runs.
        self.assertTrue(BadgeAssertion.objects.filter(user=self.user, badge_class=badge_class))
        self.course.issue_badges = issue_badges
        self.store.update_item(self.course, None)
        args = u'-u {} -c {}'.format(self.user.email, text_type(key))
        call_command(self.command, *args.split(' '))
        # Regeneration must be requested through the XQueue interface.
        xqueue.return_value.regen_cert.assert_called_with(
            self.user,
            key,
            course=self.course,
            forced_grade=None,
            template_file=None,
            generate_pdf=True
        )
        # The badge survives only when badge issuing is disabled.
        self.assertEqual(
            bool(BadgeAssertion.objects.filter(user=self.user, badge_class=badge_class)), not issue_badges
        )

    @override_settings(CERT_QUEUE='test-queue')
    @patch('capa.xqueue_interface.XQueueInterface.send_to_queue', spec=True)
    def test_regenerating_certificate(self, mock_send_to_queue):
        """
        Given that I have a user who has not passed course
        If I run regeneration for that user
        Then certificate generation will be not be requested
        """
        key = self.course.location.course_key
        self._create_cert(key, self.user, CertificateStatuses.downloadable)
        args = u'-u {} -c {} --insecure'.format(self.user.email, text_type(key))
        call_command(self.command, *args.split(' '))
        certificate = GeneratedCertificate.eligible_certificates.get(
            user=self.user,
            course_id=key
        )
        # The non-passing user ends up with a 'notpassing' certificate and
        # nothing is sent to the queue.
        self.assertEqual(certificate.status, CertificateStatuses.notpassing)
        self.assertFalse(mock_send_to_queue.called)
class UngenerateCertificatesTest(CertificateManagementTest):
    """
    Tests for generating certificates.
    """
    command = 'ungenerated_certs'

    def setUp(self):
        """
        We just need one course here.
        """
        super(UngenerateCertificatesTest, self).setUp()
        self.course = self.courses[0]

    @override_settings(CERT_QUEUE='test-queue')
    @patch('capa.xqueue_interface.XQueueInterface.send_to_queue', spec=True)
    def test_ungenerated_certificate(self, mock_send_to_queue):
        """
        Given that I have ended course
        If I run ungenerated certs command
        Then certificates should be generated for all users who passed course
        """
        mock_send_to_queue.return_value = (0, "Successfully queued")
        key = self.course.location.course_key
        self._create_cert(key, self.user, CertificateStatuses.unavailable)
        # Force a passing grade so the command considers the user eligible.
        with mock_passing_grade():
            args = u'-c {} --insecure'.format(text_type(key))
            call_command(self.command, *args.split(' '))
        self.assertTrue(mock_send_to_queue.called)
        certificate = GeneratedCertificate.eligible_certificates.get(
            user=self.user,
            course_id=key
        )
        # The certificate transitions to 'generating' once queued.
        self.assertEqual(certificate.status, CertificateStatuses.generating)
| cpennington/edx-platform | lms/djangoapps/certificates/tests/test_cert_management.py | Python | agpl-3.0 | 10,475 |
"""
.. module:: artist_credit_name
The **Artist Credit Name** Model.
PostgreSQL Definition
---------------------
The :code:`artist_credit_name` table is defined in the MusicBrainz Server as:
.. code-block:: sql
CREATE TABLE artist_credit_name ( -- replicate (verbose)
artist_credit INTEGER NOT NULL, -- PK, references artist_credit.id CASCADE
position SMALLINT NOT NULL, -- PK
artist INTEGER NOT NULL, -- references artist.id CASCADE
name VARCHAR NOT NULL,
join_phrase TEXT NOT NULL DEFAULT ''
);
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class artist_credit_name(models.Model):
    """
    Not all parameters are listed here, only those that present some interest
    in their Django implementation.
    """
    # Upstream the primary key is the composite (artist_credit, position);
    # Django does not support composite keys, so artist_credit carries
    # primary_key here and position is declared unique on its own.
    artist_credit = models.OneToOneField('artist_credit', primary_key=True)
    position = models.SmallIntegerField(unique=True)
    artist = models.ForeignKey('artist')
    name = models.CharField(max_length=255)
    # join_phrase defaults to the empty string, mirroring the SQL DEFAULT ''
    join_phrase = models.TextField(default='')

    def __str__(self):
        return self.name

    class Meta:
        db_table = 'artist_credit_name'
| marios-zindilis/musicbrainz-django-models | musicbrainz_django_models/models/artist_credit_name.py | Python | gpl-2.0 | 1,269 |
import arrow
from dateutil.rrule import (rrulestr, rrule, rruleset,
MO, TU, WE, TH, FR, SA, SU)
from inbox.models.event import RecurringEvent, RecurringEventOverride
from inbox.events.util import parse_rrule_datetime
from inbox.log import get_logger
log = get_logger()
# How far in the future to expand recurring events
EXPAND_RECURRING_YEARS = 1
def link_events(db_session, event):
    """Attach related event rows: overrides for a master, master for an override."""
    if isinstance(event, RecurringEvent):
        # A master recurring event: find the overrides that point at it.
        return link_overrides(db_session, event)
    if isinstance(event, RecurringEventOverride):
        # An override: find the master event that spawned it.
        return link_master(db_session, event)
    return None
def link_overrides(db_session, event):
    """Find the override instances for this RecurringEvent and attach it as master."""
    # Overrides are matched on namespace, master event UID and source.
    overrides = db_session.query(RecurringEventOverride).filter_by(
        namespace_id=event.namespace_id,
        master_event_uid=event.uid,
        source=event.source,
    ).all()
    for override in overrides:
        if not override.master:
            override.master = event
    return overrides
def link_master(db_session, event):
    """Find and attach the master RecurringEvent that spawned this override.

    The master may not exist if it has not been synced yet; in that case
    ``event.master`` stays None.
    """
    if event.master:
        # Already linked; nothing to do.
        return event.master
    if event.master_event_uid:
        master = db_session.query(RecurringEvent).filter_by(
            namespace_id=event.namespace_id,
            uid=event.master_event_uid,
            source=event.source,
        ).first()
        if master:
            event.master = master
    return event.master  # This may be None.
def parse_rrule(event):
    """Parse the event's RRULE string into a dateutil rrule, or None on failure."""
    if event.rrule is None:
        return None
    # All-day events are expanded against naive UTC datetimes.
    if event.all_day:
        dtstart = event.start.to('utc').naive
    else:
        dtstart = event.start.datetime
    try:
        return rrulestr(event.rrule, dtstart=dtstart, compatible=True)
    except Exception as exc:
        log.error("Error parsing RRULE entry", event_id=event.id,
                  error=exc, exc_info=True)
        return None
def parse_exdate(event):
    """Parse the EXDATE property into a list of timezone-aware arrow datetimes."""
    if not event.exdate:
        return []
    name, values = event.exdate.split(':', 1)
    tzinfo = 'UTC'
    # Handle TZID in EXDATE (TODO: submit PR to python-dateutil)
    for param in name.split(';'):
        if param.startswith('TZID'):
            tzinfo = param[5:]
    # Convert each excluded date to a timezone-aware datetime.
    return [parse_rrule_datetime(value, tzinfo) for value in values.split(',')]
def get_start_times(event, start=None, end=None):
    # Expands the rrule on event to return a list of arrow datetimes
    # representing start times for its recurring instances.
    # If start and/or end are supplied, will return times within that range,
    # otherwise defaults to the event start date and now + 1 year;
    # this can return a lot of instances if the event recurs more frequently
    # than weekly!
    if isinstance(event, RecurringEvent):
        # Localize first so that expansion covers DST
        if event.start_timezone:
            event.start = event.start.to(event.start_timezone)
        if not start:
            start = event.start
        else:
            start = arrow.get(start)
        if not end:
            end = arrow.utcnow().replace(years=+EXPAND_RECURRING_YEARS)
        else:
            end = arrow.get(end)
        rrules = parse_rrule(event)
        if not rrules:
            log.warn('Tried to expand a non-recurring event',
                     event_id=event.id)
            return [event.start]
        excl_dates = parse_exdate(event)
        if len(excl_dates) > 0:
            if not isinstance(rrules, rruleset):
                rrules = rruleset().rrule(rrules)
            # NOTE(review): relies on Python 2's eager map() for its side
            # effect; under Python 3 this lazy map would be a no-op — confirm
            # before porting.
            map(rrules.exdate, excl_dates)
        # Return all start times between start and end, including start and
        # end themselves if they obey the rule.
        if event.all_day:
            # compare naive times, since date handling in rrulestr is naive
            # when UNTIL takes the form YYYYMMDD
            start = start.to('utc').naive
            end = end.to('utc').naive
        start_times = rrules.between(start, end, inc=True)
        # Convert back to UTC, which covers daylight savings differences
        start_times = [arrow.get(t).to('utc') for t in start_times]
        return start_times
    # Non-recurring events have exactly one start time.
    return [event.start]
# rrule constant values
# Tuple index positions mirror dateutil's frequency constants
# (rrule.YEARLY == 0, MONTHLY == 1, ... SECONDLY == 6), so freq_map[value]
# translates a numeric frequency into its name.
freq_map = ('YEARLY',
            'MONTHLY',
            'WEEKLY',
            'DAILY',
            'HOURLY',
            'MINUTELY',
            'SECONDLY')

# Index positions mirror dateutil's weekday numbering (MO == 0 ... SU == 6).
weekday_map = (MO, TU, WE, TH, FR, SA, SU)
def rrule_to_json(r):
    """Serialize an rrule (or an event carrying one) into a JSON-friendly dict.

    :param r: a dateutil ``rrule`` instance, or an event object whose RRULE
        string is parsed first via ``parse_rrule``
    :return: dict mapping rrule field names to JSON-serializable values
    """
    if not isinstance(r, rrule):
        r = parse_rrule(r)
    info = vars(r)
    j = {}
    # rrule keeps its settings in private attributes (_freq, _byweekday, ...):
    # strip the leading underscore and translate numeric constants to names.
    # .items() is behavior-identical to the old Py2-only .iteritems() and
    # also works on Python 3.
    for field, value in info.items():
        if isinstance(value, tuple) and len(value) == 1:
            value = value[0]
        if field[0] == '_':
            fieldname = field[1:]
        else:
            continue
        if fieldname.startswith('by') and value is not None:
            if fieldname == 'byweekday':
                value = str(weekday_map[value])
            j[fieldname] = value
        elif fieldname == 'freq':
            j[fieldname] = freq_map[value]
        elif fieldname in ['dtstart', 'interval', 'wkst',
                           'count', 'until']:  # tzinfo?
            j[fieldname] = value
    return j
| EthanBlackburn/sync-engine | inbox/events/recurring.py | Python | agpl-3.0 | 5,655 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2022 Jose Antonio Chavarría <jachavar@gmail.com>
# Copyright (c) 2020-2022 Alberto Gacías <alberto@migasfree.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import gettext
from ...utils import escape_format_string
class MigasLink:
    # Mixin that builds cross-model navigation links, badges and external
    # actions for the migasfree frontend.

    # URL scheme used to encode external actions for the frontend.
    PROTOCOL = 'mea'

    # Maps '<app_label>.<model_name>' to its REST API route.
    ROUTES = {
        'auth.group': 'groups',
        'app_catalog.application': 'catalog/applications',
        'app_catalog.packagesbyproject': 'catalog/project-packages',
        'app_catalog.policy': 'catalog/policies',
        'app_catalog.policygroup': 'catalog/policy-groups',
        'client.computer': 'computers',
        'client.error': 'errors',
        'client.faultdefinition': 'fault-definitions',
        'client.fault': 'faults',
        'client.migration': 'migrations',
        'client.packagehistory': 'packages-history',
        'client.statuslog': 'status-logs',
        'client.synchronization': 'synchronizations',
        'client.user': 'users',
        'core.attribute': 'attributes',
        'core.clientattribute': 'attributes',
        'core.serverattribute': 'tags',
        'core.attributeset': 'attribute-sets',
        'core.domain': 'domains',
        'core.deployment': 'deployments',
        'core.packageset': 'package-sets',
        'core.package': 'packages',
        'core.platform': 'platforms',
        'core.project': 'projects',
        'core.clientproperty': 'formulas',
        'core.serverproperty': 'stamps',
        'core.scheduledelay': 'schedule-delays',
        'core.schedule': 'schedules',
        'core.scope': 'scopes',
        'core.store': 'stores',
        'core.userprofile': 'user-profiles',
        'device.capability': 'devices/capabilities',
        'device.connection': 'devices/connections',
        'device.device': 'devices/devices',
        'device.driver': 'devices/drivers',
        'device.logical': 'devices/logical',
        'device.manufacturer': 'devices/manufacturers',
        'device.model': 'devices/models',
        'device.type': 'devices/types',
    }
def __init__(self):
self._actions = None
self._exclude_links = []
self._include_links = []
def model_to_route(self, app, model):
return self.ROUTES.get(f'{app}.{model}', '')
@staticmethod
def related_title(related_objects):
if not related_objects:
return ''
first = related_objects[0]
return first._meta.verbose_name if related_objects.count() == 1 else first._meta.verbose_name_plural
@staticmethod
def get_description(action):
return action.get('description', '')
    def is_related(self, action):
        # Decide whether an external action applies to this instance.
        # Actions without a 'related' key apply to everything; otherwise the
        # (possibly aliased) model name must appear in action['related'].
        model = self._meta.model_name.lower()
        if 'related' in action:
            # COMPUTER === CID ATTRIBUTE
            if self._meta.model_name == 'computer' or (
                (
                    self._meta.model_name in ['attribute', 'clientattribute']
                ) and self.property_att.prefix == 'CID'
            ):
                model = 'computer'
            # ATTRIBUTE SET === ATTRIBUTE
            # NOTE(review): 'or' binds looser than 'and', so the SET prefix
            # check only guards the attribute clause — confirm intent.
            elif self._meta.model_name == 'attributeset' \
                    or (self._meta.model_name == 'attribute' and self.pk > 1) \
                    and self.property_att.prefix == 'SET':
                model = 'attributeset'
            # DOMAIN === ATTRIBUTE
            elif self._meta.model_name == 'domain' or \
                    (self._meta.model_name in ['attribute', 'serverattribute'] and self.property_att.prefix == 'DMN'):
                model = 'domain'
        return 'related' not in action or model in action['related']
    def get_relations(self, request):
        # Build the list of link descriptors (API route + query + external
        # actions) for every model related to this instance, scoped to the
        # requesting user.
        user = request.user.userprofile
        server = request.META.get('HTTP_HOST')
        # Reverse one-to-many/one-to-one relations plus hidden auto-created
        # many-to-many relations pointing at this model.
        related_objects = [
            (f, f.model if f.model != self else None)
            for f in self._meta.get_fields()
            if (f.one_to_many or f.one_to_one)
            and f.auto_created and not f.concrete
        ] + [
            (f, f.model if f.model != self else None)
            for f in self._meta.get_fields(include_hidden=True)
            if f.many_to_many and f.auto_created
        ]
        # Forward (declared) many-to-many relations.
        objs = [
            (f, f.model if f.model != self else None)
            for f in self._meta.get_fields()
            if f.many_to_many and not f.auto_created
        ]
        actions = []
        data = []
        # Instance-provided actions: tuples of (title, url[, description]).
        if self._actions is not None and any(self._actions):
            for item in self._actions:
                actions.append({
                    'url': item[1],
                    'title': item[0],
                    'description': item[2] if len(item) == 3 else '',
                })
        # Externally configured actions for this model itself.
        if self._meta.model_name.lower() in settings.MIGASFREE_EXTERNAL_ACTIONS:
            element = settings.MIGASFREE_EXTERNAL_ACTIONS[self._meta.model_name.lower()]
            for action in element:
                if self.is_related(element[action]):
                    action_info = {
                        'name': action,
                        'model': self._meta.model_name,
                        'id': self.id,
                        'related_model': self._meta.model_name,
                        'related_ids': [self.id],
                        'server': server
                    }
                    actions.append({
                        'url': f'{self.PROTOCOL}://{json.dumps(action_info)}',
                        'title': element[action]['title'],
                        'description': self.get_description(element[action]),
                    })
        # First entry always describes the instance itself.
        data.append({
            'model': self.model_to_route(self._meta.app_label, self._meta.model_name),
            'pk': self.id,
            'text': f'{self._meta.verbose_name} {self.__str__()}',
            'count': 1,
            'actions': actions
        })
        # Forward many-to-many relations.
        for obj, _ in objs:
            if obj.remote_field.field.remote_field.parent_link:
                _name = obj.remote_field.field.remote_field.parent_model.__name__.lower()
            else:
                _name = obj.remote_field.field.remote_field.model.__name__.lower()
            if _name == 'attribute':
                if self._meta.model_name == 'computer' and obj.attname == 'tags':
                    _name = 'tag'
            if _name == 'permission':
                break
            # Prefer the user-scoped manager when the model provides one.
            if hasattr(obj.remote_field.model.objects, 'scope'):
                rel_objects = obj.remote_field.model.objects.scope(user).filter(
                    **{obj.remote_field.name: self.id}
                )
            else:
                rel_objects = obj.remote_field.model.objects.filter(
                    **{obj.remote_field.name: self.id}
                )
            count = rel_objects.count()
            if count:
                actions = []
                if _name in settings.MIGASFREE_EXTERNAL_ACTIONS:
                    element = settings.MIGASFREE_EXTERNAL_ACTIONS[_name]
                    for action in element:
                        # 'many'-capable actions, or single-object actions
                        # when exactly one object matched.
                        if 'many' not in element[action] or element[action]['many'] or count == 1:
                            if self.is_related(element[action]):
                                info_action = {
                                    'name': action,
                                    'model': self._meta.model_name,
                                    'id': self.id,
                                    'related_model': _name,
                                    'related_ids': list(rel_objects.values_list('id', flat=True)),
                                    'server': server,
                                }
                                actions.append({
                                    'url': f'{self.PROTOCOL}://{json.dumps(info_action)}',
                                    'title': element[action]['title'],
                                    'description': self.get_description(element[action]),
                                })
                data.append({
                    'api': {
                        'model': self.model_to_route(
                            obj.remote_field.model._meta.app_label,
                            obj.remote_field.model._meta.model_name
                        ),
                        'query': {
                            f'{obj.remote_field.name}__id': self.pk
                        }
                    },
                    'text': gettext(obj.remote_field.field.verbose_name),
                    'count': count,
                    'actions': actions
                })
        # Reverse relations, translated through transmodel().
        for related_object, _ in related_objects:
            related_model, _field = self.transmodel(related_object)
            if related_model:
                # EXCLUDE CID
                if related_model.__name__.lower() != 'computer' or not (
                    self._meta.model_name == 'attribute' and self.property_att.prefix == 'CID'
                ):
                    if f'{related_model._meta.model_name} - {_field}' not in self._exclude_links:
                        if hasattr(related_model.objects, 'scope'):
                            if related_model.__name__.lower() == 'computer':
                                # Computers are filtered to productive ones.
                                rel_objects = related_model.productive.scope(user).filter(
                                    **{related_object.field.name: self.id}
                                )
                            else:
                                rel_objects = related_model.objects.scope(user).filter(
                                    **{related_object.field.name: self.id}
                                )
                        else:
                            rel_objects = related_model.objects.filter(
                                **{related_object.field.name: self.id}
                            )
                        count = rel_objects.count()
                        if count and related_model._meta.app_label != 'authtoken':
                            actions = []
                            if related_model.__name__.lower() in settings.MIGASFREE_EXTERNAL_ACTIONS:
                                element = settings.MIGASFREE_EXTERNAL_ACTIONS[related_model.__name__.lower()]
                                for action in element:
                                    if 'many' not in element[action] or element[action]['many'] or count == 1:
                                        if self.is_related(element[action]):
                                            info_action = {
                                                'name': action,
                                                'model': self._meta.model_name,
                                                'id': self.id,
                                                'related_model': related_model.__name__.lower(),
                                                'related_ids': list(rel_objects.values_list('id', flat=True)),
                                                'server': server,
                                            }
                                            actions.append({
                                                'url': f'{self.PROTOCOL}://{json.dumps(info_action)}',
                                                'title': element[action]['title'],
                                                'description': self.get_description(element[action]),
                                            })
                            if related_model.__name__.lower() == 'computer':
                                data.append({
                                    'api': {
                                        'model': self.model_to_route(
                                            related_model._meta.app_label,
                                            related_model._meta.model_name
                                        ),
                                        'query': {
                                            _field: self.id,
                                            'status__in': 'intended,reserved,unknown'
                                        }
                                    },
                                    'text': '{} [{}]'.format(
                                        gettext(related_model._meta.verbose_name_plural),
                                        gettext(related_object.field.verbose_name)
                                    ),
                                    'count': count,
                                    'actions': actions
                                })
                            else:
                                if related_model.__name__.lower() == 'faultdefinition' \
                                        and _field == 'users__user_ptr':
                                    _field = 'users__id'
                                data.append({
                                    'api': {
                                        'model': self.model_to_route(
                                            related_model._meta.app_label,
                                            related_model._meta.model_name
                                        ),
                                        'query': {
                                            _field: self.id
                                        }
                                    },
                                    'text': '{} [{}]'.format(
                                        gettext(related_model._meta.verbose_name_plural),
                                        gettext(related_object.field.verbose_name)
                                    ),
                                    'count': count,
                                    'actions': actions
                                })
        # SPECIAL RELATIONS (model must have a method named: 'related_objects').
        actions = []
        if self._meta.model_name.lower() in [
            'device', 'deployment', 'scope', 'domain',
            'attributeset', 'faultdefinition', 'platform'
        ]:
            rel_objects = self.related_objects('computer', user)
            if rel_objects.exists():
                if 'computer' in settings.MIGASFREE_EXTERNAL_ACTIONS:
                    element = settings.MIGASFREE_EXTERNAL_ACTIONS['computer']
                    for action in element:
                        if 'many' not in element[action] or element[action]['many'] or rel_objects.count() == 1:
                            if self.is_related(element[action]):
                                info_action = {
                                    'name': action,
                                    'model': self._meta.model_name,
                                    'id': self.id,
                                    'related_model': 'computer',
                                    'related_ids': list(rel_objects.values_list('id', flat=True)),
                                    'server': server,
                                }
                                actions.append({
                                    'url': f'{self.PROTOCOL}://{json.dumps(info_action)}',
                                    'title': element[action]['title'],
                                    'description': self.get_description(element[action]),
                                })
            if self._meta.model_name.lower() == 'platform':
                from ...client.models.computer import Computer
                data.append({
                    'api': {
                        'model': 'computers',
                        'query': {
                            'platform__id': self.id,
                            'status__in': ','.join(Computer.PRODUCTIVE_STATUS)
                        }
                    },
                    'text': gettext(self.related_title(rel_objects)),
                    'count': rel_objects.count(),
                    'actions': actions
                })
            elif self._meta.model_name.lower() == 'device':
                from .attribute import Attribute
                data.append({
                    'api': {
                        'model': 'computers',
                        'query': {
                            'sync_attributes__id__in': ','.join(map(str, list(
                                Attribute.objects.scope(
                                    request.user.userprofile
                                ).filter(
                                    logical__device__id=self.id
                                ).values_list('id', flat=True)))
                            ),
                            'status__in': 'intended,reserved,unknown'
                        }
                    },
                    'text': gettext(self.related_title(rel_objects)),
                    'count': rel_objects.count(),
                    'actions': actions
                })
            else:
                data.append({
                    'api': {
                        'model': 'computers',
                        'query': {
                            'id__in': ','.join(map(str, list(rel_objects.values_list('id', flat=True))))
                        }
                    },
                    'text': gettext(self.related_title(rel_objects)),
                    'count': rel_objects.count(),
                    'actions': actions
                })
        if self._meta.model_name.lower() == 'computer':
            # special case installed packages
            installed_packages_count = self.packagehistory_set.filter(
                package__project=self.project,
                uninstall_date__isnull=True
            ).count()
            if installed_packages_count > 0:
                data.append({
                    'api': {
                        'model': self.model_to_route('client', 'packagehistory'),
                        'query': {
                            'computer__id': self.id,
                            'package__project__id': self.project.id,
                            'uninstall_date': True  # isnull = True
                        },
                    },
                    'text': f'{gettext("Installed Packages")} [{gettext("computer")}]',
                    'count': installed_packages_count,
                    'actions': actions
                })
        if self._meta.model_name.lower() == 'package':
            # special case computers with package installed
            computers_count = self.packagehistory_set.filter(
                package=self,
                uninstall_date__isnull=True
            ).count()
            if computers_count > 0:
                data.append({
                    'api': {
                        'model': self.model_to_route('client', 'computer'),
                        'query': {
                            'installed_package': self.id
                        },
                    },
                    'text': f'{gettext("Installed package")} [{gettext("computer")}]',
                    'count': computers_count,
                    'actions': actions
                })
        # Force-included links declared as '<model> - <field>' strings.
        for _include in self._include_links:
            try:
                _model_name, _field_name = _include.split(' - ')
                data.append({
                    'api': {
                        'model': self.model_to_route(
                            self._meta.app_label,
                            _model_name
                        ),
                        'query': {
                            f'{_field_name}__id': self.id
                        }
                    },
                    'text': f'{gettext(_model_name)} [{gettext(_field_name)}]'
                })
            except ValueError:
                pass
        return data
    def relations(self, request):
        # Entry point used by the API: merges the relations of equivalent
        # objects (a domain and its DMN attribute, an attribute set and its
        # SET attribute, a computer and its CID attribute).
        data = []
        if self._meta.model_name == 'node':
            from ...client.models import Computer
            data.append({
                'api': {
                    'model': 'computers',
                    'query': {'product': self.computer.product},
                },
                'text': f'{gettext("computer")} [{gettext("product")}]',
                'count': Computer.productive.scope(request.user.userprofile).filter(
                    product=self.computer.product
                ).count(),
                'actions': []
            })
            return data
        # DOMAIN === ATTRIBUTE
        if self._meta.model_name == 'domain' or (
            self._meta.model_name == 'serverattribute' and self.property_att.prefix == 'DMN'
        ):
            if self._meta.model_name == 'domain':
                from . import ServerAttribute
                domain = self
                try:
                    att = ServerAttribute.objects.get(
                        value=str(self.name),
                        property_att__prefix='DMN'
                    )
                except ObjectDoesNotExist:
                    att = None
            else:
                from . import Domain
                att = self
                try:
                    domain = Domain.objects.get(name=self.value)
                except ObjectDoesNotExist:
                    domain = None
            if att:
                att_data = att.get_relations(request)
            else:
                att_data = []
            if domain:
                set_data = domain.get_relations(request)
            # NOTE(review): set_data is undefined when domain is None —
            # possible NameError; confirm whether a fallback [] was intended.
            data = set_data + att_data
            return data
        # ATTRIBUTE SET === ATTRIBUTE
        if self._meta.model_name == 'attributeset' \
                or (self._meta.model_name == 'attribute' and self.pk > 1) \
                and self.property_att.prefix == 'SET':
            if self._meta.model_name == 'attributeset':
                from . import Attribute
                attribute_set = self
                try:
                    att = Attribute.objects.get(
                        value=str(self.name),
                        property_att__prefix='SET'
                    )
                except ObjectDoesNotExist:
                    att = None
            else:
                from . import AttributeSet
                att = self
                try:
                    attribute_set = AttributeSet.objects.get(name=self.value)
                except ObjectDoesNotExist:
                    attribute_set = None
            if att:
                att_data = att.get_relations(request)
            else:
                att_data = []
            if attribute_set:
                set_data = attribute_set.get_relations(request)
            # NOTE(review): same undefined set_data risk as the domain branch.
            data = set_data + att_data
            return data
        # COMPUTER === CID ATTRIBUTE
        if self._meta.model_name == 'computer' or (
            (
                self._meta.model_name == 'attribute' or
                self._meta.model_name == 'clientattribute'
            ) and self.property_att.prefix == 'CID'
        ):
            if self._meta.model_name == 'computer':
                from . import Attribute
                computer = self
                try:
                    cid = Attribute.objects.get(
                        value=str(self.id),
                        property_att__prefix='CID'
                    )
                except ObjectDoesNotExist:
                    cid = None
            else:
                from ...client.models import Computer
                cid = self
                computer = Computer.objects.get(pk=int(self.value))
            computer_data = computer.get_relations(request)
            if cid:
                cid_data = cid.get_relations(request)
            else:
                cid_data = []
            data = computer_data + cid_data
            return data
        else:
            # No aliasing applies: plain relations for this instance.
            return self.get_relations(request)
    def badge(self):
        # Build the badge descriptor (pk, text, status, summary) shown in the
        # UI. CID/SET/DMN attributes are first swapped for the computer,
        # attribute set or domain they represent.
        if self._meta.model_name == 'clientattribute' \
                or self._meta.model_name == 'attribute':
            if self.property_att.prefix == 'CID':
                from ...client.models import Computer
                try:
                    self = Computer.objects.get(id=self.value)
                except ObjectDoesNotExist:
                    pass
            elif self.property_att.prefix == 'SET':
                from . import AttributeSet
                try:
                    self = AttributeSet.objects.get(name=self.value)
                except ObjectDoesNotExist:
                    pass
            elif self.property_att.prefix == 'DMN':
                from . import Domain
                try:
                    self = Domain.objects.get(name=self.value)
                except ObjectDoesNotExist:
                    pass
        lnk = {
            'pk': self.id,
            'text': escape_format_string(self.__str__()),
        }
        # Status/summary depend on the (possibly substituted) model kind.
        if self._meta.model_name == 'computer':
            lnk['status'] = self.status
            lnk['summary'] = '{}, {}, {}, {}'.format(
                gettext(self.status),
                self.project,
                self.ip_address,
                self.sync_user
            )
        elif self._meta.model_name == 'domain':
            lnk['status'] = 'domain'
            lnk['summary'] = gettext(self._meta.verbose_name)
        elif self._meta.model_name == 'serverattribute' \
                or (self._meta.model_name == 'attribute' and self.property_att.sort == 'server'):
            lnk['status'] = 'tag'
            lnk['summary'] = gettext(self._meta.verbose_name)
        elif self._meta.model_name == 'attributeset' \
                or (self._meta.model_name in ['clientattribute', 'attribute'] and self.id == 1):
            lnk['status'] = 'set'
            lnk['summary'] = f'({gettext(self._meta.verbose_name)}) {self.description}'
        elif self._meta.model_name == 'clientattribute' \
                or (self._meta.model_name == 'attribute' and self.property_att.sort == 'client'):
            lnk['status'] = 'attribute'
            lnk['summary'] = self.description
        elif self._meta.model_name == 'policy':
            lnk['status'] = 'policy'
            lnk['summary'] = gettext(self._meta.verbose_name)
        return lnk
def transmodel(self, obj):
    """Map a reverse relation to (model, lookup) for filtering.

    Given a relation descriptor ``obj``, return the model class to query
    and the filter path (e.g. 'sync_attributes__id') that relates it to
    this attribute. Returns ('', '') for explicitly excluded relations.
    """
    from ...client.models import Computer
    from . import ClientAttribute, ServerAttribute

    # CID attributes on computers resolve straight to the Computer model.
    if obj.related_model._meta.label_lower == 'client.computer' and \
            self.__class__.__name__ in ['ClientAttribute', 'Attribute'] and \
            self.property_att.prefix == 'CID':
        return Computer, 'sync_attributes__id'

    if obj.related_model._meta.label_lower == 'core.attribute':
        if self.sort == 'server':
            return ServerAttribute, 'property__id'
        else:
            return ClientAttribute, 'property__id'
    elif obj.related_model._meta.label_lower == 'client.computer':
        # BUG fix: the original compared the class name with '==' against a
        # list (always False); a membership test was intended.
        if self.__class__.__name__ in ['ClientAttribute', 'Attribute', 'ServerAttribute']:
            if obj.field.related_model._meta.model_name == 'serverattribute':
                return Computer, 'tags__id'
            elif obj.field.related_model._meta.model_name == 'attribute':
                return Computer, 'sync_attributes__id'
    elif obj.related_model._meta.label_lower in [
        'admin.logentry',
        'core.scheduledelay',
        'hardware.node'
    ]:
        return '', ''  # Excluded

    if obj.field.__class__.__name__ in ['ManyRelatedManager', 'OneToOneField', 'ForeignKey']:
        return obj.related_model, f'{obj.field.name}__id'
    else:
        return obj.related_model, '{}__{}'.format(
            obj.field.name,
            obj.field.m2m_reverse_target_field_name()
        )
| migasfree/migasfree-backend | migasfree/core/models/migas_link.py | Python | gpl-3.0 | 28,879 |
"""Application factory-style module: builds the Flask app and wires every
extension, blueprint and template filter at import time.

NOTE(review): the mid-file imports (views, blueprints, models) are the
classic Flask circular-import workaround -- presumably those modules import
``app``/``db`` from here, so they must be imported only after those names
exist. Do not reorder without confirming. ``flask.ext.*`` is the deprecated
namespace package style; modern Flask uses ``flask_sqlalchemy`` etc.
"""
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID
from flask.ext.uploads import UploadSet, configure_uploads, IMAGES
from flask.ext.misaka import Misaka
from raven.contrib.flask import Sentry
import steam
app = Flask(__name__, instance_relative_config=True)
# Base settings, then instance-folder overrides.
app.config.from_object('settings')
app.config.from_pyfile('settings.py')
# Setup debugtoolbar, if we're in debug mode.
if app.debug:
    from flask.ext.debugtoolbar import DebugToolbarExtension
    toolbar = DebugToolbarExtension(app)
# Flask extensions
db = SQLAlchemy(app)
login_manager = LoginManager(app)
oid = OpenID(app)
workshopzips = UploadSet('workshopZips', 'zip')
modimages = UploadSet('modImages', IMAGES)
markdown = Misaka(app, escape=True, wrap=True)
sentry = Sentry(app)
# Setup steamodd
steam.api.key.set(app.config['STEAM_API_KEY'])
steam.api.socket_timeout.set(5)
# Setup Flask-Uploads
configure_uploads(app, [workshopzips, modimages])
# Views
import views
# Blueprints
from .tf2.views import tf2 as tf2_blueprint
from .users.views import users as users_blueprint
from .mods.views import mods as mods_blueprint
from .moderator.views import moderator as moderator_blueprint
# Admin
from .users.models import User
from .mods.models import ModAuthor
from .admin.views import admin
admin.init_app(app)
# TF2 Schema
from .tf2.models import TF2Item, TF2EquipRegion, TF2BodyGroup
app.register_blueprint(users_blueprint)
app.register_blueprint(mods_blueprint)
app.register_blueprint(tf2_blueprint)
app.register_blueprint(moderator_blueprint)
# Assets
from assets import assets
# Jinja2 Filters
from filters import format_thousands, pluralize, datetime_to_datestring
app.add_template_filter(format_thousands)
app.add_template_filter(pluralize)
app.add_template_filter(datetime_to_datestring)
# Load current app version into globals
from functions import current_version
app.config['VERSION'] = current_version() | Smashman/mods.tf | app/__init__.py | Python | gpl-3.0 | 1,987 |
from flask import render_template
from flask.views import View
class Index(View):
    """Landing-page view: renders the index template for Prishtina."""

    def dispatch_request(self):
        context = {'komuna': 'prishtina'}
        return render_template('index.html', **context)
| opendatakosovo/municipality-procurement-visualizer | gpv/views/pages/index.py | Python | gpl-2.0 | 181 |
import os
import sys
import re
from bento._config \
import \
_CLI
import six
SYS_EXECUTABLE = os.path.normpath(sys.executable)
SCRIPT_TEXT = """\
# BENTO AUTOGENERATED-CONSOLE SCRIPT
if __name__ == '__main__':
import sys
from %(module)s import %(function)s
sys.exit(%(function)s())
"""
_LAUNCHER_MANIFEST = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s.exe"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>"""
# XXX: taken from setuptools, audit this
def nt_quote_arg(arg):
    """Quote a command line argument according to Windows parsing
    rules: wrap in double quotes when it contains whitespace, escape
    embedded quotes, and double any trailing backslash run before the
    closing quote."""
    needquote = (" " in arg) or ("\t" in arg)
    pieces = []
    if needquote:
        pieces.append('"')
    pending = 0  # run of backslashes seen but not yet emitted
    for ch in arg:
        if ch == '\\':
            pending += 1
        elif ch == '"':
            # Backslashes before a quote must be doubled, then the quote
            # itself escaped.
            pieces.append('\\' * (pending * 2) + '\\"')
            pending = 0
        else:
            if pending:
                pieces.append('\\' * pending)
                pending = 0
            pieces.append(ch)
    if pending:
        pieces.append('\\' * pending)
    if needquote:
        # Double the trailing backslashes so they don't escape the
        # closing quote (pending intentionally not reset above).
        pieces.append('\\' * pending)
        pieces.append('"')
    return ''.join(pieces)
# XXX: taken verbatim from setuptools, rewrite this crap
def get_script_header(executable=SYS_EXECUTABLE, wininst=False):
    """Return the '#!...' shebang line for a generated console script.

    For wininst installs the interpreter is hard-coded to 'python.exe';
    otherwise the given executable path is quoted for Windows parsing.
    Adapted from setuptools (see the XXX above).
    """
    from distutils.command.build_scripts import first_line_re
    # NOTE(review): matching first_line_re against an empty byte string
    # can never succeed, so 'options' stays '' -- this looks like a
    # vestige of the setuptools code that parsed an existing script's
    # first line; confirm before relying on options handling.
    match = first_line_re.match(six.b(""))
    options = ''
    if match:
        options = match.group(1) or ''
    if options:
        options = ' ' + options
    if wininst:
        executable = "python.exe"
    else:
        executable = nt_quote_arg(executable)
    hdr = "#!%(executable)s%(options)s\n" % locals()
    # Non-ascii interpreter path: fall back to the '-x' trick so the
    # interpreter skips the (unparseable) shebang line.
    if six.u(hdr).encode('ascii') != hdr:
        # Non-ascii path to sys.executable, use -x to prevent warnings
        if options:
            if options.strip().startswith('-'):
                options = ' -x' + options.strip()[1:]
            # else: punt, we can't do it, let the warning happen anyway
        else:
            options = ' -x'
    #executable = fix_jython_executable(executable, options)
    hdr = "#!%(executable)s%(options)s\n" % locals()
    return hdr
def create_scripts(executables, bdir):
    """Create a launcher script per executable under ``bdir``.

    Returns a dict mapping each executable name to the node(s) written,
    dispatching on the current platform.
    """
    if sys.platform == "win32":
        builder = create_win32_script
    else:
        builder = create_posix_script
    return dict((name, builder(name, executable, bdir))
                for name, executable in executables.items())
def create_win32_script(name, executable, scripts_node):
    """Write the three files a Windows console script needs.

    Produces '<name>-script.py' (shebang + stub importing the entry
    point), '<name>.exe' (a copy of the bundled CLI launcher, _CLI) and
    '<name>.exe.manifest'. Returns the list of created nodes.
    """
    script_text = SCRIPT_TEXT % {"python_exec": SYS_EXECUTABLE,
                                 "module": executable.module,
                                 "function": executable.function}
    wininst = False
    header = get_script_header(SYS_EXECUTABLE, wininst)
    ext = '-script.py'
    launcher = _CLI
    # Prefer a console interpreter over pythonw for console scripts.
    new_header = re.sub('(?i)pythonw.exe', 'python.exe', header)
    # new_header[2:-1] strips '#!' and the trailing newline to test the
    # interpreter path's existence.
    if os.path.exists(new_header[2:-1]) or sys.platform != 'win32':
        hdr = new_header
    else:
        hdr = header
    fid = open(launcher, "rb")
    try:
        cnt = fid.read()
    finally:
        fid.close()
    def _write(name, cnt, mode):
        # Write one output node; mode is 't' (text) or 'b' (binary).
        target = scripts_node.make_node(name)
        target.safe_write(cnt, "w%s" % mode)
        return target
    nodes = []
    nodes.append(_write(name + ext, hdr + script_text, 't'))
    nodes.append(_write(name + ".exe", cnt, 'b'))
    nodes.append(_write(name + ".exe.manifest", _LAUNCHER_MANIFEST % (name,), 't'))
    return nodes
def create_posix_script(name, executable, scripts_node):
    """Write a POSIX console script node and return it in a list."""
    shebang = "#!%(python_exec)s\n" % {"python_exec": SYS_EXECUTABLE}
    stub = SCRIPT_TEXT % {"python_exec": SYS_EXECUTABLE,
                          "module": executable.module,
                          "function": executable.function}
    node = scripts_node.make_node(name)
    node.safe_write(shebang + stub)
    return [node]
| cournape/Bento | bento/commands/script_utils.py | Python | bsd-3-clause | 4,301 |
from opcodes import NOT, SUB
from rule import Rule
from z3 import BitVec, BitVecVal
# Formal equivalence check for the optimizer rule SUB(~0, X) -> NOT(X)
# over 256-bit words. NOTE: the triple-quoted block below is a bare
# expression statement, not a module docstring (it follows the imports);
# it serves only as human-readable rule description.
"""
Rule:
SUB(~0, X) -> NOT(X)
Requirements:
"""
rule = Rule()
n_bits = 256
# Input vars
X = BitVec('X', n_bits)
# Constants
ZERO = BitVecVal(0, n_bits)
# Non optimized result
nonopt = SUB(~ZERO, X)
# Optimized result
opt = NOT(X)
# Ask z3 to prove both expressions equivalent for all X.
rule.check(nonopt, opt)
| ethereum/solidity | test/formal/sub_not_zero_x_to_not_x_256.py | Python | gpl-3.0 | 347 |
import logging
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record."""

    def emit(self, record):
        # Intentionally a no-op: swallow the record.
        pass
# Module logger: errors only, routed to the no-op handler above so the
# library stays silent unless the application configures logging.
log = logging.getLogger('VizForm')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import VizjQuery
class VizForm(VizjQuery.VizjQuery):
#======================== header ==========================================
templateHeader = '''
<style type="text/css">
</style>
'''
#======================== body ============================================
templateBody = '''
<script type='text/javascript'>
// wait for the page to be loaded, then create the form (once)
$(document).ready(getData_{VIZID});
//======================= get form ========================================
function getData_{VIZID}() {{
var statusDivId;
// update the status message
statusDivId = 'status_div_{VIZID}';
updateStatus(statusDivId,'busy','');
// get updated data from the server and execute
jQuery.ajax({{
type: 'GET',
url: '/{RESOURCE}/',
timeout: 5*1000,
statusCode: {{
200: function(response) {{
try {{
drawForm_{VIZID}(response);
}} catch(err) {{
throw err;
}}
updateStatus(statusDivId,'success','');
}},
400: function() {{
updateStatus(statusDivId,'failure','Malformed.');
}},
401: function() {{
updateStatus(statusDivId,'failure','Access denied.');
}},
404: function() {{
updateStatus(statusDivId,'failure','Resource not found.');
}},
500: function() {{
updateStatus(statusDivId,'failure','Internal server error.');
}}
}},
error: function(jqXHR, textStatus, errorThrown) {{
if (textStatus=='timeout') {{
updateStatus(statusDivId,'failure','Server unreachable.');
}}
}}
}});
}}
function drawForm_{VIZID}(data) {{
var cells,
thisCell,
fieldId;
// clear old contents
document.getElementById('chart_div_{VIZID}').innerHTML = '';
// draw new table
$('<table/>', {{
'class': 'formTable_{VIZID}'
}}).appendTo('#chart_div_{VIZID}');
for (var i = 0; i < data.length; i++) {{
cells = [];
// name
thisCell = '';
thisCell += '<td>';
thisCell += data[i].name;
thisCell += '</td>';
cells.push(thisCell);
// field
fieldId = 'fieldTable_{VIZID}_'+data[i].name
if (data[i].type=='text') {{
thisCell = '';
thisCell += '<td>';
thisCell += '<input type="text"';
thisCell += ' id="'+fieldId+'"';
thisCell += ' name="'+data[i].name+'"';
thisCell += ' value="'+data[i].value+'"';
thisCell += ' class="formElems_{VIZID}"';
thisCell += '/>';
thisCell += '</td>';
}} else if (data[i].type=='password') {{
thisCell = '';
thisCell += '<td>';
thisCell += '<input type="password"';
thisCell += ' id="'+fieldId+'"';
thisCell += ' name="'+data[i].name+'"';
thisCell += ' value="'+data[i].value+'"';
thisCell += ' class="formElems_{VIZID}"';
thisCell += '/>';
thisCell += '</td>';
}} else if (data[i].type=='boolean') {{
thisCell = '';
thisCell += '<td>';
thisCell += '<input type="checkbox"';
thisCell += ' id="'+fieldId+'"';
thisCell += ' name="'+data[i].name+'"';
thisCell += ' class="formElems_{VIZID}"';
if (data[i].value==true) {{
thisCell += ' checked ';
}}
thisCell += '/>';
thisCell += '</td>';
}} else if (data[i].type=='select') {{
thisCell = '';
thisCell += '<td>';
thisCell += '<select';
thisCell += ' id="'+fieldId+'"';
thisCell += ' name="'+data[i].name+'"';
thisCell += ' class="formElems_{VIZID}"';
thisCell += '>';
for (var optidx = 0; optidx < data[i].optionDisplay.length; optidx++) {{
thisCell += '<option value="'+data[i].optionValue[optidx]+'"';
if (data[i].optionValue[optidx]==data[i].value) {{
thisCell += ' selected="selected"';
}}
thisCell += '>';
thisCell += data[i].optionDisplay[optidx];
thisCell += '</option>';
}}
thisCell += '</select>';
thisCell += '</td>';
}} else {{
thisCell = '';
thisCell += '<td>';
thisCell += 'WARNING unknown type: '+data[i].type;
thisCell += '</td>';
}}
cells.push(thisCell);
// status
thisCell = '';
thisCell += '<td>';
thisCell += '<div id="'+fieldId+'_status"></div>';
thisCell += '</td>';
cells.push(thisCell);
$('<tr/>', {{
html: cells.join('')
}}).appendTo('.formTable_{VIZID}');
}}
$('<tr/>', {{
html: '<button onclick="postFormData_{VIZID}()">Submit</button>'
}}).appendTo('.formTable_{VIZID}');
}}
//======================= post from data ==================================
function postFormData_{VIZID}() {{
var statusDivId,
formElems,
dataToSend,
i,
fieldName,
fieldValue;
// update the status message
statusDivId = 'status_div_{VIZID}';
updateStatus(statusDivId,'busy', '');
// build data to send
formElems = document.getElementsByClassName('formElems_{VIZID}');
dataToSend = {{}};
for (i=0; i<formElems.length; i++) {{
fieldName = formElems[i].name;
if (formElems[i].type=='text') {{
fieldValue = formElems[i].value;
}} else if (formElems[i].type=='password') {{
fieldValue = formElems[i].value;
}} else if (formElems[i].type=='checkbox') {{
fieldValue = formElems[i].checked;
}} else if (formElems[i].type=='select-one') {{
fieldValue = formElems[i].options[formElems[i].selectedIndex].value;
}} else {{
console.log('WARNING: in post, unexpected type '+formElems[i].type);
}}
dataToSend[fieldName] = fieldValue;
}}
jQuery.ajax({{
type: 'POST',
url: '/{RESOURCE}/',
timeout: 5*1000,
data: JSON.stringify(dataToSend),
statusCode: {{
200: function() {{
updateStatus(statusDivId,'success', '');
location.reload();
}},
400: function() {{
updateStatus(statusDivId,'failure','Malformed.');
}},
401: function() {{
updateStatus(statusDivId,'failure','Access denied.');
}},
404: function() {{
updateStatus(statusDivId,'failure','Resource not found.');
}},
500: function() {{
updateStatus(statusDivId,'failure','Internal server error.');
}}
}},
error: function(jqXHR, textStatus, errorThrown) {{
if (textStatus=='timeout') {{
updateStatus(statusDivId,'failure','Server unreachable.');
}}
}}
}});
}}
</script>
'''
def __init__(self, **kw):
    """Initialize the form visualization with autorefresh disabled.

    Forms must not refresh themselves or user input would be lost, so
    both autorefresh flags are pinned before delegating to VizjQuery.
    """
    super(VizForm, self).__init__(forbidAutorefresh=True, autorefresh=False, **kw)
| dustcloud/dustlink | views/web/dustWeb/viz/VizForm.py | Python | bsd-3-clause | 9,108 |
#!/usr/bin/env python
"""
Copyright (c) 2011, Scott Burns
All rights reserved.
"""
from os.path import join as pj
import time
import os
from string import Template
from time import strftime
from pdb import set_trace
from warnings import warn
from .config import SpecError
import util
__version__ = "0.0"
class SPM(object):
"""Main class for generating SPM batches"""
text = {
'slicetime':"""
matlabbatch{${batch_n}}.spm.temporal.st.scans = {${images}}';
matlabbatch{${batch_n}}.spm.temporal.st.nslices = ${nslices};
matlabbatch{${batch_n}}.spm.temporal.st.tr = ${tr};
matlabbatch{${batch_n}}.spm.temporal.st.ta = ${ta};
matlabbatch{${batch_n}}.spm.temporal.st.so = [${so}];
matlabbatch{${batch_n}}.spm.temporal.st.refslice = ${ref};
matlabbatch{${batch_n}}.spm.temporal.st.prefix = '${prefix}';""",
'realign-er':"""
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.data ={${images}}';
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.eoptions.quality = ${quality};
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.eoptions.sep = ${separation};
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.eoptions.fwhm = ${smoothing};
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.eoptions.rtm = ${num_passes};
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.eoptions.interp = ${e_interpolation};
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.eoptions.wrap = [${e_wrap}];
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.eoptions.weight = {'${weight}'};
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.roptions.which = [${which}];
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.roptions.interp = ${r_interpolation};
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.roptions.wrap = [${r_wrap}];
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.roptions.mask = ${mask};
matlabbatch{${batch_n}}.spm.spatial.realign.estwrite.roptions.prefix = '${prefix}';""",
'normalize-er':"""
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.subj.source = {'${source}'};
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.subj.wtsrc = '${weight_src}';
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.subj.resample = {${images}};
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.eoptions.template = {'${template}'};
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.eoptions.weight = '${template_weight}';
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.eoptions.smosrc = ${source_smooth};
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.eoptions.smoref = ${template_smooth};
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.eoptions.regtype = '${reg_type}';
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.eoptions.cutoff = ${freq_cutoff};
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.eoptions.nits = ${iterations};
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.eoptions.reg = ${regularization};
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.roptions.preserve = ${preserve};
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.roptions.bb = [${bounding_box}];
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.roptions.vox = [${voxel_size}];
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.roptions.interp = ${interpolation};
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.roptions.wrap = [${wrap}];
matlabbatch{${batch_n}}.spm.spatial.normalise.estwrite.roptions.prefix = '${prefix}';""",
'smooth':"""
matlabbatch{${batch_n}}.spm.spatial.smooth.data = {${images}};
matlabbatch{${batch_n}}.spm.spatial.smooth.fwhm = [${fwhm}];
matlabbatch{${batch_n}}.spm.spatial.smooth.dtype = ${datatype};
matlabbatch{${batch_n}}.spm.spatial.smooth.im = ${implicit};
matlabbatch{${batch_n}}.spm.spatial.smooth.prefix = '${prefix}';""",
'model':"""
matlabbatch{${batch_n}}.spm.stats.fmri_spec.dir = {'${directory}'};
matlabbatch{${batch_n}}.spm.stats.fmri_spec.timing.units = '${timing_units}';
matlabbatch{${batch_n}}.spm.stats.fmri_spec.timing.RT = ${TR};
matlabbatch{${batch_n}}.spm.stats.fmri_spec.timing.fmri_t = ${microtime_resolution};
matlabbatch{${batch_n}}.spm.stats.fmri_spec.timing.fmri_t0 = ${microtime_onset};
${session}
matlabbatch{${batch_n}}.spm.stats.fmri_spec.fact = struct('name', {}, 'levels', {});
matlabbatch{${batch_n}}.spm.stats.fmri_spec.bases.hrf.derivs = [${hrf_derivatives}];
matlabbatch{${batch_n}}.spm.stats.fmri_spec.volt = ${volterra_interactions};
matlabbatch{${batch_n}}.spm.stats.fmri_spec.global = '${global_normalization}';
matlabbatch{${batch_n}}.spm.stats.fmri_spec.mask = {'${explicit_mask}'};
matlabbatch{${batch_n}}.spm.stats.fmri_spec.cvi = '${serial_correlations}';""",
'session':"""
matlabbatch{${batch_n}}.spm.stats.fmri_spec.sess(${session_n}).scans = {${images}};
matlabbatch{${batch_n}}.spm.stats.fmri_spec.sess(${session_n}).cond = struct('name', {}, 'onset', {}, 'duration', {}, 'tmod', {}, 'pmod', {});
matlabbatch{${batch_n}}.spm.stats.fmri_spec.sess(${session_n}).multi = {'${multiple_condition_mat}'};
matlabbatch{${batch_n}}.spm.stats.fmri_spec.sess(${session_n}).regress = struct('name', {}, 'val', {});
matlabbatch{${batch_n}}.spm.stats.fmri_spec.sess(${session_n}).multi_reg = {'${multiple_regression_file}'};
matlabbatch{${batch_n}}.spm.stats.fmri_spec.sess(${session_n}).hpf = ${hpf};""",
'estimate':"""
matlabbatch{${batch_n}}.spm.stats.fmri_est.spmmat = {'${spm_mat_file}'};
matlabbatch{${batch_n}}.spm.stats.fmri_est.method.Classical = 1;""",
'contrast_manager':"""
matlabbatch{${batch_n}}.spm.stats.con.spmmat = {'${spm_mat_file}'};
${contrast}
matlabbatch{${batch_n}}.spm.stats.con.delete = ${delete};""",
'contrast':"""
matlabbatch{${batch_n}}.spm.stats.con.consess{${number}}.tcon.name = '${name}';
matlabbatch{${batch_n}}.spm.stats.con.consess{${number}}.tcon.convec = [${vector}];
matlabbatch{${batch_n}}.spm.stats.con.consess{${number}}.tcon.sessrep = '${replication}';""",
'results':"""
matlabbatch{${batch_n}}.spm.stats.results.spmmat = {'${spm_mat_file}'};
matlabbatch{${batch_n}}.spm.stats.results.conspec.titlestr = '${titlestr}';
matlabbatch{${batch_n}}.spm.stats.results.conspec.contrasts = ${contrasts};
matlabbatch{${batch_n}}.spm.stats.results.conspec.threshdesc = '${thresh_desc}';
matlabbatch{${batch_n}}.spm.stats.results.conspec.thresh = ${threshold};
matlabbatch{${batch_n}}.spm.stats.results.conspec.extent = 0;
matlabbatch{${batch_n}}.spm.stats.results.conspec.mask = struct('contrasts', {}, 'thresh', {}, 'mtype', {});
matlabbatch{${batch_n}}.spm.stats.results.units = 1;
matlabbatch{${batch_n}}.spm.stats.results.print = true;""",
'exec':"""
try
if exist('SPM.mat', 'file') == 2
delete('SPM.mat')
end
spm_jobman('serial',matlabbatch);
ec = 0;
catch ME
disp(ME.message)
disp(['SPM batch failed'])
ec = 3; % SPM failed
end
if ec == 0
d = date;
ps_file = ['spm_' d(8:end) d(4:6) d(1:2) '.ps'];
if exist(ps_file, 'file') == 2
status = copyfile(ps_file, '${new_ps}');
delete(ps_file)
if ~status
disp(['Couldnt copy postscript'])
ec = 2; % couldn't copy file
end
else
disp(['Postscript was not created'])
ec = 1; % .ps was not created
end
end
spm('quit')
disp(['Exiting with code ' num2str(ec)])
exit(ec);""",
'art':"""
ec = 0;
try
art('sess_file', '${art_sessfile}');
saveas(gcf, '${art_jpg}');
${reg_width_text}
catch ME
disp(ME.message)
ec = 3;
end
disp(['Exiting with code ' num2str(ec)])
exit(ec);
""",
'art_sess':"""
sessions: ${n_runs}
drop_flag: 0
motion_file_type: 0
end
"""}
def __init__(self, subj, paradigm, pieces, total):
"""
Parameters
----------
subj: map
at least contains 'id'
paradigm:
"""
# unpack spm settings
find_dict = lambda d,k: d[k] if k in d else {}
self.spm = {}
self.spm['g'] = find_dict(total, 'SPM')
self.spm['pr'] = find_dict(total['project'], 'SPM')
self.spm['pa'] = find_dict(paradigm, 'SPM')
self.spm['s'] = find_dict(subj, 'SPM')
self.subj = subj
self.id = subj['id']
#unpack the required information
self.paradigm = paradigm
self.par_name = paradigm['name']
if 'n_runs' in self.paradigm:
self.n_runs = self.paradigm['n_runs']
if 'n_runs' in self.subj:
self.n_runs = self.subj['n_runs']
if not hasattr(self, 'n_runs'):
raise SpecError('n_runs was not declared in the subject or paradigm')
self.n_volumes = paradigm['n_volumes']
self.out_dir = pj(paradigm['output_directory'], paradigm['name'])
self.pieces = pieces
self.project = total['project']
if 'nibble' in total and 'email' in total['nibble']:
self.email = total['nibble']['email']
self.raw = self.find_images()
self.skip = False
if len(self.raw) == 0:
self.skip = True
if not self.skip:
# self.resolve()
try:
self.resolve()
except KeyError, e:
print e, self.id
# save mlab path
if 'matlab_path' in total:
self.mlab_path = total['matlab_path']
else:
self.mlab_path = 'matlab'
def get_stages(self, piece_name):
    """Return a copy of the stages list for the named piece.

    NOTE: when zero or more than one piece matches, only a warning is
    emitted and the function falls through, returning None implicitly.
    """
    stages_list = [p['stages'] for p in self.pieces if p['name'] == piece_name]
    if len(stages_list) > 1:
        warn("piece name convergence!")
    elif len(stages_list) == 0:
        warn("no pieces with name == %s found" % (piece_name))
    else:
        return stages_list[0][:]
def get_piece(self, piece_name):
    """Return a copy of the piece dict whose 'name' matches, else {}.

    Zero or multiple matches both print a diagnostic and yield {}.
    """
    matches = [piece for piece in self.pieces if piece['name'] == piece_name]
    if len(matches) != 1:
        print("Can't find piece with name == %s" % piece_name)
        return {}
    return matches[0].copy()
def find_prefix(self, stage, piece):
    """Find the filename prefix accumulated by all preprocessing stages
    that run before ``stage``.

    For the 'pre' piece itself only stages before ``stage`` count; for
    any other piece every 'pre' stage contributes. Prefixes are joined
    newest-first (SPM prepends each stage's prefix to the filename).
    Requires self.replace_dict to hold a 'prefix' per stage.
    """
    if piece['name'] == 'pre':
        stages = piece['stages'][:]
        if stage in stages:
            ind = stages.index(stage)
        else:
            ind = len(stages)
        stages[:] = stages[:ind]
    else:
        stages = self.get_stages('pre')
    # join all previous prefixes
    stages.reverse()
    pre = ''.join([self.replace_dict[s]['prefix'] for s in stages])
    return pre
def find_images(self):
    """Find the raw image paths for this subject.

    Paths are either built from the paradigm's data_toplevel /
    run_directory / images triplet, or taken verbatim from the subject's
    paradigm-specific 'images' list. Non-existent files are dropped and
    self.n_runs is corrected to the number of files actually found.
    """
    # are the images from the subject or determined by the paradigm?
    if ('data_toplevel' in self.paradigm and
        'run_directory' in self.paradigm and
        'images' in self.paradigm):
        dtl = self.paradigm['data_toplevel']
        rd = self.paradigm['run_directory']
        im = self.paradigm['images']
        if len(rd) != len(im):
            raise SpecError("""run_directory and images must have
                the same amount of entries in your
                project's paradigm""")
        zipped = zip(rd, im)
        raw_images = [pj(dtl, self.id, r, i) for (r, i) in zipped]
    else:
        try:
            raw_images = self.subj[self.par_name]['images']
        except KeyError:
            raise SpecError("""No subject-specific images found""")
    # check that images exist
    for i, raw in enumerate(raw_images[:]):
        if not os.path.isfile(raw):
            raw_images[i] = ''
    raw_images[:] = [raw for raw in raw_images if raw != '']
    # check that n_runs equals number of images found, correct if doesn't
    if self.n_runs != len(raw_images):
        self.n_runs = len(raw_images)
    return raw_images
def mvmt_file(self, run_n):
    """Return the path to SPM's realignment-parameter file (rp_*.txt)
    for the given 1-based run number."""
    dirname, base = os.path.split(self.raw[run_n - 1])
    stem = os.path.splitext(base)[0]
    # The rp_ file carries the prefixes of every stage before realignment.
    prefix = self.find_prefix('realign-er', self.get_piece('pre'))
    return pj(dirname, 'rp_%s%s.txt' % (prefix, stem))
def long_vector(self, contrast, piece):
    """Build a full-length contrast vector for cross-session contrasts.

    When sessions must be compared, per-session vectors cannot simply be
    replicated/scaled. The contrast must therefore carry a 'vectors'
    field (note the s!) with one entry per run, e.g.::

        vectors:
        - '1 0 0'
        - '0 0 0'
        - '0 0 0'
        - '-1 0 0'

    All subjects must then have the same number of runs. Each run's
    vector is padded with one zero per art-detected regressor, read from
    the art .txt file, producing
    "vectors[0] zeros(1, n_reg run 1) vectors[1] ...".

    Raises ValueError on a run-count mismatch and IOError when an art
    .txt file is missing.
    """
    if len(contrast['vectors']) != self.n_runs:
        raise ValueError("""Cannot create such a fancy contrast when # of
            runs != number of vectors in contrast.""")
    vector_string = ''
    for v, run_n in zip(contrast['vectors'], range(1, self.n_runs + 1)):
        # load the art file (.txt) and grab the number of regressors
        try:
            with open(self.art_file(run_n, 'txt')) as f:
                n_reg = int(f.read())
        except IOError:
            raise IOError("The art.txt file hasn't been made for run # %d"
                % run_n)
        vector_string += '%s %s' % (v, '0 ' * n_reg)
    return vector_string
def art_file(self, run_n, ext='mat'):
    """Return the path to the art-created regression-and-outliers file
    for the given 1-based run number (default extension: .mat)."""
    dirname, basename = os.path.split(self.raw[run_n - 1])
    stem = os.path.splitext(basename)[0]
    fname = 'art_regression_outliers_and_movement_%s.%s' % (stem, ext)
    return pj(dirname, fname)
def generate_session(self, run_n, piece):
    """Render the SPM 'session' template text for one run (1-based).

    Starts from the cascaded 'session' settings, applies any
    subject-specific multiple_condition_mat override found under
    subj[par_name]['SPM']['session'], then resolves 'gen' placeholders
    (images, multiple_regression_file, session_n).
    """
    good_dict = self.cascade('session')
    # Subject-level override chain: subj -> par_name -> 'SPM' ->
    # 'session' -> 'multiple_condition_mat'; when that value is a dict
    # it is further keyed by piece name (multi-model paradigms).
    if self.par_name in self.subj:
        subj_par = self.subj[self.par_name]
        if 'SPM' in subj_par:
            subj_spm = subj_par['SPM']
            if 'session' in subj_spm:
                subj_sess = subj_spm['session']
                if 'multiple_condition_mat' in subj_sess:
                    if isinstance(subj_sess['multiple_condition_mat'], dict):
                        subj_multcond = subj_sess['multiple_condition_mat']
                        if piece['name'] in subj_multcond:
                            good_dict['multiple_condition_mat'] = subj_multcond[piece['name']]
    # Select this run's .mat file (per-piece nesting handled first).
    all_mult_cond = good_dict['multiple_condition_mat']
    if piece['name'] in all_mult_cond:
        all_mult_cond = all_mult_cond[piece['name']]
    good_dict['multiple_condition_mat'] = all_mult_cond[run_n - 1]
    gen_keys = [k for (k, v) in good_dict.iteritems() if v == 'gen']
    for key in gen_keys:
        if key == 'images':
            # BUG fix: the original also called
            # self.generate('images', 'session', piece) here and threw
            # the result away before this call; the dead call is removed.
            all_img = self.generate_images('session', piece)
            good_dict['images'] = all_img[run_n - 1]
        if key == 'multiple_regression_file':
            # if 'art' is a piece and the piece wants to use art, use it
            if 'art' in piece:
                use_art = piece['art']
            else:
                raise SpecError('art should be a key in the %s piece'
                                % piece['name'])
            if self.get_piece('art_rej') and use_art:
                good_dict['multiple_regression_file'] = self.art_file(run_n)
            else:
                good_dict['multiple_regression_file'] = self.mvmt_file(run_n)
        if key == 'session_n':
            good_dict['session_n'] = run_n
    return self.rep_text(self.text['session'], good_dict)
def generate_contrast(self, n_con, contrast, piece):
    """Render the SPM text for one contrast (n_con is 1-based)."""
    settings = self.cascade('contrast')
    settings.update(contrast)
    settings['number'] = n_con
    if settings['replication'] == 'none':
        # No session replication: build one long vector spanning all runs.
        settings['vector'] = self.long_vector(contrast, piece)
    return self.rep_text(self.text['contrast'], settings)
def generate_images(self, stage, piece):
    """Generate the SPM cell-array text listing every volume per run.

    Each run expands to "'<prefixed path>,<volume>'" lines for volumes
    1..n_volumes. Slicetime/realign stages wrap each run in {braces};
    stats pieces get the raw per-run list instead of one joined string.
    NOTE: 'fmt' is only bound for the stages listed below -- an
    unexpected stage would raise NameError at the join.
    """
    pre = self.find_prefix(stage, piece)
    xfm = []
    ran = range(1, self.n_volumes+1)
    for raw in self.raw:
        (dirname, base) = os.path.split(raw)
        pre_raw = pj(dirname, pre+base)
        xfm.append('\n'.join(["'%s,%d'" % (pre_raw, d) for d in ran]))
    if stage in ['slicetime', 'realign-er']:
        fmt = '{%s}'
    if stage in ['normalize-er', 'smooth', 'session']:
        fmt = '%s'
    if 'stats' not in piece['type']:
        value = '\n'.join([fmt % x for x in xfm])
    else:
        value = xfm
    return value
def generate(self, key, stage, piece):
    """Resolve a 'gen' placeholder ``key`` to its concrete value.

    Dispatches on the key name (images, source, session, directory,
    spm_mat_file, contrast); returns '' when no branch produced a value,
    which find_dict reports as a warning. 'session' and 'contrast'
    accumulate text across runs/contrasts.
    """
    value = ''
    if key == 'images':
        value = self.generate_images(stage, piece)
    if key == 'source':
        # mean image written by realignment: mean<prefix-minus-first><base>
        raw_image = self.raw[0]
        (dirname, base ) = os.path.split(raw_image)
        pre = self.find_prefix(stage, piece)
        value = pj(dirname, '%s%s%s' % ('mean', pre[1:], base))
    if key == 'session':
        for n_run in range(1,self.n_runs+1):
            value += self.generate_session(n_run, piece)
    if key == 'directory':
        value = self.analysis_dir(piece['name'])
    if key == 'spm_mat_file':
        value = pj(self.analysis_dir(piece['name']), 'SPM.mat')
    if key == 'contrast':
        contrasts = self.paradigm['contrasts']
        # for multiple model paradigms, contrasts will be a dictionary
        if piece['name'] in contrasts:
            contrasts = contrasts[piece['name']]
        for n_con, contrast in enumerate(contrasts):
            value += self.generate_contrast( n_con + 1, contrast, piece)
    return value
def make_art_sess(self, piece):
    """Write the art session file (art_sess.txt) and return its path.

    The file starts from the 'art_sess' template, then adds one
    image/motion line pair per run (1-based run numbers), terminated by
    'end'.
    """
    rep = {'n_runs':self.n_runs}
    sess_txt = self.rep_text(self.text['art_sess'], rep)
    for run_n in range(self.n_runs):
        per_sess_text = "session %d image %s\nsession %d motion %s\n"
        im_path = self.raw[run_n]
        mvmt = self.mvmt_file(run_n + 1)
        fmt = (run_n + 1, im_path, run_n + 1, mvmt)
        sess_txt += per_sess_text % fmt
    sess_txt += 'end\n'
    sess_fname = self.batch_path('art_sess', 'txt')
    with open(sess_fname, 'w') as f:
        f.writelines(sess_txt)
    return sess_fname
def analysis_dir(self, pname):
    """Return the analysis directory for piece ``pname``.

    Layout: <out_dir>/results/<subject id>/<pname>. Each level is
    created on the filesystem if missing.
    """
    subj_dir = pj(self.out_dir, 'results')
    analysis_dir = pj(subj_dir, self.id)
    piece_dir = pj(analysis_dir, pname)
    # Explicit loop instead of map(): map() is lazy on Python 3, so the
    # side-effecting make_dir calls would silently never run.
    for directory in (subj_dir, analysis_dir, piece_dir):
        self.make_dir(directory)
    return piece_dir
def cascade(self, stage):
    """Merge a stage's settings across config levels.

    Later levels override earlier ones: global ('g'), project ('pr'),
    paradigm ('pa'), subject ('s').
    """
    merged = {}
    for level in ('g', 'pr', 'pa', 's'):
        level_settings = self.spm[level]
        if stage in level_settings:
            merged.update(level_settings[stage])
    return merged
def find_dict(self, stage, piece):
    """Return the fully-resolved settings dict for a stage.

    Starts from the cascaded (global -> subject) settings, then replaces
    every value equal to 'gen' with a generated value; an empty result
    only triggers a warning, it is still stored.
    """
    good = self.cascade(stage)
    #find all the keys with values == 'gen'
    to_gen = [k for (k,v) in good.iteritems() if v == 'gen']
    for key in to_gen:
        new_value = self.generate(key, stage, piece)
        if new_value == '':
            print('Warning: no value was generated for stage:%s, key:%s' %
                (stage, key) )
        good[key] = new_value
    return good
def rep_text(self, text, d):
    """Fill ``${...}`` placeholders in ``text`` from mapping ``d``,
    leaving any unknown placeholders untouched."""
    template = Template(text)
    return template.safe_substitute(d)
def header_text(self, piece):
    """Return MATLAB comment header text, to be inserted above a SPM
    batch, plus a cd() into the piece's analysis directory."""
    header = """
%% Nibble-generated SPM batch
%% Date created: %s
%% Project: %s
%% Paradigm: %s
%% Subject: %s
%% Piece: %s
cd('%s')
"""
    fmt = (strftime('%Y - %b - %d %H:%M:%S'), self.project['name'],
        self.par_name, self.id, piece['name'], self.analysis_dir(piece['name']))
    return header % fmt
def resolve(self):
    """The guts of the SPM class: resolve each stage of each piece to
    its final MATLAB batch text, stored in self.output by piece name.

    'preprocess'/'stats' pieces accumulate header + per-stage templates
    + the exec footer; 'art' pieces render the art template. Also fills
    self.replace_dict (used later by find_prefix).
    """
    self.output = {}
    self.replace_dict = {}
    for piece in self.pieces:
        pname = piece['name']
        ptype = piece['type']
        if ptype in ('preprocess', 'stats'):
            self.output[pname] = self.header_text(piece)
            self.output[pname] += "spm fmri"
            for stage in piece['stages']:
                self.replace_dict[stage] = self.find_dict(stage, piece)
                new_stage = self.rep_text(self.text[stage],
                    self.replace_dict[stage])
                # Leftover '$' means a template key went unreplaced.
                if new_stage.count('$') > 0:
                    warn('Some keywords were not replaced')
                self.output[pname] += new_stage
            exec_dict = {'new_ps':'%s_%s.ps' % (self.id, piece['name'])}
            self.output[pname] += self.rep_text(self.text['exec'], exec_dict)
        elif ptype == 'art' :
            sess_fname = self.make_art_sess(piece)
            reg_width_list = []
            for n_run in range(1, self.n_runs+1):
                art_mat_file = self.art_file(n_run)
                reg_width_list.append("\tregression_width('%s');"
                    % art_mat_file)
            self.output[pname] = self.rep_text(self.text['art'],
                {'art_sessfile': sess_fname,
                 'art_jpg':self.piece_orig_path(piece),
                 'reg_width_text':'\n'.join(reg_width_list)})
        # add other ptypes here
        pass
def make_dir(self, path):
"""Ensure a directory exists"""
if not os.path.isdir(path):
os.makedirs(path)
def piece_path(self, piece):
"""Return the path to which a piece's batch will be written
Generated as self.out_dir/batches/self.id/piece.m"""
return self.batch_path(piece['name'], 'm')
def dump(self):
"""Write out each batch to the correct file"""
# for now assume output dir exists
#os.makedirs(pj(self.out_dir, 'batches'))
print('Dumping %s batches...' % self.id)
for piece in self.pieces:
output_path = self.piece_path(piece)
with open(output_path, 'w') as f:
try:
f.writelines(self.output[piece['name']])
except IOError:
print("Error when dumping batch text")
def log_path(self, piece):
"""Return path to a logfile for each piece"""
return self.batch_path(piece['name'], 'log')
    def batch_path(self, fname, ext):
        # Files live under <out_dir>/batches/<subject id>/<fname>.<ext>;
        # the directory is created on demand.
        # NOTE(review): ``pj`` is presumably os.path.join aliased at module
        # top -- confirm against the file header.
        batch_dir = pj(self.out_dir, 'batches', self.id)
        self.make_dir(batch_dir)
        return pj(batch_dir, '%s.%s' % (fname, ext))
def touch(self, fname):
"""Create an empty file at fname"""
open(fname, 'w').close()
    def ps2pdf(self, ps_name, pdf_name):
        """Full paths for each filename given, does what it says"""
        # Shells out to the external pstopdf tool and returns whatever
        # util.run_cmdline returns (its exit status, per callers in run()).
        return util.run_cmdline('pstopdf %s %s' % (ps_name, pdf_name))
def piece_orig_path(self, piece):
"""Return the path to the image file produced by the piece"""
if 'art' in piece['name'] :
ext = 'jpg'
else:
ext = 'ps'
return self._piece_image_path(piece['name'], ext)
def piece_pdf_path(self, piece):
""" The final pdf for the piece"""
return self._piece_image_path(piece['name'], 'pdf')
    def _piece_image_path(self, pname, ext):
        """ Private """
        # <analysis dir>/<subject id>_<piece name>.<ext>
        return pj(self.analysis_dir(pname), '%s_%s.%s' % (self.id, pname, ext))
def jpg2pdf(self, orig_file, pdf_file):
"""Convert jpg file to pdf file"""
try:
from PIL import Image
jpg_im = Image.open(orig_file)
jpg_im.save(pdf_file, 'PDF')
except ImportError, IOError:
print("Falling back to ImageMagick to convert...")
return_val = util.run_cmdline("convert %s %s" % (orig_file, pdf_file))
    def run(self):
        """Execute each piece

        For every piece without a '.finish' marker: run its .m batch
        through MATLAB, convert the resulting image to PDF, write the
        marker on success, and optionally e-mail the log + PDF.
        """
        if not self.skip:
            for piece in self.pieces:
                # The '.finish' file is the "already ran successfully" marker.
                finish_file = self.batch_path(piece['name'], 'finish')
                if not os.path.isfile(finish_file):
                    # Feed the batch into MATLAB on stdin, capture all output.
                    cmdline = '%s -nosplash < %s >& %s'
                    piece_mfile = self.piece_path(piece)
                    piece_log = self.log_path(piece)
                    strf = '%Y%m%d %H:%M:%S'
                    beg_time = time.strftime(strf)
                    print('%s:%s:%s: begin %s' % (self.par_name, self.id, piece['name'], beg_time))
                    return_val = util.run_cmdline(cmdline % (self.mlab_path, piece_mfile, piece_log))
                    end_time = time.strftime(strf)
                    print('%s:%s:%s: end %s' % (self.par_name, self.id, piece['name'], end_time))
                    v = 'Piece:%s\nBegan: %s\nEnded: %s\n'
                    email_text = v % (piece['name'], beg_time, end_time)
                    orig_file = self.piece_orig_path(piece)
                    pdf_file = self.piece_pdf_path(piece)
                    # return_val codes (per util.run_cmdline): 0 full success,
                    # 1/2 partial success around the .ps file, 3 error.
                    if return_val == 0:
                        email_text += "Success\n"
                        if os.path.isfile(orig_file):
                            _, ext = os.path.splitext(orig_file)
                            if ext == '.jpg':
                                self.jpg2pdf(orig_file, pdf_file)
                            if ext == '.ps':
                                self.ps2pdf(orig_file, pdf_file)
                    if return_val == 1:
                        email_text += "Success, no .ps file was created\n"
                    if return_val == 2:
                        email_text += "Success, couldn't copy .ps file"
                    if return_val == 3:
                        email_text += "Error(s)\n"
                        #TODO rescue ps
                    # Any flavor of success marks the piece finished.
                    if return_val in [0, 1, 2]:
                        self.touch(finish_file)
                    if os.path.isfile(piece_log):
                        with open(piece_log, 'r') as f:
                            email_text += f.read()
                    else:
                        email_text += "Couldn't open log file.\n"
                    if self.email:
                        subject_line = '%s:%s %s' % (self.project['name'],
                                                     self.par_name, self.id)
                        util.email(self.email['address'],
                                   self.email['to'],
                                   subject_line,
                                   self.email['server'],
                                   self.email['pw'],
                                   email_text, pdf_file)
                else:
                    print("%s:%s:%s: skipping" % (self.par_name, self.id, piece['name']))
def output_images(self, piece_names=['all']):
"""Return a list of output images (probably pdfs) in piece order
The returned images are checked for existence."""
output = []
for piece in self.pieces:
if piece['name'] in piece_names or piece_names[0] == 'all':
piece_pdf = self.piece_pdf_path(piece)
if os.path.isfile(piece_pdf):
output.append(piece_pdf)
return output
| EBRL/Nibble | nibble/spm.py | Python | bsd-2-clause | 29,054 |
from bears.matlab.MatlabIndentationBear import MatlabIndentationBear
from coalib.testing.LocalBearTestHelper import verify_local_bear
MatlabIndentationBearTest = verify_local_bear(
MatlabIndentationBear,
valid_files=('if a ~= b\n a\nendif\n',
'if a ~= b\n a\nendif\n',
'if a ~= b\n a\n \nelse\n a\nendif\n'),
invalid_files=(' A',
'if a ~= b\na\nendif\n',
'if a ~= b\n a\nendif\n',
'if a ~= b\n a\nendif\n',
'if a ~= b\n a\n else\n a\nendif\n'))
| seblat/coala-bears | tests/matlab/MatlabIndentationBearTest.py | Python | agpl-3.0 | 577 |
# -*- coding: utf-8 -*-
# Copyright(c) 2016-2020 Jonas Sjöberg <autonameow@jonasjberg.com>
# Source repository: https://github.com/jonasjberg/autonameow
#
# This file is part of autonameow.
#
# autonameow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# autonameow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with autonameow. If not, see <http://www.gnu.org/licenses/>.
from .extractor_crossplatform import CrossPlatformFileSystemExtractor
from .extractor_epub import EpubMetadataExtractor
from .extractor_exiftool import ExiftoolMetadataExtractor
from .extractor_filetags import FiletagsMetadataExtractor
from .extractor_guessit import GuessitMetadataExtractor
from .extractor_jpeginfo import JpeginfoMetadataExtractor
from .extractor_pandoc import PandocMetadataExtractor
| jonasjberg/autonameow | autonameow/extractors/metadata/__init__.py | Python | gpl-2.0 | 1,177 |
import sys
import time
from io import StringIO
from session import extract_header, Message
from IPython.utils import io, text, encoding
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Stream classes
#-----------------------------------------------------------------------------
class OutStream(object):
    """A file like object that publishes the stream to a 0MQ PUB socket.

    py2-era code: write() uses the ``unicode`` builtin and the py2
    iterator hook ``next``.  Writes are buffered and published as
    'stream' messages via the session.
    """
    # The time interval between automatic flushes, in seconds.
    flush_interval = 0.05
    # Optional zmq ident sent as the message topic; None means no topic.
    topic=None
    def __init__(self, session, pub_socket, name):
        self.session = session
        self.pub_socket = pub_socket
        self.name = name
        self.parent_header = {}
        self._new_buffer()
    def set_parent(self, parent):
        # Remember which request this output belongs to, so published
        # messages carry the right parent header.
        self.parent_header = extract_header(parent)
    def close(self):
        # Dropping the socket marks the stream closed; subsequent
        # write()/flush() calls raise ValueError.
        self.pub_socket = None
    def flush(self):
        #io.rprint('>>>flushing output buffer: %s<<<' % self.name) # dbg
        if self.pub_socket is None:
            raise ValueError(u'I/O operation on closed file')
        else:
            data = self._buffer.getvalue()
            if data:
                content = {u'name':self.name, u'data':data}
                msg = self.session.send(self.pub_socket, u'stream', content=content,
                                        parent=self.parent_header, ident=self.topic)
                if hasattr(self.pub_socket, 'flush'):
                    # socket itself has flush (presumably ZMQStream)
                    self.pub_socket.flush()
                self._buffer.close()
                self._new_buffer()
    def isatty(self):
        return False
    def next(self):
        # py2 iteration hook; this stream is write-only.
        raise IOError('Read not supported on a write only stream.')
    def read(self, size=-1):
        raise IOError('Read not supported on a write only stream.')
    def readline(self, size=-1):
        raise IOError('Read not supported on a write only stream.')
    def write(self, string):
        if self.pub_socket is None:
            raise ValueError('I/O operation on closed file')
        else:
            # Make sure that we're handling unicode
            if not isinstance(string, unicode):
                enc = encoding.DEFAULT_ENCODING
                string = string.decode(enc, 'replace')
            self._buffer.write(string)
            current_time = time.time()
            if self._start <= 0:
                # First write since the last flush: start the flush timer.
                self._start = current_time
            elif current_time - self._start > self.flush_interval:
                self.flush()
    def writelines(self, sequence):
        if self.pub_socket is None:
            raise ValueError('I/O operation on closed file')
        else:
            for string in sequence:
                self.write(string)
    def _new_buffer(self):
        self._buffer = StringIO()
        # _start < 0 means "no unflushed writes yet".
        self._start = -1
| cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/zmq/iostream.py | Python | lgpl-3.0 | 3,021 |
# -*- coding: utf-8 -*-
# Copyright 2014 Associazione Odoo Italia (<http://www.odoo-italia.org>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import models
| linkitspa/l10n-italy | l10n_it_pec/__init__.py | Python | agpl-3.0 | 188 |
from pyflo.components.string.string import SplitComponent
| bennyrowland/pyflo | pyflo/components/string/__init__.py | Python | mit | 58 |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common library for network ports and protocol handling."""
# Exception hierarchy: every error raised by this module derives from Error,
# so callers can catch the whole family with one clause.
class Error(Exception):
  """Base error class."""
class BadPortValue(Error):
  """Invalid port format."""
class BadPortRange(Error):
  """Out of bounds port range."""
class InvalidRange(Error):
  """Range is not valid (eg, single port)."""
class NotSinglePort(Error):
  """Port range defined instead of a single port."""
class PPP:
  """PPP: [P]ort [P]rotocol [P]airs.

  Make port/protocol pairs an object for easy comparisons
  """

  def __init__(self, service):
    """Init for PPP object.

    Args:
      service: A port/protocol pair as str (eg: '80/tcp', '22-23/tcp') or
        a nested service name (eg: 'SSH')
    """
    # Strip a trailing '# ...' comment, if any.
    self.service = service.split('#')[0].strip()
    if '/' in self.service:
      self.port = self.service.split('/')[0]
      self.protocol = self.service.split('/')[1]
      self.nested = False
    else:
      # No '/': this token names another (nested) service definition.
      self.nested = True
      self.port = None
      self.protocol = None

  @property
  def is_range(self):
    """True if the port field holds a range such as '22-23'."""
    if self.port:
      return '-' in self.port
    return False

  @property
  def is_single_port(self):
    """True if the port field holds exactly one port, e.g. '80'."""
    if self.port:
      return '-' not in self.port
    return False

  @property
  def start(self):
    """First port of the range, as int.

    Raises:
      InvalidRange: this pair does not hold a range.
    """
    # ROBUSTNESS: previously a nested service (port=None) raised a raw
    # TypeError here; now everything non-range raises InvalidRange.
    if not self.is_range:
      raise InvalidRange('%s is not a valid port range' % self.port)
    self._start = int(self.port.split('-')[0])
    return self._start

  @property
  def end(self):
    """Last port of the range, as int.

    Raises:
      InvalidRange: this pair does not hold a range.
    """
    if not self.is_range:
      raise InvalidRange('%s is not a valid port range' % self.port)
    self._end = int(self.port.split('-')[1])
    return self._end

  def __contains__(self, other):
    """True when single-port ``other`` lies in this range (same protocol)."""
    # BUG FIX: was a bare ``except:`` that swallowed *every* exception;
    # catch only the failures that mean the operands are malformed.
    try:
      return (self.start <= int(other.port) <= self.end and
              self.protocol == other.protocol)
    except (AttributeError, TypeError, ValueError):
      raise InvalidRange('%s must be a range' % self.port)

  def _compare_ports(self, other, compare):
    """Shared guts of the ordering comparisons.

    Returns False (instead of raising) when ``other`` has no usable
    single port, matching the original behavior.

    Raises:
      NotSinglePort: if *this* pair holds a range.
    """
    if not self.is_single_port:
      raise NotSinglePort('Comparisons cannot be performed on port ranges')
    # BUG FIX: the original comparisons used bare ``except:`` clauses.
    try:
      return compare(int(self.port), int(other.port))
    except (AttributeError, TypeError, ValueError):
      return False

  def __lt__(self, other):
    return self._compare_ports(other, lambda a, b: a < b)

  def __gt__(self, other):
    return self._compare_ports(other, lambda a, b: a > b)

  def __le__(self, other):
    return self._compare_ports(other, lambda a, b: a <= b)

  def __ge__(self, other):
    return self._compare_ports(other, lambda a, b: a >= b)

  def __eq__(self, other):
    # Equality additionally requires matching protocols.
    if not self.is_single_port:
      raise NotSinglePort('Comparisons cannot be performed on port ranges')
    try:
      return (int(self.port) == int(other.port) and
              self.protocol == other.protocol)
    except (AttributeError, TypeError, ValueError):
      return False
def Port(port):
  """Sanitize a port value.

  Args:
    port: a port value

  Returns:
    port: a port value

  Raises:
    BadPortValue: port is not valid integer or string
    BadPortRange: port is outside valid range
  """
  try:
    value = int(port)
  except ValueError:
    raise BadPortValue('port %s is not valid.' % port)
  if not 0 <= value <= 65535:
    raise BadPortRange('port %s is out of range 0-65535.' % port)
  return value
| google/capirca | capirca/lib/port.py | Python | apache-2.0 | 4,390 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mock
from oslo_config import cfg
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import senlin
from heat.engine.resources.openstack.senlin import node as sn
from heat.engine import scheduler
from heat.engine import template
from heat.tests import common
from heat.tests import utils
from senlinclient.common import exc
node_stack_template = """
heat_template_version: 2016-04-08
description: Senlin Node Template
resources:
senlin-node:
type: OS::Senlin::Node
properties:
name: SenlinNode
profile: fake_profile
metadata:
foo: bar
"""
class FakeNode(object):
    """Lightweight stand-in for a senlinclient node object."""
    # Attributes exposed through to_dict(); ``details`` is intentionally
    # not among them.
    _DICT_FIELDS = ('id', 'status', 'status_reason', 'name',
                    'metadata', 'profile_id', 'cluster_id')
    def __init__(self, id='some_id', status='ACTIVE'):
        self.id = id
        self.status = status
        self.status_reason = 'Unknown'
        self.name = "SenlinNode"
        self.metadata = {'foo': 'bar'}
        self.profile_id = "fake_profile"
        self.cluster_id = "fake_cluster"
        self.details = {'id': 'physical_object_id'}
    def to_dict(self):
        return dict((field, getattr(self, field))
                    for field in self._DICT_FIELDS)
class SenlinNodeTest(common.HeatTestCase):
    """Tests for the OS::Senlin::Node resource with a mocked senlin client."""
    def setUp(self):
        super(SenlinNodeTest, self).setUp()
        # All client calls go through this MagicMock; constraint validation
        # is stubbed to True so the template always validates.
        self.senlin_mock = mock.MagicMock()
        self.patchobject(sn.Node, 'client', return_value=self.senlin_mock)
        self.patchobject(senlin.ProfileConstraint, 'validate',
                         return_value=True)
        self.patchobject(senlin.ClusterConstraint, 'validate',
                         return_value=True)
        self.fake_node = FakeNode()
        self.t = template_format.parse(node_stack_template)
        self.stack = utils.parse_stack(self.t)
        self.node = self.stack['senlin-node']
    def _create_node(self):
        # Drive the create task to completion and sanity-check the state
        # before handing the resource back to the individual tests.
        self.senlin_mock.create_node.return_value = self.fake_node
        self.senlin_mock.get_node.return_value = self.fake_node
        self.senlin_mock.get_action.return_value = mock.Mock(
            status='SUCCEEDED')
        scheduler.TaskRunner(self.node.create)()
        self.assertEqual((self.node.CREATE, self.node.COMPLETE),
                         self.node.state)
        self.assertEqual(self.fake_node.id, self.node.resource_id)
        return self.node
    def test_node_create_success(self):
        self._create_node()
        expect_kwargs = {
            'name': 'SenlinNode',
            'profile_id': 'fake_profile',
            'metadata': {'foo': 'bar'},
        }
        self.senlin_mock.create_node.assert_called_once_with(
            **expect_kwargs)
        self.senlin_mock.get_node.assert_called_once_with(self.fake_node.id)
    def test_node_create_error(self):
        # Disable retries so the ERROR status surfaces immediately.
        cfg.CONF.set_override('action_retry_limit', 0, enforce_type=True)
        self.senlin_mock.create_node.return_value = self.fake_node
        self.senlin_mock.get_node.return_value = FakeNode(
            status='ERROR')
        create_task = scheduler.TaskRunner(self.node.create)
        ex = self.assertRaises(exception.ResourceFailure, create_task)
        expected = ('ResourceInError: resources.senlin-node: '
                    'Went to status ERROR due to "Unknown"')
        self.assertEqual(expected, six.text_type(ex))
    def test_node_delete_success(self):
        node = self._create_node()
        # ResourceNotFound on the poll means the delete has completed.
        self.senlin_mock.get_node.side_effect = [
            exc.sdkexc.ResourceNotFound('SenlinNode'),
        ]
        scheduler.TaskRunner(node.delete)()
        self.senlin_mock.delete_node.assert_called_once_with(
            node.resource_id)
    def test_cluster_delete_error(self):
        # NOTE(review): the 'cluster' in this test's name (and in
        # test_cluster_resolve_attribute below) looks like a copy/paste from
        # the cluster tests -- both exercise the node resource.
        node = self._create_node()
        self.senlin_mock.get_node.side_effect = exception.Error('oops')
        delete_task = scheduler.TaskRunner(node.delete)
        ex = self.assertRaises(exception.ResourceFailure, delete_task)
        expected = 'Error: resources.senlin-node: oops'
        self.assertEqual(expected, six.text_type(ex))
    def test_node_update_profile(self):
        node = self._create_node()
        new_t = copy.deepcopy(self.t)
        props = new_t['resources']['senlin-node']['properties']
        props['profile'] = 'new_profile'
        props['name'] = 'new_name'
        rsrc_defns = template.Template(new_t).resource_definitions(self.stack)
        new_node = rsrc_defns['senlin-node']
        self.senlin_mock.update_node.return_value = mock.Mock(
            location='/actions/fake-action')
        scheduler.TaskRunner(node.update, new_node)()
        self.assertEqual((node.UPDATE, node.COMPLETE), node.state)
        node_update_kwargs = {
            'profile_id': 'new_profile',
            'name': 'new_name'
        }
        self.senlin_mock.update_node.assert_called_once_with(
            node.resource_id, **node_update_kwargs)
        self.senlin_mock.get_action.assert_called_once_with(
            'fake-action')
    def test_node_update_failed(self):
        node = self._create_node()
        new_t = copy.deepcopy(self.t)
        props = new_t['resources']['senlin-node']['properties']
        props['name'] = 'new_name'
        rsrc_defns = template.Template(new_t).resource_definitions(self.stack)
        new_node = rsrc_defns['senlin-node']
        self.senlin_mock.update_node.return_value = mock.Mock(
            location='/actions/fake-action')
        self.senlin_mock.get_action.return_value = mock.Mock(
            status='FAILED', status_reason='oops')
        update_task = scheduler.TaskRunner(node.update, new_node)
        ex = self.assertRaises(exception.ResourceFailure, update_task)
        expected = ('ResourceInError: resources.senlin-node: Went to '
                    'status FAILED due to "oops"')
        self.assertEqual(expected, six.text_type(ex))
        self.assertEqual((node.UPDATE, node.FAILED), node.state)
        self.senlin_mock.get_action.assert_called_once_with(
            'fake-action')
    def test_cluster_resolve_attribute(self):
        excepted_show = {
            'id': 'some_id',
            'status': 'ACTIVE',
            'status_reason': 'Unknown',
            'name': 'SenlinNode',
            'metadata': {'foo': 'bar'},
            'profile_id': 'fake_profile',
            'cluster_id': 'fake_cluster'
        }
        node = self._create_node()
        self.assertEqual(excepted_show,
                         node._show_resource())
        self.assertEqual(self.fake_node.details,
                         node._resolve_attribute('details'))
| steveb/heat | heat/tests/openstack/senlin/test_node.py | Python | apache-2.0 | 7,231 |
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import six
# Sentinel text message: after echoing it, the handler returns and the
# connection is closed.
_GOODBYE_MESSAGE = u'Goodbye'
def web_socket_do_extra_handshake(request):
    """Handshake hook: decline all extensions by installing no processors."""
    request.ws_extension_processors = list()
def web_socket_transfer_data(request):
    """Echo every received message back to the client.

    Text frames echo as text and end the loop when equal to the goodbye
    sentinel; binary frames echo as binary.  Returns when the stream is
    closed (receive_message() yields None).
    """
    while True:
        message = request.ws_stream.receive_message()
        if message is None:
            return
        is_text = isinstance(message, six.text_type)
        request.ws_stream.send_message(message, binary=not is_text)
        if is_text and message == _GOODBYE_MESSAGE:
            return
| nwjs/chromium.src | third_party/blink/web_tests/http/tests/websocket/echo-with-no-extension_wsh.py | Python | bsd-3-clause | 2,057 |
import shutil
import os
# Take index.html and copy it to each of the lesson folders. index.html itself is only a template.
for fileName in os.listdir('.'):
if os.path.isdir(fileName):
print fileName
shutil.copy2('index.html', fileName + '/index.html') | larsiusprime/HaxePloverLearn | assets/distribute_lessons.py | Python | mit | 273 |
# Create your views here.
from django.shortcuts import render
def index(request):
    """Render the stream app's landing page."""
    context = {}
    return render(request, 'stream/index.html', context)
# Created by Pearu Peterson, September 2002
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_less,
assert_allclose)
import pytest
from pytest import raises as assert_raises
from scipy.fft._pocketfft import (ifft, fft, fftn, ifftn,
rfft, irfft, rfftn, irfftn, fft2)
from numpy import (arange, add, array, asarray, zeros, dot, exp, pi,
swapaxes, cdouble)
import numpy as np
import numpy.fft
from numpy.random import rand
# "large" composite numbers supported by FFT._PYPOCKETFFT
LARGE_COMPOSITE_SIZES = [
2**13,
2**5 * 3**5,
2**3 * 3**3 * 5**2,
]
SMALL_COMPOSITE_SIZES = [
2,
2*3*5,
2*2*3*3,
]
# prime
LARGE_PRIME_SIZES = [
2011
]
SMALL_PRIME_SIZES = [
29
]
def _assert_close_in_norm(x, y, rtol, size, rdt):
# helper function for testing
err_msg = "size: %s rdt: %s" % (size, rdt)
assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
def random(size):
    """Uniform [0, 1) samples with the given shape."""
    return np.random.random_sample(size)
def get_mat(n):
    """Return the n-by-n matrix with entries m[i, j] = i + j."""
    idx = arange(n)
    return idx[:, np.newaxis] + idx[np.newaxis, :]
def direct_dft(x):
    """Naive O(n^2) reference DFT of a 1-D sequence."""
    x = asarray(x)
    n = len(x)
    out = zeros(n, dtype=cdouble)
    unit = -2j*pi/n
    for k in range(n):
        out[k] = dot(exp((unit*k)*arange(n)), x)
    return out
def direct_idft(x):
    """Naive O(n^2) reference inverse DFT of a 1-D sequence."""
    x = asarray(x)
    n = len(x)
    out = zeros(n, dtype=cdouble)
    unit = 2j*pi/n
    for k in range(n):
        out[k] = dot(exp((unit*k)*arange(n)), x)/n
    return out
def direct_dftn(x):
    """n-D DFT built by applying the 1-D fft along each axis in turn."""
    y = asarray(x)
    for ax in range(y.ndim):
        y = fft(y, axis=ax)
    return y
def direct_idftn(x):
    """n-D inverse DFT built by applying the 1-D ifft along each axis."""
    y = asarray(x)
    for ax in range(y.ndim):
        y = ifft(y, axis=ax)
    return y
def direct_rdft(x):
    """Naive real-input DFT; returns only the n//2 + 1 leading terms."""
    x = asarray(x)
    n = len(x)
    half = n//2 + 1
    out = zeros(half, dtype=cdouble)
    unit = -2j*pi/n
    for k in range(half):
        out[k] = dot(exp((unit*k)*arange(n)), x)
    return out
def direct_irdft(x, n):
    """Inverse of direct_rdft: rebuild the Hermitian-symmetric length-n
    spectrum from the half spectrum and return the real inverse DFT."""
    x = asarray(x)
    spectrum = zeros(n, dtype=cdouble)
    for k in range(n//2 + 1):
        spectrum[k] = x[k]
        if k > 0 and 2*k < n:
            # mirror the positive frequencies into the negative half
            spectrum[n-k] = np.conj(x[k])
    return direct_idft(spectrum).real
def direct_rdftn(x):
    """n-D real FFT reference: rfft on the last axis, fftn on the rest."""
    half = rfft(x)
    leading_axes = range(x.ndim - 1)
    return fftn(half, axes=leading_axes)
class _TestFFTBase(object):
    """Shared fft tests; subclasses set cdt (complex) and rdt (real) dtypes."""
    def setup_method(self):
        self.cdt = None
        self.rdt = None
        np.random.seed(1234)
    def test_definition(self):
        # fft must agree with the naive O(n^2) reference implementation.
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
        y = fft(x)
        assert_equal(y.dtype, self.cdt)
        y1 = direct_dft(x)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
        assert_array_almost_equal(fft(x),direct_dft(x))
    def test_n_argument_real(self):
        x1 = np.array([1,2,3,4], dtype=self.rdt)
        x2 = np.array([1,2,3,4], dtype=self.rdt)
        y = fft([x1,x2],n=4)
        assert_equal(y.dtype, self.cdt)
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))
    def _test_n_argument_complex(self):
        # Leading underscore: not collected by pytest (helper/disabled).
        x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
        x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
        y = fft([x1,x2],n=4)
        assert_equal(y.dtype, self.cdt)
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))
    def test_djbfft(self):
        # Power-of-two sizes, checked against numpy's FFT.
        for i in range(2,14):
            n = 2**i
            x = np.arange(n)
            y = fft(x.astype(complex))
            y2 = numpy.fft.fft(x)
            assert_array_almost_equal(y,y2)
            y = fft(x)
            assert_array_almost_equal(y,y2)
    def test_invalid_sizes(self):
        assert_raises(ValueError, fft, [])
        assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
class TestLongDoubleFFT(_TestFFTBase):
    # NOTE(review): unlike TestLongDoubleIFFT and TestRFFTLongDouble, this
    # class has no skipif guard for platforms where np.longdouble aliases
    # float64 -- confirm whether one was intended.
    def setup_method(self):
        self.cdt = np.longcomplex
        self.rdt = np.longdouble
class TestDoubleFFT(_TestFFTBase):
    # double precision: float64 input, complex128 spectrum
    def setup_method(self):
        self.cdt = np.cdouble
        self.rdt = np.double
class TestSingleFFT(_TestFFTBase):
    # single precision: float32 input, complex64 spectrum
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32
class TestFloat16FFT(object):
    """float16 input is promoted: fft must return complex64."""
    def test_1_argument_real(self):
        x1 = np.array([1, 2, 3, 4], dtype=np.float16)
        y = fft(x1, n=4)
        assert_equal(y.dtype, np.complex64)
        assert_equal(y.shape, (4, ))
        # reference computed in float32, the promotion target
        assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
    def test_n_argument_real(self):
        x1 = np.array([1, 2, 3, 4], dtype=np.float16)
        x2 = np.array([1, 2, 3, 4], dtype=np.float16)
        y = fft([x1, x2], n=4)
        assert_equal(y.dtype, np.complex64)
        assert_equal(y.shape, (2, 4))
        assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
        assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
class _TestIFFTBase(object):
    """Shared ifft tests; subclasses set cdt/rdt dtypes and rtol/atol."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
        y = ifft(x)
        y1 = direct_idft(x)
        assert_equal(y.dtype, self.cdt)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4+0j,5], self.cdt)
        assert_array_almost_equal(ifft(x),direct_idft(x))
    def test_definition_real(self):
        x = np.array([1,2,3,4,1,2,3,4], self.rdt)
        y = ifft(x)
        assert_equal(y.dtype, self.cdt)
        y1 = direct_idft(x)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4,5], dtype=self.rdt)
        # NOTE(review): this re-checks the *previous* y's dtype -- y is not
        # recomputed from the new x before the assertion; confirm intent.
        assert_equal(y.dtype, self.cdt)
        assert_array_almost_equal(ifft(x),direct_idft(x))
    def test_djbfft(self):
        # Power-of-two sizes, checked against numpy's inverse FFT.
        for i in range(2,14):
            n = 2**i
            x = np.arange(n)
            y = ifft(x.astype(self.cdt))
            y2 = numpy.fft.ifft(x)
            assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
            y = ifft(x)
            assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
    def test_random_complex(self):
        # ifft(fft(x)) and fft(ifft(x)) must round-trip to x.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.cdt)
            x = random([size]).astype(self.cdt) + 1j*x
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            assert_equal(y1.dtype, self.cdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x)
            assert_array_almost_equal(y2, x)
    def test_random_real(self):
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.rdt)
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            assert_equal(y1.dtype, self.cdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x)
            assert_array_almost_equal(y2, x)
    def test_size_accuracy(self):
        # Sanity check for the accuracy for prime and non-prime sized inputs
        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size).astype(self.rdt)
            y = ifft(fft(x))
            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
            y = fft(ifft(x))
            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
            x = (x + 1j*np.random.rand(size)).astype(self.cdt)
            y = ifft(fft(x))
            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
            y = fft(ifft(x))
            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
    def test_invalid_sizes(self):
        assert_raises(ValueError, ifft, [])
        assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
@pytest.mark.skipif(np.longdouble is np.float64,
                    reason="Long double is aliased to double")
class TestLongDoubleIFFT(_TestIFFTBase):
    # extended precision run, skipped where longdouble == float64
    def setup_method(self):
        self.cdt = np.longcomplex
        self.rdt = np.longdouble
        self.rtol = 1e-10
        self.atol = 1e-10
class TestDoubleIFFT(_TestIFFTBase):
    # double precision tolerances
    def setup_method(self):
        self.cdt = np.cdouble
        self.rdt = np.double
        self.rtol = 1e-10
        self.atol = 1e-10
class TestSingleIFFT(_TestIFFTBase):
    # single precision: looser tolerances
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32
        self.rtol = 1e-5
        self.atol = 1e-4
class _TestRFFTBase(object):
    """Shared rfft tests; subclasses set cdt/rdt dtypes."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        # Checked against the naive real-DFT for even and odd lengths.
        for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
            x = np.array(t, dtype=self.rdt)
            y = rfft(x)
            y1 = direct_rdft(x)
            assert_array_almost_equal(y,y1)
            assert_equal(y.dtype, self.cdt)
    def test_djbfft(self):
        for i in range(2,14):
            n = 2**i
            x = np.arange(n)
            y1 = np.fft.rfft(x)
            y = rfft(x)
            assert_array_almost_equal(y,y1)
    def test_invalid_sizes(self):
        assert_raises(ValueError, rfft, [])
        assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
    def test_complex_input(self):
        # rfft only accepts real input.
        x = np.zeros(10, dtype=self.cdt)
        with assert_raises(TypeError, match="x must be a real sequence"):
            rfft(x)
    # See gh-5790
    class MockSeries(object):
        # Array-like wrapper (pandas-Series-ish) that forwards attribute
        # access to the underlying ndarray.
        def __init__(self, data):
            self.data = np.asarray(data)
        def __getattr__(self, item):
            try:
                return getattr(self.data, item)
            except AttributeError:
                raise AttributeError(("'MockSeries' object "
                                      "has no attribute '{attr}'".
                                      format(attr=item)))
    def test_non_ndarray_with_dtype(self):
        x = np.array([1., 2., 3., 4., 5.])
        xs = _TestRFFTBase.MockSeries(x)
        expected = [1, 2, 3, 4, 5]
        rfft(xs)
        # Data should not have been overwritten
        assert_equal(x, expected)
        assert_equal(xs.data, expected)
@pytest.mark.skipif(np.longfloat is np.float64,
                    reason="Long double is aliased to double")
class TestRFFTLongDouble(_TestRFFTBase):
    # extended precision run, skipped where longdouble == float64
    def setup_method(self):
        self.cdt = np.longcomplex
        self.rdt = np.longfloat
class TestRFFTDouble(_TestRFFTBase):
    # double precision
    def setup_method(self):
        self.cdt = np.cdouble
        self.rdt = np.double
class TestRFFTSingle(_TestRFFTBase):
    # single precision
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32
class _TestIRFFTBase(object):
    """Shared irfft tests; subclasses set cdt/rdt and ndec (decimal places)."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        x1 = [1,2+3j,4+1j,1+2j,3+4j]
        # NOTE(review): x1 above is dead -- it is rebound from x1_1 two
        # lines below; confirm whether the literal was meant to differ.
        x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
        x1 = x1_1[:5]
        x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
        x2 = x2_1[:5]
        def _test(x, xr):
            # x is the half spectrum; xr the matching full Hermitian one.
            y = irfft(np.array(x, dtype=self.cdt), n=len(xr))
            y1 = direct_irdft(x, len(xr))
            assert_equal(y.dtype, self.rdt)
            assert_array_almost_equal(y,y1, decimal=self.ndec)
            assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
        _test(x1, x1_1)
        _test(x2, x2_1)
    def test_djbfft(self):
        for i in range(2,14):
            n = 2**i
            x = np.arange(-1, n, 2) + 1j * np.arange(0, n+1, 2)
            x[0] = 0
            if n % 2 == 0:
                x[-1] = np.real(x[-1])
            y1 = np.fft.irfft(x)
            y = irfft(x)
            assert_array_almost_equal(y,y1)
    def test_random_real(self):
        # irfft(rfft(x)) and rfft(irfft(x)) must round-trip to x.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.rdt)
            y1 = irfft(rfft(x), n=size)
            y2 = rfft(irfft(x, n=(size*2-1)))
            assert_equal(y1.dtype, self.rdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x, decimal=self.ndec,
                                      err_msg="size=%d" % size)
            assert_array_almost_equal(y2, x, decimal=self.ndec,
                                      err_msg="size=%d" % size)
    def test_size_accuracy(self):
        # Sanity check for the accuracy for prime and non-prime sized inputs
        # (note: rtol is only defined for float32/float64 rdt).
        if self.rdt == np.float32:
            rtol = 1e-5
        elif self.rdt == np.float64:
            rtol = 1e-10
        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size).astype(self.rdt)
            y = irfft(rfft(x), len(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            y = rfft(irfft(x, 2 * len(x) - 1))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
    def test_invalid_sizes(self):
        assert_raises(ValueError, irfft, [])
        assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
# self.ndec is bogus; we should have an assert_array_approx_equal for the
# number of significant digits
@pytest.mark.skipif(np.longfloat is np.float64,
                    reason="Long double is aliased to double")
class TestIRFFTLongDouble(_TestIRFFTBase):
    # NOTE(review): despite the long-double skip guard, this class runs
    # with plain double types and so duplicates TestIRFFTDouble.  Switching
    # to np.longcomplex/np.longfloat would first require the base class's
    # test_size_accuracy to define an rtol for long double -- confirm intent.
    def setup_method(self):
        self.cdt = np.cdouble
        self.rdt = np.double
        self.ndec = 14
class TestIRFFTDouble(_TestIRFFTBase):
    # double precision: 14 decimal places of agreement required
    def setup_method(self):
        self.cdt = np.cdouble
        self.rdt = np.double
        self.ndec = 14
class TestIRFFTSingle(_TestIRFFTBase):
    # single precision: only 5 decimal places required
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32
        self.ndec = 5
class Testfft2(object):
    """Two-dimensional FFT: regression and argument-validation tests."""
    def setup_method(self):
        np.random.seed(1234)
    def test_regression_244(self):
        """FFT returns wrong result with axes parameter."""
        # fftn (and hence fft2) used to break when both axes and shape were
        # used
        x = numpy.ones((4, 4, 2))
        y = fft2(x, s=(8, 8), axes=(-3, -2))
        y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
        assert_array_almost_equal(y, y_r)
    def test_invalid_sizes(self):
        # Empty arrays and negative target lengths must be rejected.
        assert_raises(ValueError, fft2, [[]])
        assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
class TestFftnSingle(object):
    """Single- and half-precision behaviour of ``fftn``.

    Checks that low-precision real input yields complex64 output (no silent
    upcast to double) and that the results stay close to the
    double-precision reference, measured in units in the last place (nulp).
    """
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(np.array(x, np.float32))
        # float32 input must produce complex64, not complex128.
        assert_(y.dtype == np.complex64,
                msg="double precision output with single precision")
        y_r = np.array(fftn(x), np.complex64)
        assert_array_almost_equal_nulp(y, y_r)
    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
    def test_size_accuracy_small(self, size):
        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
        y1 = fftn(x.real.astype(np.float32))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2000)
    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
    def test_size_accuracy_large(self, size):
        # Large sizes use a narrow (size, 3) array to keep the test fast.
        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
        y1 = fftn(x.real.astype(np.float32))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2000)
    def test_definition_float16(self):
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(np.array(x, np.float16))
        # float16 input is computed in single precision.
        assert_equal(y.dtype, np.complex64)
        y_r = np.array(fftn(x), np.complex64)
        assert_array_almost_equal_nulp(y, y_r)
    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
    def test_float16_input_small(self, size):
        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
        y1 = fftn(x.real.astype(np.float16))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        # float16 carries far less precision, hence the looser nulp bound.
        assert_array_almost_equal_nulp(y1, y2, 5e5)
    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
    def test_float16_input_large(self, size):
        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
        y1 = fftn(x.real.astype(np.float16))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2e6)
class TestFftn(object):
    """n-dimensional forward FFT: definition, axes selection and the
    shape (``s``) argument, checked against the naive direct DFT and
    against transforms of manually permuted/padded inputs."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        # Agreement with the direct (naive) n-dimensional DFT.
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(x)
        assert_array_almost_equal(y, direct_dftn(x))
        x = random((20, 26))
        assert_array_almost_equal(fftn(x), direct_dftn(x))
        x = random((5, 4, 3, 20))
        assert_array_almost_equal(fftn(x), direct_dftn(x))
    def test_axes_argument(self):
        # plane == ji_plane, x== kji_space
        plane1 = [[1, 2, 3],
                  [4, 5, 6],
                  [7, 8, 9]]
        plane2 = [[10, 11, 12],
                  [13, 14, 15],
                  [16, 17, 18]]
        plane3 = [[19, 20, 21],
                  [22, 23, 24],
                  [25, 26, 27]]
        # Every named plane below is a fixed permutation of the 3x3x3 cube;
        # transforming along permuted axes must match transforming the
        # correspondingly permuted data.
        ki_plane1 = [[1, 2, 3],
                     [10, 11, 12],
                     [19, 20, 21]]
        ki_plane2 = [[4, 5, 6],
                     [13, 14, 15],
                     [22, 23, 24]]
        ki_plane3 = [[7, 8, 9],
                     [16, 17, 18],
                     [25, 26, 27]]
        jk_plane1 = [[1, 10, 19],
                     [4, 13, 22],
                     [7, 16, 25]]
        jk_plane2 = [[2, 11, 20],
                     [5, 14, 23],
                     [8, 17, 26]]
        jk_plane3 = [[3, 12, 21],
                     [6, 15, 24],
                     [9, 18, 27]]
        kj_plane1 = [[1, 4, 7],
                     [10, 13, 16], [19, 22, 25]]
        kj_plane2 = [[2, 5, 8],
                     [11, 14, 17], [20, 23, 26]]
        kj_plane3 = [[3, 6, 9],
                     [12, 15, 18], [21, 24, 27]]
        ij_plane1 = [[1, 4, 7],
                     [2, 5, 8],
                     [3, 6, 9]]
        ij_plane2 = [[10, 13, 16],
                     [11, 14, 17],
                     [12, 15, 18]]
        ij_plane3 = [[19, 22, 25],
                     [20, 23, 26],
                     [21, 24, 27]]
        ik_plane1 = [[1, 10, 19],
                     [2, 11, 20],
                     [3, 12, 21]]
        ik_plane2 = [[4, 13, 22],
                     [5, 14, 23],
                     [6, 15, 24]]
        ik_plane3 = [[7, 16, 25],
                     [8, 17, 26],
                     [9, 18, 27]]
        ijk_space = [jk_plane1, jk_plane2, jk_plane3]
        ikj_space = [kj_plane1, kj_plane2, kj_plane3]
        jik_space = [ik_plane1, ik_plane2, ik_plane3]
        jki_space = [ki_plane1, ki_plane2, ki_plane3]
        kij_space = [ij_plane1, ij_plane2, ij_plane3]
        x = array([plane1, plane2, plane3])
        # Default axes == trailing axes, in either spelling.
        assert_array_almost_equal(fftn(x),
                                  fftn(x, axes=(-3, -2, -1)))  # kji_space
        assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
        assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
        y = fftn(x, axes=(2, 1, 0))  # ijk_space
        assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
        y = fftn(x, axes=(2, 0, 1))  # ikj_space
        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
                                  fftn(ikj_space))
        y = fftn(x, axes=(1, 2, 0))  # jik_space
        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
                                  fftn(jik_space))
        y = fftn(x, axes=(1, 0, 2))  # jki_space
        assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
        y = fftn(x, axes=(0, 2, 1))  # kij_space
        assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
        y = fftn(x, axes=(-2, -1))  # ji_plane
        assert_array_almost_equal(fftn(plane1), y[0])
        assert_array_almost_equal(fftn(plane2), y[1])
        assert_array_almost_equal(fftn(plane3), y[2])
        y = fftn(x, axes=(1, 2))  # ji_plane
        assert_array_almost_equal(fftn(plane1), y[0])
        assert_array_almost_equal(fftn(plane2), y[1])
        assert_array_almost_equal(fftn(plane3), y[2])
        y = fftn(x, axes=(-3, -2))  # kj_plane
        assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
        assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
        assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
        y = fftn(x, axes=(-3, -1))  # ki_plane
        assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
        assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
        assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
        y = fftn(x, axes=(-1, -2))  # ij_plane
        assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
        assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
        assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
        y = fftn(x, axes=(-1, -3))  # ik_plane
        assert_array_almost_equal(fftn(ik_plane1),
                                  swapaxes(y[:, 0, :], -1, -2))
        assert_array_almost_equal(fftn(ik_plane2),
                                  swapaxes(y[:, 1, :], -1, -2))
        assert_array_almost_equal(fftn(ik_plane3),
                                  swapaxes(y[:, 2, :], -1, -2))
        y = fftn(x, axes=(-2, -3))  # jk_plane
        assert_array_almost_equal(fftn(jk_plane1),
                                  swapaxes(y[:, :, 0], -1, -2))
        assert_array_almost_equal(fftn(jk_plane2),
                                  swapaxes(y[:, :, 1], -1, -2))
        assert_array_almost_equal(fftn(jk_plane3),
                                  swapaxes(y[:, :, 2], -1, -2))
        y = fftn(x, axes=(-1,))  # i_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
        y = fftn(x, axes=(-2,))  # j_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
        y = fftn(x, axes=(0,))  # k_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
        y = fftn(x, axes=())  # point
        assert_array_almost_equal(y, x)
    def test_shape_argument(self):
        # ``s`` zero-pads (or truncates) each axis before transforming.
        small_x = [[1, 2, 3],
                   [4, 5, 6]]
        large_x1 = [[1, 2, 3, 0],
                    [4, 5, 6, 0],
                    [0, 0, 0, 0],
                    [0, 0, 0, 0]]
        y = fftn(small_x, s=(4, 4))
        assert_array_almost_equal(y, fftn(large_x1))
        y = fftn(small_x, s=(3, 4))
        assert_array_almost_equal(y, fftn(large_x1[:-1]))
    def test_shape_axes_argument(self):
        small_x = [[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]]
        large_x1 = array([[1, 2, 3, 0],
                          [4, 5, 6, 0],
                          [7, 8, 9, 0],
                          [0, 0, 0, 0]])
        y = fftn(small_x, s=(4, 4), axes=(-2, -1))
        assert_array_almost_equal(y, fftn(large_x1))
        # Reversed axes order == transforming the transposed padded array.
        y = fftn(small_x, s=(4, 4), axes=(-1, -2))
        assert_array_almost_equal(y, swapaxes(
            fftn(swapaxes(large_x1, -1, -2)), -1, -2))
    def test_shape_axes_argument2(self):
        # Change shape of the last axis
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-1,), s=(8,))
        assert_array_almost_equal(y, fft(x, axis=-1, n=8))
        # Change shape of an arbitrary axis which is not the last one
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-2,), s=(8,))
        assert_array_almost_equal(y, fft(x, axis=-2, n=8))
        # Change shape of axes: cf #244, where shape and axes were mixed up
        x = numpy.random.random((4, 4, 2))
        y = fftn(x, axes=(-3, -2), s=(8, 8))
        assert_array_almost_equal(y,
                                  numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
    def test_shape_argument_more(self):
        # More entries in ``s`` than axes present must raise.
        x = zeros((4, 4, 2))
        with assert_raises(ValueError,
                           match="shape requires more axes than are present"):
            fftn(x, s=(8, 8, 2, 1))
    def test_invalid_sizes(self):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1, 0\]\) specified"):
            fftn([[]])
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[4, -3\]\) specified"):
            fftn([[1, 1], [2, 2]], (4, -3))
    def test_no_axes(self):
        # axes=[] is a no-op: the input comes back (numerically) unchanged.
        x = numpy.random.random((2,2,2))
        assert_allclose(fftn(x, axes=[]), x, atol=1e-7)
class TestIfftn(object):
    """Inverse n-dimensional FFT: definition, round-trips and validation."""
    dtype = None   # filled per-parametrization in test_definition
    cdtype = None
    def setup_method(self):
        np.random.seed(1234)
    @pytest.mark.parametrize('dtype,cdtype,maxnlp',
                             [(np.float64, np.complex128, 2000),
                              (np.float32, np.complex64, 3500)])
    def test_definition(self, dtype, cdtype, maxnlp):
        x = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]], dtype=dtype)
        y = ifftn(x)
        # Real input of the given precision must map to the matching
        # complex dtype, and agree with the direct inverse DFT.
        assert_equal(y.dtype, cdtype)
        assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
        x = random((20, 26))
        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
        x = random((5, 4, 3, 20))
        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
    @pytest.mark.parametrize('maxnlp', [2000, 3500])
    @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
    def test_random_complex(self, maxnlp, size):
        # fftn and ifftn must be mutual inverses for complex input.
        x = random([size, size]) + 1j*random([size, size])
        assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
        assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
    def test_invalid_sizes(self):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1, 0\]\) specified"):
            ifftn([[]])
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[4, -3\]\) specified"):
            ifftn([[1, 1], [2, 2]], (4, -3))
    def test_no_axes(self):
        # axes=[] is a no-op: the input comes back (numerically) unchanged.
        x = numpy.random.random((2,2,2))
        assert_allclose(ifftn(x, axes=[]), x, atol=1e-7)
class TestRfftn(object):
    """Real-input n-dimensional FFT (rfftn) and its inverse (irfftn)."""
    dtype = None   # filled per-parametrization in test_definition
    cdtype = None
    def setup_method(self):
        np.random.seed(1234)
    @pytest.mark.parametrize('dtype,cdtype,maxnlp',
                             [(np.float64, np.complex128, 2000),
                              (np.float32, np.complex64, 3500)])
    def test_definition(self, dtype, cdtype, maxnlp):
        x = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]], dtype=dtype)
        y = rfftn(x)
        assert_equal(y.dtype, cdtype)
        assert_array_almost_equal_nulp(y, direct_rdftn(x), maxnlp)
        x = random((20, 26))
        assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
        x = random((5, 4, 3, 20))
        assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
    @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
    def test_random(self, size):
        # irfftn(rfftn(x), x.shape) must round-trip real input exactly
        # (up to floating-point tolerance).
        x = random([size, size])
        assert_allclose(irfftn(rfftn(x), x.shape), x, atol=1e-10)
    @pytest.mark.parametrize('func', [rfftn, irfftn])
    def test_invalid_sizes(self, func):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1, 0\]\) specified"):
            func([[]])
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[4, -3\]\) specified"):
            func([[1, 1], [2, 2]], (4, -3))
    @pytest.mark.parametrize('func', [rfftn, irfftn])
    def test_no_axes(self, func):
        # Unlike fftn/ifftn, the real transforms reject an empty axes list.
        with assert_raises(ValueError,
                           match="at least 1 axis must be transformed"):
            func([], axes=[])
    def test_complex_input(self):
        with assert_raises(TypeError, match="x must be a real sequence"):
            rfftn(np.zeros(10, dtype=np.complex64))
class FakeArray(object):
    """Array impostor exposing the wrapped ndarray's buffer through
    ``__array_interface__`` so consumers see the very same memory."""
    def __init__(self, data):
        # Forward the wrapped array's interface dict verbatim, and hold a
        # reference so the underlying buffer cannot be freed while alive.
        self.__array_interface__ = data.__array_interface__
        self._data = data
class FakeArray2(object):
    """Array impostor that converts via the ``__array__`` protocol
    instead of exposing a buffer interface."""
    def __init__(self, data):
        self._data = data
    def __array__(self):
        # Conversion hands back the wrapped array object itself.
        return self._data
# TODO: Is this test actually valuable? The behavior it's testing shouldn't be
# relied upon by users except for overwrite_x = False
class TestOverwrite(object):
    """Check input overwrite behavior of the FFT functions."""
    real_dtypes = [np.float32, np.float64, np.longfloat]
    dtypes = real_dtypes + [np.complex64, np.complex128, np.longcomplex]
    fftsizes = [8, 16, 32]
    def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):
        # Run the routine through several array-like wrappers and verify the
        # input buffer is only modified when overwriting is expected.
        x2 = x.copy()
        for fake in [lambda x: x, FakeArray, FakeArray2]:
            routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
            sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
                routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
            if not should_overwrite:
                assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
    def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
                  fftsize, overwrite_x):
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)
        # Overwriting is only legitimate when it was requested, the dtype
        # supports it, and no padding (which forces a copy) is involved.
        should_overwrite = (overwrite_x
                            and dtype in overwritable_dtypes
                            and fftsize <= shape[axis])
        self._check(data, routine, fftsize, axis,
                    overwrite_x=overwrite_x,
                    should_overwrite=should_overwrite)
    @pytest.mark.parametrize('dtype', dtypes)
    @pytest.mark.parametrize('fftsize', fftsizes)
    @pytest.mark.parametrize('overwrite_x', [True, False])
    @pytest.mark.parametrize('shape,axes', [((16,), -1),
                                            ((16, 2), 0),
                                            ((2, 16), 1)])
    def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
        # Complex transforms may only overwrite complex buffers in place.
        overwritable = (np.longcomplex, np.complex128, np.complex64)
        self._check_1d(fft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
        self._check_1d(ifft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
    @pytest.mark.parametrize('dtype', real_dtypes)
    @pytest.mark.parametrize('fftsize', fftsizes)
    @pytest.mark.parametrize('overwrite_x', [True, False])
    @pytest.mark.parametrize('shape,axes', [((16,), -1),
                                            ((16, 2), 0),
                                            ((2, 16), 1)])
    def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
        overwritable = self.real_dtypes
        self._check_1d(irfft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
        self._check_1d(rfft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
    def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
                      overwrite_x):
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)
        def fftshape_iter(shp):
            # Yield every combination of halved/unchanged/doubled lengths
            # along the transformed axes.
            if len(shp) <= 0:
                yield ()
            else:
                for j in (shp[0]//2, shp[0], shp[0]*2):
                    for rest in fftshape_iter(shp[1:]):
                        yield (j,) + rest
        def part_shape(shape, axes):
            # The sub-shape restricted to the transformed axes.
            if axes is None:
                return shape
            else:
                return tuple(np.take(shape, axes))
        def should_overwrite(data, shape, axes):
            s = part_shape(data.shape, axes)
            return (overwrite_x and
                    np.prod(shape) <= np.prod(s)
                    and dtype in overwritable_dtypes)
        for fftshape in fftshape_iter(part_shape(shape, axes)):
            self._check(data, routine, fftshape, axes,
                        overwrite_x=overwrite_x,
                        should_overwrite=should_overwrite(data, fftshape, axes))
            if data.ndim > 1:
                # check fortran order
                self._check(data.T, routine, fftshape, axes,
                            overwrite_x=overwrite_x,
                            should_overwrite=should_overwrite(
                                data.T, fftshape, axes))
    @pytest.mark.parametrize('dtype', dtypes)
    @pytest.mark.parametrize('overwrite_x', [True, False])
    @pytest.mark.parametrize('shape,axes', [((16,), None),
                                            ((16,), (0,)),
                                            ((16, 2), (0,)),
                                            ((2, 16), (1,)),
                                            ((8, 16), None),
                                            ((8, 16), (0, 1)),
                                            ((8, 16, 2), (0, 1)),
                                            ((8, 16, 2), (1, 2)),
                                            ((8, 16, 2), (0,)),
                                            ((8, 16, 2), (1,)),
                                            ((8, 16, 2), (2,)),
                                            ((8, 16, 2), None),
                                            ((8, 16, 2), (0, 1, 2))])
    def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
        overwritable = (np.longcomplex, np.complex128, np.complex64)
        self._check_nd_one(fftn, dtype, shape, axes, overwritable,
                           overwrite_x)
        self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
                           overwrite_x)
@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
                                  rfft, irfft, rfftn, irfftn])
def test_invalid_norm(func):
    # Every transform shares the same norm-validation error message.
    x = np.arange(10, dtype=float)
    with assert_raises(ValueError,
                       match='Invalid norm value o, should be None or "ortho"'):
        func(x, norm='o')
| arokem/scipy | scipy/fft/_pocketfft/tests/test_basic.py | Python | bsd-3-clause | 35,101 |
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import six
from django.contrib.auth import get_user_model
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from shoop.admin.base import AdminModule, MenuEntry, SearchResult
from shoop.admin.utils.urls import admin_url, derive_model_url, get_model_url
class UserModule(AdminModule):
    """Admin module for user management.

    Wires up the user CRUD/password/permission views, contributes menu
    entries under the "Contacts" category, and provides user search.
    """
    name = _("Users")
    category = _("Contacts")
    breadcrumbs_menu_entry = MenuEntry(name, url="shoop_admin:user.list")
    def get_urls(self):
        # The patterns are raw strings: "\d" inside a plain string literal
        # is an invalid escape sequence (DeprecationWarning on Python 3.6+,
        # slated to become an error); the regex bytes are unchanged.
        return [
            admin_url(
                r"^users/(?P<pk>\d+)/change-password/$",
                "shoop.admin.modules.users.views.UserChangePasswordView",
                name="user.change-password"
            ),
            admin_url(
                r"^users/(?P<pk>\d+)/reset-password/$",
                "shoop.admin.modules.users.views.UserResetPasswordView",
                name="user.reset-password"
            ),
            admin_url(
                r"^users/(?P<pk>\d+)/change-permissions/$",
                "shoop.admin.modules.users.views.UserChangePermissionsView",
                name="user.change-permissions"
            ),
            admin_url(
                r"^users/(?P<pk>\d+)/$",
                "shoop.admin.modules.users.views.UserDetailView",
                name="user.detail"
            ),
            admin_url(
                r"^users/new/$",
                "shoop.admin.modules.users.views.UserDetailView",
                kwargs={"pk": None},
                name="user.new"
            ),
            admin_url(
                r"^users/$",
                "shoop.admin.modules.users.views.UserListView",
                name="user.list"
            ),
        ]
    def get_menu_category_icons(self):
        return {self.category: "fa fa-users"}
    def get_menu_entries(self, request):
        return [
            MenuEntry(
                text=_("Users"),
                icon="fa fa-users",
                url="shoop_admin:user.list",
                category=self.category
            )
        ]
    def get_search_results(self, request, query):
        # Usernames match by substring, emails only exactly; results are
        # capped at 10 and ranked so earlier hits score higher.
        minimum_query_length = 3
        if len(query) >= minimum_query_length:
            users = get_user_model().objects.filter(
                Q(username__icontains=query) |
                Q(email=query)
            )
            for i, user in enumerate(users[:10]):
                relevance = 100 - i
                yield SearchResult(
                    text=six.text_type(user),
                    url=get_model_url(user),
                    category=self.category,
                    relevance=relevance
                )
    def get_model_url(self, object, kind):
        return derive_model_url(get_user_model(), "shoop_admin:user", object, kind)
| taedori81/shoop | shoop/admin/modules/users/__init__.py | Python | agpl-3.0 | 2,997 |
# Copyright (C) 2013 S. Daniel Francis <francis@sugarlabs.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import time
from dictation import config
from dictation import espeak
class WordPlayer():
    """Dictation driver: prints and speaks each word of *text*, pacing the
    delay between words by word length and polling the console for key
    commands (pause/stop) throughout.

    NOTE(review): the entire playback loop runs inside ``__init__``, so
    constructing a WordPlayer blocks until dictation finishes or is
    stopped -- confirm this is intentional before refactoring callers.
    """
    def __init__(self, text, console):
        # Control flags; key callbacks invoked via check_keys() mutate these.
        self.paused = False
        self.stop = False
        self.text = text
        self.args = config.get_espeak_options()
        self.current_word = ''
        self.console = console
        self.tbw = config.get_tbw()  # pacing factor; delay is len(word) * tbw
        self.language = config.get_language()
        self.speed = config.get_speed()
        for word in self.text.split():
            if self.stop:
                break
            while self.paused:
                # Busy-wait while paused; any key event resumes playback.
                # NOTE(review): this toggles ``paused`` in addition to the
                # callback's own effect -- verify the double toggle is wanted.
                if self.check_keys():
                    self.paused = not self.paused
            self.current_word = word
            self.console.print_word(word)
            self.speak(word)
            # Wait proportionally to the word length, still honouring keys.
            end_time = time.time() + len(word) * self.tbw
            while time.time() < end_time:
                if self.check_keys():
                    break
            if self.stop:
                break
    def speak(self, word):
        # Delegate to espeak, letting it poll for key events mid-word.
        espeak.espeak(word, self.language, self.speed, self.args,
                      self.check_keys)
    def check_keys(self):
        """Poll the console; run the bound action (passing *self*) and
        return True when a key action fired, else False."""
        check = self.console.check_keys()
        if check is not None:
            check(self)
            return True
        return False
| sdanielf/dictate | dictation/wordplayer.py | Python | gpl-3.0 | 2,052 |
# Big Data Smart Socket
# Copyright (C) 2016 Clemson University
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
from app.matchers import regular_expression
class TestRegularExpressionMatcher(unittest.TestCase):
    """Behaviour of the regular-expression URL matcher."""
    def testMatch(self):
        options = dict(pattern=r"^http:\/\/example\.com\/file")
        # Anchored at the start: scheme, host and (absence of) port must
        # match literally, while anything may follow the "file" prefix.
        cases = [
            ("http://example.com/file.txt", True),
            ("http://example.com:8000/file.txt", False),
            ("http://example.com/files/file1.txt", True),
            ("ftp://example.com/file.txt", False),
        ]
        for url, expected in cases:
            self.assertEqual(
                regular_expression.matches_url(options, url), expected)
| feltus/BDSS | metadata_repository/tests/unit/url_matchers/regular_expression.py | Python | gpl-2.0 | 1,363 |
# -*- coding: utf-8 -*-
import re
from pyload.plugin.Account import Account
class QuickshareCz(Account):
    """pyLoad account plugin for quickshare.cz (scrapes the premium page)."""
    __name = "QuickshareCz"
    __type = "account"
    __version = "0.03"
    __description = """Quickshare.cz account plugin"""
    __license = "GPLv3"
    __authors = [("zoidberg", "zoidberg@mujmail.cz")]
    # Matches the remaining credit shown on the premium page.
    TRAFFIC_LEFT_PATTERN = r'Stav kreditu: <strong>(.+?)</strong>'
    def loadAccountInfo(self, user, req):
        # Scrape remaining traffic; zero or missing credit => non-premium.
        html = req.load("http://www.quickshare.cz/premium", decode=True)
        m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
        if m:
            trafficleft = self.parseTraffic(m.group(1))
            premium = bool(trafficleft)
        else:
            trafficleft = None
            premium = False
        # validuntil -1: no expiry date is exposed by the site.
        return {"validuntil": -1, "trafficleft": trafficleft, "premium": premium}
    def login(self, user, data, req):
        # Submit the login form; the Czech error strings in the response
        # ("no such user" / "wrong password") signal failed credentials.
        html = req.load('http://www.quickshare.cz/html/prihlaseni_process.php',
                        post={"akce": u'Přihlásit',
                              "heslo": data['password'],
                              "jmeno": user},
                        decode=True)
        if u'>Takový uživatel neexistuje.<' in html or u'>Špatné heslo.<' in html:
            self.wrongPassword()
| ardi69/pyload-0.4.10 | pyload/plugin/account/QuickshareCz.py | Python | gpl-3.0 | 1,274 |
"""
find out the length of the last word in a string
https://leetcode.com/problems/length-of-last-word/
date: 10/08/21
"""
def length(s):
    """Return the length of the last word in *s* (0 if there are no words).

    A word is a maximal run of non-whitespace characters.  The original
    right-to-left scan used ``range(len(l)-1, 0, -1)``, which never visits
    index 0: inputs such as ``'a'`` followed only by spaces (``'a '``),
    all-space strings, or ``''`` fell through the loop and returned
    ``None``.  Splitting on whitespace handles every case uniformly.
    """
    words = s.split()
    return len(words[-1]) if words else 0
if __name__ == '__main__':
    # Exercise the helper on the sample strings from the problem statement.
    for sample in ('Hello world',
                   '   fly me   to   the moon  ',
                   'luffy is still joyboy'):
        print(length(sample))
| entrepidea/projects | python/tutorials/algo/leetcode/easy/len_of_last_word.py | Python | gpl-3.0 | 556 |
from . import mass_reconcile
from . import base_reconciliation
from . import base_advanced_reconciliation
from . import simple_reconciliation
from . import advanced_reconciliation
from . import mass_reconcile_history
from . import res_config
| OCA/bank-statement-reconcile | account_mass_reconcile/models/__init__.py | Python | agpl-3.0 | 242 |
"""
unit tests for the sqs_events engine
"""
import pytest
import salt.engines.sqs_events as sqs_events
from tests.support.mock import MagicMock, patch
# Skip every test in this module when boto is missing -- the engine under
# test is a thin wrapper around boto's SQS API.
pytestmark = [
    pytest.mark.skipif(
        sqs_events.HAS_BOTO is False, reason="The boto library is not installed"
    )
]
@pytest.fixture
def configure_loader_modules():
    # No extra module globals required; the engine runs with empty dunders.
    return {sqs_events: {}}
@pytest.fixture
def mock_sqs():
    # Patch boto.sqs for the duration of a test and yield the mock so the
    # test can stub queue behaviour and inspect calls.
    with patch("salt.engines.sqs_events.boto.sqs") as mock_sqs:
        yield mock_sqs
def sample_msg():
    """Build a stand-in SQS message with a canned body and deletion."""
    msg = MagicMock()
    msg.configure_mock(**{
        "get_body.return_value": "This is a test message",
        "delete.return_value": True,
    })
    return msg
# 'present' function tests: 1
def test_no_queue_present(mock_sqs):
    """
    Test to ensure the SQS engine logs a warning when queue not present
    """
    with patch("salt.engines.sqs_events.log") as mock_logging:
        with patch("time.sleep", return_value=None) as mock_sleep:
            # A None queue simulates "queue not found".
            q = None
            q_name = "mysqs"
            mock_fire = MagicMock(return_value=True)
            sqs_events._process_queue(q, q_name, mock_fire)
            # A warning is logged and no message fetch is attempted.
            assert mock_logging.warning.called
            assert not mock_sqs.queue.Queue().get_messages.called
def test_minion_message_fires(mock_sqs):
    """
    Test SQS engine correctly gets and fires messages on minion
    """
    msgs = [sample_msg(), sample_msg()]
    mock_sqs.queue.Queue().get_messages.return_value = msgs
    q = mock_sqs.queue.Queue()
    q_name = "mysqs"
    mock_event = MagicMock(return_value=True)
    mock_fire = MagicMock(return_value=True)
    # On a minion, events are sent through __salt__["event.send"].
    with patch.dict(sqs_events.__salt__, {"event.send": mock_event}):
        sqs_events._process_queue(q, q_name, mock_fire)
        # Messages must be fetched and then deleted from the queue.
        assert mock_sqs.queue.Queue().get_messages.called
        assert all(x.delete.called for x in msgs)
def test_master_message_fires(mock_sqs):
    """
    Test SQS engine correctly gets and fires messages on master
    """
    msgs = [sample_msg(), sample_msg()]
    mock_sqs.queue.Queue().get_messages.return_value = msgs
    q = mock_sqs.queue.Queue()
    q_name = "mysqs"
    mock_fire = MagicMock(return_value=True)
    # On the master the fire callback is used directly (no __salt__ patch).
    sqs_events._process_queue(q, q_name, mock_fire)
    assert mock_sqs.queue.Queue().get_messages.called, len(msgs)
    assert mock_fire.called, len(msgs)
| saltstack/salt | tests/pytests/unit/engines/test_sqs_events.py | Python | apache-2.0 | 2,288 |
######################################################################
# Cloud Routes Web Application
# -------------------------------------------------------------------
# Reaction Class
######################################################################
import rethinkdb as r
class Reaction(object):
    """Data-access wrapper for "reactions" stored in RethinkDB.

    Every mutating operation (create/edit/delete) also mirrors the change
    into the ``dc1queue`` and ``dc2queue`` tables so per-datacenter workers
    pick it up.

    NOTE(review): return values mix types -- ``createReaction`` returns a
    generated key, the string ``'exists'`` or ``False``; ``editReaction``
    returns the strings ``"edit true"`` / ``"edit failed"``.  Callers must
    compare against those exact values.
    """
    def __init__(self, rid=None):
        ''' Create a reaction object and set attributes as None for now '''
        self.rid = rid
        self.name = None
        self.rtype = None
        self.uid = None
        self.trigger = None
        self.lastrun = None
        self.frequency = None
        self.data = {}
    def createReaction(self, rdb):
        ''' This will create a reaction with the supplied information '''
        reactdata = {
            'name': self.name,
            'rtype': self.rtype,
            'uid': self.uid,
            'trigger': self.trigger,
            'frequency': self.frequency,
            'lastrun': 0,
            'data': self.data}
        # Reactions are unique per (name, uid).
        if self.exists(reactdata['name'], reactdata['uid'], rdb):
            return 'exists'
        else:
            results = r.table('reactions').insert(reactdata).run(rdb)
            if results['inserted'] == 1:
                # Mirror the new reaction into both datacenter queues.
                qdata = {}
                qdata['item'] = reactdata
                qdata['action'] = 'create'
                qdata['type'] = 'reaction'
                qdata['item']['rid'] = results['generated_keys'][0]
                q1 = r.table('dc1queue').insert(qdata).run(rdb)
                q2 = r.table('dc2queue').insert(qdata).run(rdb)
                return results['generated_keys'][0]
            else:
                return False
    def editReaction(self, rdb):
        ''' This will edit a reaction with the supplied information '''
        reactdata = {
            'name': self.name,
            'rtype': self.rtype,
            'uid': self.uid,
            'trigger': self.trigger,
            'frequency': self.frequency,
            'lastrun': self.lastrun,
            'data': self.data}
        results = r.table('reactions').get(self.rid).update(reactdata).run(rdb)
        if results['replaced'] == 1:
            # Propagate the edit to both datacenter queues.
            qdata = {}
            qdata['item'] = reactdata
            qdata['action'] = 'edit'
            qdata['type'] = 'reaction'
            qdata['item']['rid'] = self.rid
            q1 = r.table('dc1queue').insert(qdata).run(rdb)
            q2 = r.table('dc2queue').insert(qdata).run(rdb)
            return "edit true"
        else:
            return "edit failed"
    def deleteReaction(self, uid, rid, rdb):
        ''' This will delete a specified reaction '''
        # Ownership check: only the owning uid may delete the reaction.
        reaction = r.table('reactions').get(rid).run(rdb)
        if reaction['uid'] == uid:
            delete = r.table('reactions').get(rid).delete().run(rdb)
            if delete['deleted'] == 1:
                # Propagate the deletion to both datacenter queues.
                qdata = {}
                qdata['item'] = reaction
                qdata['action'] = 'delete'
                qdata['type'] = 'reaction'
                qdata['item']['rid'] = rid
                q1 = r.table('dc1queue').insert(qdata).run(rdb)
                q2 = r.table('dc2queue').insert(qdata).run(rdb)
                return True
            else:
                return False
        else:
            return False
    def exists(self, name, uid, rdb):
        '''
        This will check to see if the
        specified reactions already exists or not
        '''
        result = r.table('reactions').filter(
            {'name': name, 'uid': uid}).count().run(rdb)
        if result >= 1:
            return True
        else:
            return False
    def getRID(self, searchstring, rdb):
        '''
        This will lookup a reaction by name and uid (name:uid)
        and return the rid
        '''
        # NOTE(review): assumes the reaction name contains no ':' -- the
        # split would otherwise shift the uid component.
        strings = searchstring.split(":")
        result = r.table('reactions').filter(
            {'name': strings[0], 'uid': strings[1]}).run(rdb)
        xdata = {}
        for x in result:
            key = '{0}:{1}'.format(x['name'], x['uid'])
            xdata[key] = x['id']
        return xdata[searchstring]
    def get(self, method, lookup, rdb):
        '''
        This will return a reactions
        information based on the data provided
        '''
        # method 'rid' treats lookup as the id; anything else treats it as
        # a "name:uid" search string resolved via getRID().
        if method == 'rid':
            rid = lookup
        else:
            rid = self.getRID(lookup, rdb)
        results = r.table('reactions').get(rid).run(rdb)
        if results:
            # Populate this instance in place and return it for chaining.
            self.rid = rid
            self.name = results['name']
            self.rtype = results['rtype']
            self.uid = results['uid']
            self.trigger = results['trigger']
            self.frequency = results['frequency']
            self.lastrun = results['lastrun']
            self.data = results['data']
            return self
        else:
            return False
    def count(self, uid, rdb):
        ''' This will return the numerical count of reactions by user id '''
        result = r.table('reactions').filter({'uid': uid}).count().run(rdb)
        return result
# Module is import-only; there is nothing to run directly.
if __name__ == '__main__': # pragma: no cover
    pass # pragma: no cover
| codecakes/cloudroutes-service | src/web/reactions.py | Python | agpl-3.0 | 5,105 |
"""
El usuario introduce una serie indefinida de números hasta que se introduzca un cero. El programa ha de imprimir
cuántos números se han introducido, la suma de todos los números, cuántos de esos números son múltiplos de dos,
cuántos son múltiplos de tres y cuántos son múltiplos de dos y de tres.
"""
# Inicializamos las variables
promptMsg = 'Introduce a number:\n'
totalTwo = 0
totalThree = 0
totalBoth = 0
totalSum = 0
totalCount = 0
inNum = float(input(promptMsg))
# Usamos el loop para preguntar cuantos números queremos, usamos float para admitir decimales
while inNum != 0:
# Solución A
"""
if inNum % 2 == 0: # Comprobamos los múltiplos de dos
totalTwo += 1
if inNum % 3 == 0: # Comprobamos los múltiplos de tres
totalThree += 1
if inNum % 2 == 0 and inNum % 3 == 0: # Comprobamos los múltiplos de los dos
totalBoth += 1
"""
# Solución B
if inNum % 2 == 0: # Comprobamos los múltiplos de dos
totalTwo += 1
if inNum % 3 == 0: # Comprobamos los múltiplos de tres
totalBoth += 1
if inNum % 3 == 0: # Comprobamos los múltiplos de tres
totalThree += 1
totalSum += inNum # Sumamos el número al total
totalCount += 1 # Aumentamos en un el total de cuentas
inNum = float(input(promptMsg))
print('Total numbers: \n', totalCount)
print('Total sum: \n', totalSum)
print('Multiples of Two: \n', int(totalTwo))
print('Multiples of Three: \n', int(totalThree))
print('Multiples of Two & Three: \n', int(totalBoth))
"""
Solución usando Listas, así mostramos los números en sí y no las cantidades
# Inicializamos las variables
promptMsg = 'Introduce a number:\n'
repeat = True
numList = []
# Usamos el loop para preguntar cuantos números queremos, usamos float para admitir decimales
while repeat:
inNum = float(input(promptMsg))
if inNum == 0:
repeat = False
else:
numList.append(inNum)
# Inicializamos las listas a devolver
twoNum = []
threeNum = []
bothNum = []
sumNum = 0
# Tratamos cada elemento de la lista total de números
for num in numList:
if num % 2 == 0: # Comprobamos los múltiplos de dos
twoNum.append(num)
if num % 3 == 0: # Comprobamos los múltiplos de tres
threeNum.append(num)
if num % 2 == 0 and num % 3 == 0: # Comprobamos los múltiplos de los dos
bothNum.append(num)
sumNum += num # Sumamos el número al total
# Imprimimos los números pedidos
print('Total numbers: \n', len(numList))
print('Total sum: \n', sumNum)
print('Multiples of Two: \n', twoNum)
print('Multiples of Three: \n', threeNum)
print('Multiples of Two & Three: \n', bothNum)
"""
| IhToN/DAW1-PRG | Ejercicios/PrimTrim/Ejercicio13.py | Python | apache-2.0 | 2,711 |
#!/usr/bin/env python3
import threading
import time
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
from gi.repository import GObject, Gst, GLib
import config
import dispatchers
import utils
from storage.blacklist import BlacklistStorage
from metareader.icecast import IcecastReader
from player_tee import PlayerTee
from PyQt4.QtCore import QTimer
class Player:
    """GStreamer-based internet-radio player with advertisement handling.

    Wraps a ``playbin`` element whose audio path goes through a
    :class:`PlayerTee` (volume control + recorder branch).  Stream titles
    are read either from GStreamer tag messages or from an Icecast
    metadata reader; when a title matches the blacklist, the player
    reduces the volume and/or switches stations depending on
    ``config.block_mode``.
    """

    def __init__(self):
        # Ad-block tracking state
        self._in_ad_block = False        # currently inside a detected ad block?
        self._last_ad_time = None        # time.time() when the current ad block started
        self._last_uri = ""              # last URI played; reused by play() with no args
        self._last_title = ""            # last stream title seen (dedupe title events)
        self._just_switched = True       # True right after a station switch

        # If running inside PyInstaller, set GST_PLUGIN_PATH
        if utils.is_frozen():
            utils.set_gst_plugin_path()

        # Init timer - checks ad-block duration once per second
        self._timer = QTimer()
        self._timer.timeout.connect(self.on_timer_check_ad_duration)
        self._timer.start(1000)

        # Initialize GStreamer
        Gst.init(None)
        self._tee_bin = PlayerTee()

        # Create playbin and add the custom audio sink to it
        self._player = Gst.ElementFactory.make("playbin", "player")
        self._player.set_property('audio_filter', self._tee_bin.get_bin_element())

        # Listen for player events
        self._bus = self._player.get_bus()
        self._bus.enable_sync_message_emission()
        self._bus.add_signal_watch()
        self._bus.connect('message::tag', self.on_tag)
        # TODO: watch status messages
        self._bus.connect("message", self.on_message)
        # TODO: connect to events

        self._meta_reader = None
        """
        Metareader is only used if config.blacklisted_tags have been
        configured. Those tags are used to detect beginning and ending
        of advertisement blocks
        """

        # Wire UI/recorder events to player actions
        dispatchers.player.play_clicked += self.play
        dispatchers.player.pause_clicked += self.stop
        dispatchers.player.change_station_clicked += self.on_change_station_clicked
        dispatchers.recorder.recording_started += self.on_recording_started
        dispatchers.recorder.recording_stopped += self.on_recording_stopped

    def get_recorder(self):
        """Return the recorder object owned by the tee bin."""
        return self._tee_bin.get_recorder()

    @property
    def is_playing(self):
        # NOTE(review): READY is treated as "playing" here — presumably to
        # cover the brief pre-roll state; confirm against UI expectations.
        state = self._player.get_state(100)[1]
        return state == Gst.State.PLAYING or state == Gst.State.READY

    @property
    def current_uri(self):
        """URI of the station currently (or last) played."""
        return self._last_uri

    @property
    def volume(self):
        """Playback amplification as exposed by the tee bin."""
        return self._tee_bin.amplification

    @volume.setter
    def volume(self, value):
        self._tee_bin.amplification = value

    def on_tag(self, bus, message):
        """Handle GStreamer tag messages; extract the stream title.

        Ignored while the Icecast metadata reader is active, so titles
        come from a single source at a time.
        """
        taglist = message.parse_tag()
        if not self._meta_reader or not self._meta_reader.is_running:
            title = taglist.get_string('title')
            if title and title.value:
                title = title.value
                title = title.strip(" \"'")
                self.on_title_read(title)

    def on_message(self, bus, message):
        """Handle generic bus messages: errors restart playback, EOS stops."""
        t = message.type
        if t == Gst.MessageType.ERROR:
            err, debug = message.parse_error()
            print("Error: %s" % err, debug)
            # Restart playback of the last URI after a stream error
            self.stop()
            self.play()
        elif t == Gst.MessageType.EOS:
            self.stop()
        elif t == Gst.MessageType.BUFFERING:
            # TODO: pause stream
            # Check message.buffer_percent
            pass
        elif t == Gst.MessageType.STATE_CHANGED:
            self.fire_state_change()

    # Handle song metadata
    def on_title_read(self, title):
        """React to a (possibly new) stream title on the main thread.

        Blacklisted titles start an ad block (reduce volume or switch
        station per config); non-blacklisted titles end it.
        """
        assert threading.current_thread() == threading.main_thread()
        if title is None:
            return
        if title != self._last_title:
            self._last_title = title
            # TODO: Fade volume gradually
            # TODO: Allow user to choose what to do when an advertisement block is detected.
            # Ideas for possible options:
            # * reduce or mute volume
            # * play random audio file from a local directory
            # * switch to another radio station
            # * repeat part of last song
            print("Title changed to %s" % title)
            # If the title contains a blacklisted tag, reduce volume
            if BlacklistStorage.is_blacklisted(title):
                if not self._in_ad_block:
                    print('Advertisement tag detected.')
                    if config.block_mode in (config.BlockMode.REDUCE_VOLUME, config.BlockMode.REDUCE_AND_SWITCH):
                        print('Reducing volume.')
                        self.volume = config.ad_block_volume
                        self._in_ad_block = True
                        self._last_ad_time = time.time()
                    elif config.block_mode == config.BlockMode.SWITCH_STATION:
                        self.switch_to_another_station()
            else:
                if self._in_ad_block:
                    print('Restoring volume to maximum.')
                    if config.block_mode in (config.BlockMode.REDUCE_VOLUME, config.BlockMode.REDUCE_AND_SWITCH):
                        self.volume = config.max_volume
                    self._in_ad_block = False
                    self._last_ad_time = None
                    self._just_switched = False
            dispatchers.player.song_changed(title)

    def on_timer_check_ad_duration(self):
        """Periodic (1 s) check of how long the current ad block has lasted.

        Switches stations when an ad block exceeds ``config.max_ad_duration``
        (or immediately after a switch that landed in another ad block).
        """
        if not self._last_ad_time:
            return True
        duration = time.time() - self._last_ad_time
        if self._in_ad_block and self._just_switched:
            # If we have just switched to a new station, and this station is also
            # in advertisement block, switch immediately again to another one
            print("Switch again immediately.")
            if config.block_mode in (config.BlockMode.SWITCH_STATION, config.BlockMode.REDUCE_AND_SWITCH):
                # Switch to another radio station
                self.switch_to_another_station()
        elif self._in_ad_block:
            print("Ad block with duration of %d seconds." % duration)
            if (config.block_mode == config.BlockMode.REDUCE_AND_SWITCH
                    and duration > config.max_ad_duration):
                # Switch to another radio station
                self.switch_to_another_station()
        else:
            # If 10 seconds have passed since last switch of station, reset the timer /
            # disable immediate switch to yet another station
            if self._just_switched and duration > config.max_ad_duration:
                print("Reset just_switched")
                self._just_switched = False
        return True

    def fire_state_change(self):
        """Broadcast the current playing/paused state to listeners."""
        assert threading.current_thread() == threading.main_thread()
        dispatchers.player.playing_state_changed(self.is_playing)

    def play(self, uri=""):
        """Start playing *uri* (or the last URI when called with none)."""
        self._last_title = ""
        # Play last URI, if none provided
        if not uri:
            uri = self._last_uri
        # Automatically extract uri to stream from m3u playlists
        stream_uri = uri
        stream_uri = utils.read_uri_from_playlist(stream_uri)
        # Set URI to online radio
        self._player.set_property('uri', stream_uri)
        # Start playing
        self._player.set_state(Gst.State.PLAYING)
        # Reset volume level
        self.volume = config.max_volume
        if not BlacklistStorage.is_empty():
            # TODO: Determine server type and use different reader for each
            self._meta_reader = IcecastReader(uri)
            self._meta_reader.user_agent = config.user_agent
            self._meta_reader.event_title_read += self.on_title_read
            self._meta_reader.start()
        self.fire_state_change()
        if uri != self._last_uri:
            self._last_uri = uri
            dispatchers.player.station_changed(utils.get_station(uri))

    def stop(self):
        """Stop playback, recording and metadata reading; reset ad state."""
        if self.get_recorder().is_recording:
            self.get_recorder().stop()
        # Stop metadata reader, if using one
        if self._meta_reader:
            self._meta_reader.stop()
        # Stop playing
        self._player.set_state(Gst.State.NULL)
        self._in_ad_block = False
        self._last_ad_time = None
        self._just_switched = False
        self._last_title = ""
        self.fire_state_change()

    def switch_to_another_station(self):
        """Switch to a random other station (used to escape ad blocks)."""
        print('Switching to another station.')
        other_stations = utils.get_other_stations(self._last_uri)
        station = utils.get_random_station(other_stations)
        print("Station chosen: %s" % station['name'])
        self.stop()
        self.play(station['uri'])
        # Mark the switch so the timer can react if the new station is
        # also inside an ad block.
        self._just_switched = True
        self._last_ad_time = time.time()
        dispatchers.player.station_changed(station)

    def on_change_station_clicked(self, station):
        """UI handler: user explicitly picked a station."""
        self.stop()
        self.play(station["uri"])
        dispatchers.player.station_changed(station)

    def on_recording_started(self, title):
        """Recorder hook: flush the prerecord buffer and restart playback."""
        if config.recording['prerecord']:
            # Flush prebuffered part of song to file
            self._tee_bin.prerecord_release()
        # When a recording branch is attached to tee, playback should be restarted
        if self.is_playing:
            self._player.set_state(Gst.State.PLAYING)

    def on_recording_stopped(self):
        """Recorder hook: re-arm the prerecord buffer and resume playback."""
        if config.recording['prerecord']:
            self._tee_bin.prerecord_empty()
            self._tee_bin.prerecord_hold()
        if self.is_playing:
            self._player.set_state(Gst.State.PLAYING)
| quasoft/adblockradio | adblockradio/player.py | Python | gpl-3.0 | 9,517 |
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import time
from spacewalk.common.usix import StringType
from spacewalk.common import rhnLib
from spacewalk.common.rhnLog import log_debug
from spacewalk.server import rhnSQL
class ArrayIterator:
    """Adapter giving a plain sequence the fetchone_dict() interface of a
    database cursor, so in-memory data can feed the dumpers."""

    def __init__(self, arr):
        self._arr = arr
        # ``None`` position marks exhaustion (also covers empty/None input)
        self._pos = 0 if self._arr else None

    def get_array(self):
        """Return the underlying sequence unchanged."""
        return self._arr

    def fetchone_dict(self):
        """Return the next item, or None once the sequence is exhausted."""
        if self._pos is None:
            return None
        item = self._arr[self._pos]
        self._pos += 1
        if self._pos >= len(self._arr):
            self._pos = None
        return item
class BaseDumper:
    """Common machinery for XML dump elements.

    Subclasses must set ``tag_name`` and typically override
    ``set_attributes``, ``set_iterator`` and/or ``dump_subelement``.
    """
    # tag_name has to be set in subclasses

    def __init__(self, writer, data_iterator=None):
        self._writer = writer
        self._attributes = {}
        self._iterator = data_iterator

    @staticmethod
    def timer(debug_level, message, function, *args, **kwargs):
        """Invoke *function*, log its wall-clock duration, return its result."""
        started_at = time.time()
        result = function(*args, **kwargs)
        elapsed = time.time() - started_at
        log_debug(debug_level, message, "timing: %.3f" % elapsed)
        return result

    def set_attributes(self):
        """Attributes of the container tag (overridable)."""
        return self._attributes

    def set_iterator(self):
        """Iterator producing the child elements (overridable)."""
        return self._iterator

    def dump(self):
        """Serialize this element and all of its children to the writer."""
        if not hasattr(self, "tag_name"):
            raise Exception("Programmer error: subclass did not set tag_name")
        tag_name = getattr(self, "tag_name")
        self._attributes = self.set_attributes() or {}
        self._iterator = self.timer(5, "set_iterator", self.set_iterator)
        if not self._iterator:
            # No children at all: a single empty tag
            self._writer.empty_tag(tag_name, attributes=self._attributes)
            return

        opened = False
        while True:
            data = self.timer(6, "fetchone_dict", self._iterator.fetchone_dict)
            if not data:
                break
            if not opened:
                # Open the container lazily, only once we know it is non-empty
                opened = True
                self._writer.open_tag(tag_name, attributes=self._attributes)
            if isinstance(data, StringType):
                # Pre-rendered XML fragment - emit verbatim
                self._writer.stream.write(data)
            else:
                self.timer(6, "dump_subelement", self.dump_subelement, data)

        if opened:
            self._writer.close_tag(tag_name)
        else:
            self._writer.empty_tag(tag_name, attributes=self._attributes)

    def dump_subelement(self, data):
        # pylint: disable=R0201
        """Dump one child; by default the child is itself a dumper."""
        if isinstance(data, BaseDumper):
            data.dump()

    def get_writer(self):
        return self._writer

    def set_writer(self, writer):
        self._writer = writer
class EmptyDumper(BaseDumper):
    """Dumper that always produces a single, childless tag."""

    def __init__(self, writer, tag_name, attributes=None):
        self.tag_name = tag_name
        self.attributes = attributes or {}
        BaseDumper.__init__(self, writer)

    def dump(self):
        """Emit the empty tag directly, bypassing the iterator machinery."""
        self._writer.empty_tag(self.tag_name, attributes=self.attributes)
class SimpleDumper(BaseDumper):
    """Dumper for a single scalar value wrapped in one tag.

    A value of ``None`` is serialized as a nested ``<rhn-null/>`` element.
    """

    def __init__(self, writer, tag_name, value, max_value_bytes=None):
        self.tag_name = tag_name
        self._value = value
        # max number of bytes satellite can handle in the matching db row
        self._max_value_bytes = max_value_bytes
        BaseDumper.__init__(self, writer)

    def dump(self):
        self._writer.open_tag(self.tag_name)
        if self._value is not None:
            self._writer.data(self._value)
        else:
            self._writer.empty_tag('rhn-null')
        self._writer.close_tag(self.tag_name)
class BaseRowDumper(BaseDumper):
    """BaseDumper specialization bound to one database row dict."""

    def __init__(self, writer, row):
        self._row = row
        BaseDumper.__init__(self, writer)
class BaseChecksumRowDumper(BaseRowDumper):
    """Row dumper whose only child element is the row's checksum."""

    def set_iterator(self):
        checksum = {'type': self._row['checksum_type'],
                    'value': self._row['checksum']}
        checksum_dumper = _ChecksumDumper(self._writer,
                                          data_iterator=ArrayIterator([checksum]))
        return ArrayIterator([checksum_dumper])
class BaseQueryDumper(BaseDumper):
    """Dumper pulling its children from ``iterator_query`` unless an
    explicit iterator was supplied at construction time."""

    iterator_query = None

    def set_iterator(self):
        if self._iterator:
            return self._iterator
        statement = rhnSQL.prepare(self.iterator_query)
        statement.execute()
        return statement
class BaseSubelementDumper(BaseDumper):
    """Dumper that wraps every child row in ``subelement_dumper_class``."""
    # pylint: disable=E1101
    subelement_dumper_class = object

    def dump_subelement(self, data):
        self.subelement_dumper_class(self._writer, data).dump()
####
class ExportTypeDumper(BaseDumper):
    """Emits export-type, export-start-date and export-end-date elements."""

    def __init__(self, writer, start_date=None, end_date=None):
        # A start date makes this an incremental export, otherwise full
        self.type = 'incremental' if start_date else 'full'
        self.start_date = start_date
        # Default the end date to "now" in YYYYMMDDHH24MISS form
        self.end_date = end_date if end_date else time.strftime("%Y%m%d%H%M%S")
        BaseDumper.__init__(self, writer)

    def _write_simple(self, tag, value):
        """Write one <tag>value</tag> triple to the stream."""
        self._writer.open_tag(tag)
        self._writer.stream.write(value)
        self._writer.close_tag(tag)

    def dump(self):
        self._write_simple('export-type', self.type)
        if self.start_date:
            self._write_simple('export-start-date', self.start_date)
        if self.end_date:
            self._write_simple('export-end-date', self.end_date)
class SatelliteDumper(BaseDumper):
    """Top-level <rhn-satellite> container dumping a fixed sequence of
    child dumpers in the order they were passed in."""
    tag_name = 'rhn-satellite'

    def __init__(self, writer, *dumpers):
        BaseDumper.__init__(self, writer)
        self._dumpers = dumpers

    def set_attributes(self):
        # NOTE(review): 'x.y' looks like a placeholder version string
        return {'version': 'x.y'}

    def set_iterator(self):
        return ArrayIterator(self._dumpers)
class _OrgTrustDumper(BaseDumper):
    """Container of the orgs trusted by one organization."""
    tag_name = 'rhn-org-trusts'

    def dump_subelement(self, data):
        attributes = {'org-id': data['org_trust_id']}
        EmptyDumper(self._writer, 'rhn-org-trust', attributes=attributes).dump()
class _OrgDumper(BaseDumper):
    """Dumps one organization, including the list of orgs it trusts."""
    tag_name = 'rhn-org'

    def __init__(self, writer, org):
        # org is a dict-like row with at least 'id' and 'name'
        self.org = org
        BaseDumper.__init__(self, writer)

    _query_org_trusts = """
        select rto.org_trust_id
          from rhnTrustedOrgs rto
         where rto.org_id = :org_id
    """

    def set_iterator(self):
        # trusts
        h = rhnSQL.prepare(self._query_org_trusts)
        h.execute(org_id=self.org['id'])
        return ArrayIterator([_OrgTrustDumper(self._writer, data_iterator=h)])

    def set_attributes(self):
        attributes = {
            'id': self.org['id'],
            'name': self.org['name'],
        }
        return attributes
class OrgsDumper(BaseDumper):
    """Container element holding one <rhn-org> per exported organization."""
    tag_name = 'rhn-orgs'

    def __init__(self, writer, data_iterator=None):
        BaseDumper.__init__(self, writer, data_iterator)

    def dump_subelement(self, data):
        # Each row from the iterator describes one organization
        org = _OrgDumper(self._writer, data)
        org.dump()
class ChannelTrustedOrgsDumper(BaseDumper):
    """Container of the orgs a channel is shared with."""
    tag_name = 'rhn-channel-trusted-orgs'

    def dump_subelement(self, data):
        attrs = {'org-id': data['org_trust_id']}
        EmptyDumper(self._writer, 'rhn-channel-trusted-org',
                    attributes=attrs).dump()
class _ChannelDumper(BaseRowDumper):
    """Dumps one channel (<rhn-channel>): its attributes plus families,
    dist maps, source packages, errata references and export-type info.

    start_date/end_date are YYYYMMDDHH24MISS strings bounding an
    incremental export; use_rhn_date chooses which modification column
    the bounds apply to (rhn last_modified vs. table modified), and
    whole_errata widens package selection to complete errata.
    """
    tag_name = 'rhn-channel'

    def __init__(self, writer, row, start_date=None, end_date=None, use_rhn_date=True, whole_errata=False):
        BaseRowDumper.__init__(self, writer, row)
        self.start_date = start_date
        self.end_date = end_date
        self.use_rhn_date = use_rhn_date
        self.whole_errata = whole_errata

    def set_attributes(self):
        """Build the attribute dict of the <rhn-channel> element."""
        channel_id = self._row['id']

        packages = ["rhn-package-%s" % x for x in self._get_package_ids()]
        # XXX channel-errata is deprecated and should go away in dump version
        # 3 or higher - we now dump that information in its own subelement
        # rhn-channel-errata
        errata = ["rhn-erratum-%s" % x for x in self._get_errata_ids()]
        ks_trees = self._get_kickstartable_trees()

        return {
            'channel-id': 'rhn-channel-%s' % channel_id,
            'label': self._row['label'],
            'org_id': self._row['org_id'] or "",
            'channel-arch': self._row['channel_arch'],
            'packages': ' '.join(packages),
            'channel-errata': ' '.join(errata),
            'kickstartable-trees': ' '.join(ks_trees),
            'sharing': self._row['channel_access'],
        }

    _query_channel_families = rhnSQL.Statement("""
        select cf.id, cf.label
          from rhnChannelFamily cf, rhnChannelFamilyMembers cfm
         where cfm.channel_family_id = cf.id
           and cfm.channel_id = :channel_id
    """)

    _query_dist_channel_map = rhnSQL.Statement("""
        select dcm.os, dcm.release, ca.label channel_arch
          from rhnDistChannelMap dcm, rhnChannelArch ca
         where dcm.channel_id = :channel_id
           and dcm.channel_arch_id = ca.id
           and dcm.org_id is null
    """)

    _query_get_channel_trusts = rhnSQL.Statement("""
        select org_trust_id
          from rhnChannelTrust
         where channel_id = :channel_id
    """)

    def set_iterator(self):
        """Assemble the ordered list of child dumpers for this channel."""
        channel_id = self._row['id']
        arr = []

        # Simple scalar children taken straight from the row
        mappings = [
            ('rhn-channel-parent-channel', 'parent_channel'),
            ('rhn-channel-basedir', 'basedir'),
            ('rhn-channel-name', 'name'),
            ('rhn-channel-summary', 'summary'),
            ('rhn-channel-description', 'description'),
            ('rhn-channel-gpg-key-url', 'gpg_key_url'),
            ('rhn-channel-checksum-type', 'checksum_type'),
        ]
        for k, v in mappings:
            arr.append(SimpleDumper(self._writer, k, self._row[v]))
        arr.append(SimpleDumper(self._writer, 'rhn-channel-last-modified',
                                _dbtime2timestamp(self._row['last_modified'])))
        channel_product_details = self._get_channel_product_details()
        arr.append(SimpleDumper(self._writer, 'rhn-channel-product-name',
                                channel_product_details[0]))
        arr.append(SimpleDumper(self._writer, 'rhn-channel-product-version',
                                channel_product_details[1]))
        arr.append(SimpleDumper(self._writer, 'rhn-channel-product-beta',
                                channel_product_details[2]))

        comp_last_modified = self._channel_comps_last_modified()
        if comp_last_modified is not None:
            arr.append(SimpleDumper(self._writer, 'rhn-channel-comps-last-modified',
                                    _dbtime2timestamp(comp_last_modified[0])))

        h = rhnSQL.prepare(self._query_get_channel_trusts)
        h.execute(channel_id=channel_id)
        arr.append(ChannelTrustedOrgsDumper(self._writer, data_iterator=h))

        h = rhnSQL.prepare(self._query_channel_families)
        h.execute(channel_id=channel_id)
        arr.append(ChannelFamiliesDumper(self._writer, data_iterator=h,
                                         ignore_subelements=1))

        h = rhnSQL.prepare(self._query_dist_channel_map)
        h.execute(channel_id=channel_id)
        arr.append(DistsDumper(self._writer, h))

        # Source package information (with timestamps)
        h = self._get_cursor_source_packages()
        arr.append(ChannelSourcePackagesDumper(self._writer, h))
        # Errata information (with timestamps)
        query_args = {'channel_id': channel_id}
        if self.start_date:
            # Incremental export: pick the date-bounded query variant
            if self.use_rhn_date:
                query = self._query__get_errata_ids_by_rhnlimits
            else:
                query = self._query__get_errata_ids_by_limits
            query_args.update({'lower_limit': self.start_date,
                               'upper_limit': self.end_date})
        else:
            query = self._query__get_errata_ids
        h = rhnSQL.prepare(query)
        h.execute(**query_args)
        arr.append(ChannelErrataDumper(self._writer, h))
        arr.append(ExportTypeDumper(self._writer, self.start_date, self.end_date))
        return ArrayIterator(arr)

    _query_get_package_ids = rhnSQL.Statement("""
        select package_id as id
          from rhnChannelPackage
         where channel_id = :channel_id
    """)

    _query_get_package_ids_by_date_limits = rhnSQL.Statement("""
        select package_id as id
          from rhnChannelPackage rcp
         where rcp.channel_id = :channel_id
           and rcp.modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
           and rcp.modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
    """)

    _query_get_package_ids_by_rhndate_limits = rhnSQL.Statement("""
        select package_id as id
          from rhnPackage rp, rhnChannelPackage rcp
         where rcp.channel_id = :channel_id
           and rcp.package_id = rp.id
           and rp.last_modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
           and rp.last_modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
    """)

    _query_pkgids_by_date_whole_errata = rhnSQL.Statement("""
        select rcp.package_id as id
          from rhnChannelPackage rcp, rhnPackage rp
          left join rhnErrataPackage rep on rp.id = rep.package_id
          left join rhnErrata re on rep.errata_id = re.id
         where rcp.channel_id = :channel_id
           and rcp.package_id = rp.id
           and ((re.modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
           and re.modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
           ) or (rep.package_id is NULL
           and rcp.modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
           and rcp.modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS'))
           )
    """)

    _query_get_pkgids_by_rhndate_whole_errata = rhnSQL.Statement("""
        select rcp.package_id as id
          from rhnChannelPackage rcp, rhnPackage rp
          left join rhnErrataPackage rep on rp.id = rep.package_id
          left join rhnErrata re on rep.errata_id = re.id
         where rcp.channel_id = :channel_id
           and rcp.package_id = rp.id
           and ((re.last_modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
           and re.last_modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
           ) or (rep.package_id is NULL
           and rp.last_modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
           and rp.last_modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS'))
           )
    """)

    # Things that can be overwriten in subclasses
    def _get_package_ids(self):
        # whole_errata mode also picks up packages whose erratum changed
        if self.start_date and self.whole_errata:
            return self._get_ids(self._query_pkgids_by_date_whole_errata,
                                 self._query_get_pkgids_by_rhndate_whole_errata,
                                 self._query_get_package_ids)
        else:
            return self._get_ids(self._query_get_package_ids_by_date_limits,
                                 self._query_get_package_ids_by_rhndate_limits,
                                 self._query_get_package_ids)

    def _get_ids(self, query_with_limit, query_with_rhnlimit, query_no_limits):
        """Run the query variant matching the incremental-export settings
        and return the list of ids it yields."""
        query_args = {'channel_id': self._row['id']}
        if self.start_date:
            if self.use_rhn_date:
                query = query_with_rhnlimit
            else:
                query = query_with_limit
            query_args.update({'lower_limit': self.start_date,
                               'upper_limit': self.end_date})
        else:
            query = query_no_limits
        h = rhnSQL.prepare(query)
        h.execute(**query_args)
        return [x['id'] for x in h.fetchall_dict() or []]

    _query_get_source_package_ids = rhnSQL.Statement("""
        select distinct ps.id, sr.name source_rpm,
               TO_CHAR(ps.last_modified, 'YYYYMMDDHH24MISS') last_modified
          from rhnChannelPackage cp, rhnPackage p, rhnPackageSource ps,
               rhnSourceRPM sr
         where cp.channel_id = :channel_id
           and cp.package_id = p.id
           and p.source_rpm_id = ps.source_rpm_id
           and ((p.org_id is null and ps.org_id is null) or
               p.org_id = ps.org_id)
           and ps.source_rpm_id = sr.id
    """)

    def _get_cursor_source_packages(self):
        # Cursor over the channel's source packages (id, srpm, timestamp)
        channel_id = self._row['id']

        h = rhnSQL.prepare(self._query_get_source_package_ids)
        h.execute(channel_id=channel_id)
        return h

    _query__get_errata_ids = rhnSQL.Statement("""
        select ce.errata_id as id, e.advisory_name,
               TO_CHAR(e.last_modified, 'YYYYMMDDHH24MISS') last_modified
          from rhnChannelErrata ce, rhnErrata e
         where ce.channel_id = :channel_id
           and ce.errata_id = e.id
    """)

    _query__get_errata_ids_by_limits = rhnSQL.Statement("""
        %s
           and ce.modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
           and ce.modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
    """ % _query__get_errata_ids)

    _query__get_errata_ids_by_rhnlimits = rhnSQL.Statement("""
        %s
           and e.last_modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
           and e.last_modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
    """ % _query__get_errata_ids)

    def _get_errata_ids(self):
        return self._get_ids(self._query__get_errata_ids_by_limits,
                             self._query__get_errata_ids_by_rhnlimits,
                             self._query__get_errata_ids)

    _query_get_kickstartable_trees = rhnSQL.Statement("""
        select kt.label as id
          from rhnKickstartableTree kt
         where kt.channel_id = :channel_id
           and kt.org_id is null
    """)

    _query_get_kickstartable_trees_by_rhnlimits = rhnSQL.Statement("""
        %s
           and kt.last_modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
           and kt.last_modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
    """ % _query_get_kickstartable_trees)

    _query_get_kickstartable_trees_by_limits = rhnSQL.Statement("""
        %s
           and kt.modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
           and kt.modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
    """ % _query_get_kickstartable_trees)

    def _get_kickstartable_trees(self):
        # Sorted so that the attribute string is deterministic
        ks_trees = self._get_ids(self._query_get_kickstartable_trees_by_limits,
                                 self._query_get_kickstartable_trees_by_rhnlimits,
                                 self._query_get_kickstartable_trees)
        ks_trees.sort()
        return ks_trees

    _query_get_channel_product_details = rhnSQL.Statement("""
        select cp.product as name,
               cp.version as version,
               cp.beta
          from rhnChannel c,
               rhnChannelProduct cp
         where c.id = :channel_id
           and c.channel_product_id = cp.id
    """)

    def _get_channel_product_details(self):
        """
        Export rhnChannelProduct table content through ChannelDumper

        return a tuple containing (product name, product version, beta status)
        or (None, None, None) if the information is missing
        """
        channel_id = self._row['id']

        h = rhnSQL.prepare(self._query_get_channel_product_details)
        h.execute(channel_id=channel_id)
        row = h.fetchone_dict()
        if not row:
            return (None, None, None)
        else:
            return (row['name'], row['version'], row['beta'])

    _query_channel_comps_last_modified = rhnSQL.Statement("""
        select to_char(last_modified, 'YYYYMMDDHH24MISS') as comps_last_modified
          from rhnChannelComps
         where channel_id = :channel_id
         order by id desc
    """)

    def _channel_comps_last_modified(self):
        # Most recent comps timestamp for this channel (or None)
        channel_id = self._row['id']

        h = rhnSQL.prepare(self._query_channel_comps_last_modified)
        h.execute(channel_id=channel_id)
        return h.fetchone()
class ChannelsDumper(BaseSubelementDumper):
    """Container of per-channel dumps; subclasses must supply the iterator."""
    tag_name = 'rhn-channels'
    subelement_dumper_class = _ChannelDumper

    def __init__(self, writer, channels=()):
        BaseSubelementDumper.__init__(self, writer)
        self._channels = channels

    def set_iterator(self):
        if not self._channels:
            # Nothing to do
            return
        raise NotImplementedError("To be overridden in a child class")
class ChannelDumper(_ChannelDumper):
    # pylint: disable=W0231,W0233
    """_ChannelDumper variant that also dumps rhn-channel-receiving-updates.

    The parent ``_ChannelDumper.__init__`` is intentionally skipped (hence
    the pylint disables), so the incremental-export attributes it would
    set are defaulted here to a full, non-incremental dump.
    """

    def __init__(self, writer, row):
        BaseRowDumper.__init__(self, writer, row)
        # Defaults normally provided by _ChannelDumper.__init__; without
        # them the inherited set_iterator()/_get_ids() would raise
        # AttributeError.  A full (unbounded) export is the safe default.
        self.start_date = None
        self.end_date = None
        self.use_rhn_date = True
        self.whole_errata = False

    #_query_release_channel_map = rhnSQL.Statement("""
    #    select dcm.os product, dcm.release version,
    #           dcm.eus_release release, ca.label channel_arch,
    #           dcm.is_default is_default
    #      from rhnDistChannelMap dcm, rhnChannelArch ca
    #     where dcm.channel_id = :channel_id
    #       and dcm.channel_arch_id = ca.id
    #       and dcm.is_eus = 'Y'
    #""")

    def set_iterator(self):
        """Extend the parent's element list with the receiving-updates flag.

        Bug fix: the parent implementation must receive the instance
        explicitly when invoked through the class; the original code
        called ``_ChannelDumper.set_iterator()`` with no arguments,
        which raises TypeError at runtime.
        """
        arrayiterator = _ChannelDumper.set_iterator(self)
        arr = arrayiterator.get_array()
        mappings = [
            ('rhn-channel-receiving-updates', 'receiving_updates'),
        ]
        for k, v in mappings:
            arr.append(SimpleDumper(self._writer, k, self._row[v]))
        #channel_id = self._row['id']
        # Add EUS info
        #h = rhnSQL.prepare(self._query_release_channel_map)
        # h.execute(channel_id=channel_id)
        #arr.append(ReleaseDumper(self._writer, h))
        return arrayiterator
# class ReleaseDumper(BaseDumper):
# tag_name = 'rhn-release'
#
# def dump_subelement(self, data):
# d = _ReleaseDumper(self._writer, data)
# d.dump()
#
# class _ReleaseDumper(BaseRowDumper):
# tag_name = 'rhn-release'
#
# def set_attributes(self):
# return {
# 'product' : self._row['product'],
# 'version' : self._row['version'],
# 'release' : self._row['release'],
# 'channel-arch' : self._row['channel_arch'],
# 'is-default' : self._row['is_default'],
# }
class _ChannelSourcePackageDumper(BaseRowDumper):
    """One source-package reference inside a channel dump."""
    tag_name = 'source-package'

    def set_attributes(self):
        row = self._row
        return {
            'id': 'rhn-source-package-%s' % row['id'],
            'source-rpm': row['source_rpm'],
            'last-modified': _dbtime2timestamp(row['last_modified']),
        }
class ChannelSourcePackagesDumper(BaseSubelementDumper):
    # Dumps the erratum id and the last modified for an erratum in this
    # channel
    tag_name = 'source-packages'
    subelement_dumper_class = _ChannelSourcePackageDumper
class _ChannelErratumDumper(BaseRowDumper):
    """One erratum reference (id, advisory, timestamp) inside a channel."""
    tag_name = 'erratum'

    def set_attributes(self):
        row = self._row
        return {
            'id': 'rhn-erratum-%s' % row['id'],
            'advisory-name': row['advisory_name'],
            'last-modified': _dbtime2timestamp(row['last_modified']),
        }
class ChannelErrataDumper(BaseSubelementDumper):
    # Dumps the erratum id and the last modified for an erratum in this
    # channel
    tag_name = 'rhn-channel-errata'
    subelement_dumper_class = _ChannelErratumDumper
class _DistDumper(BaseRowDumper):
    """One distribution/channel mapping row."""
    tag_name = 'rhn-dist'

    def set_attributes(self):
        row = self._row
        return {
            'os': row['os'],
            'release': row['release'],
            'channel-arch': row['channel_arch'],
        }
class DistsDumper(BaseSubelementDumper):
    """Container of the channel's distribution mappings."""
    tag_name = 'rhn-dists'
    subelement_dumper_class = _DistDumper
class ChannelFamiliesDumper(BaseQueryDumper):
    """Dumps channel families, optionally suppressing their subelements."""
    tag_name = 'rhn-channel-families'
    iterator_query = 'select cf.* from rhnChannelFamily'

    def __init__(self, writer, data_iterator=None, ignore_subelements=0,
                 null_max_members=1):
        BaseQueryDumper.__init__(self, writer, data_iterator=data_iterator)
        self._ignore_subelements = ignore_subelements
        self._null_max_members = null_max_members

    def dump_subelement(self, data):
        _ChannelFamilyDumper(
            self._writer, data,
            ignore_subelements=self._ignore_subelements,
            null_max_members=self._null_max_members,
        ).dump()
class _ChannelFamilyDumper(BaseRowDumper):
    """Dumps one channel family.

    ignore_subelements drops the name/product-url children (and the
    max-members attribute); null_max_members forces max-members to 0.
    """
    tag_name = 'rhn-channel-family'

    def __init__(self, writer, row, ignore_subelements=0, null_max_members=1):
        BaseRowDumper.__init__(self, writer, row)
        self._ignore_subelements = ignore_subelements
        self._null_max_members = null_max_members

    def set_iterator(self):
        if self._ignore_subelements:
            return None

        arr = []

        mappings = [
            ('rhn-channel-family-name', 'name'),
            ('rhn-channel-family-product-url', 'product_url'),
        ]
        for k, v in mappings:
            arr.append(SimpleDumper(self._writer, k, self._row[v]))

        return ArrayIterator(arr)

    _query_get_channel_family_channels = rhnSQL.Statement("""
        select c.label
          from rhnChannelFamilyMembers cfm, rhnChannel c
         where cfm.channel_family_id = :channel_family_id
           and cfm.channel_id = c.id
    """)

    def set_attributes(self):
        # Get all channels that are part of this channel family
        h = rhnSQL.prepare(self._query_get_channel_family_channels)
        channel_family_id = self._row['id']
        h.execute(channel_family_id=channel_family_id)
        channels = [x['label'] for x in h.fetchall_dict() or []]

        attributes = {
            'id': "rhn-channel-family-%s" % channel_family_id,
            'label': self._row['label'],
            'channel-labels': ' '.join(channels),
        }

        if self._ignore_subelements:
            return attributes
        if self._row['label'] != 'rh-public':
            if self._null_max_members:
                attributes['max-members'] = 0
            elif ('max_members' in self._row) and self._row['max_members']:
                attributes['max-members'] = self._row['max_members']
        return attributes
##
class _PackageDumper(BaseRowDumper):
    """Full dump of one package: attributes, checksums, changelog,
    dependency sets and the file list."""
    tag_name = 'rhn-package'

    def set_attributes(self):
        # Plain row columns copied through (with '_' -> '-' in names)
        attrs = ["name", "version", "release", "package_arch",
                 "package_group", "rpm_version", "package_size", "payload_size",
                 "installed_size", "build_host", "source_rpm", "payload_format",
                 "compat"]
        attr_dict = {
            'id': "rhn-package-%s" % self._row['id'],
            'org_id': self._row['org_id'] or "",
            'epoch': self._row['epoch'] or "",
            'cookie': self._row['cookie'] or "",
            'build-time': _dbtime2timestamp(self._row['build_time']),
            'last-modified': _dbtime2timestamp(self._row['last_modified']),
        }
        for attr in attrs:
            attr_dict[attr.replace('_', '-')] = self._row[attr]
        if self._row['checksum_type'] == 'md5':
            # compatibility with older satellite
            attr_dict['md5sum'] = self._row['checksum']
        return attr_dict

    def set_iterator(self):
        """Children: simple values, checksums, changelog, deps, files."""
        arr = []

        mappings = [
            ('rhn-package-summary', 'summary'),
            ('rhn-package-description', 'description'),
            ('rhn-package-vendor', 'vendor'),
            ('rhn-package-copyright', 'copyright'),
            ('rhn-package-header-sig', 'header_sig'),
            ('rhn-package-header-start', 'header_start'),
            ('rhn-package-header-end', 'header_end')
        ]
        for k, v in mappings:
            arr.append(SimpleDumper(self._writer, k, self._row[v]))

        # checksums
        checksum_arr = [{'type': self._row['checksum_type'],
                         'value': self._row['checksum']}]
        arr.append(_ChecksumDumper(self._writer,
                                   data_iterator=ArrayIterator(checksum_arr)))

        # Changelog entries
        h = rhnSQL.prepare("""
            select
                name, text,
                TO_CHAR(time, 'YYYYMMDDHH24MISS') as time
            from rhnPackageChangeLog
            where package_id = :package_id
        """)
        h.execute(package_id=self._row['id'])
        arr.append(_ChangelogDumper(self._writer, data_iterator=h))

        # Dependency information
        mappings = [
            ['rhnPackageRequires', 'rhn-package-requires', 'rhn-package-requires-entry'],
            ['rhnPackageProvides', 'rhn-package-provides', 'rhn-package-provides-entry'],
            ['rhnPackageConflicts', 'rhn-package-conflicts', 'rhn-package-conflicts-entry'],
            ['rhnPackageObsoletes', 'rhn-package-obsoletes', 'rhn-package-obsoletes-entry'],
            ['rhnPackageRecommends', 'rhn-package-recommends', 'rhn-package-recommends-entry'],
            ['rhnPackageSuggests', 'rhn-package-suggests', 'rhn-package-suggests-entry'],
            ['rhnPackageSupplements', 'rhn-package-supplements', 'rhn-package-supplements-entry'],
            ['rhnPackageEnhances', 'rhn-package-enhances', 'rhn-package-enhances-entry'],
            ['rhnPackageBreaks', 'rhn-package-breaks', 'rhn-package-breaks-entry'],
            ['rhnPackagePredepends', 'rhn-package-predepends', 'rhn-package-predepends-entry'],
        ]
        for table_name, container_name, entry_name in mappings:
            h = rhnSQL.prepare("""
                select pc.name, pc.version, pd.sense
                from %s pd, rhnPackageCapability pc
                where pd.capability_id = pc.id
                and pd.package_id = :package_id
            """ % table_name)
            h.execute(package_id=self._row['id'])
            arr.append(_DependencyDumper(self._writer, data_iterator=h,
                                         container_name=container_name,
                                         entry_name=entry_name))

        # Files
        h = rhnSQL.prepare("""
            select
                pc.name, pf.device, pf.inode, pf.file_mode, pf.username,
                pf.groupname, pf.rdev, pf.file_size,
                TO_CHAR(mtime, 'YYYYMMDDHH24MISS') mtime,
                c.checksum_type as "checksum-type",
                c.checksum, pf.linkto, pf.flags, pf.verifyflags, pf.lang
            from rhnPackageFile pf
            left join rhnChecksumView c
            on pf.checksum_id = c.id,
            rhnPackageCapability pc
            where pf.capability_id = pc.id
            and pf.package_id = :package_id
        """)
        h.execute(package_id=self._row['id'])
        arr.append(_PackageFilesDumper(self._writer, data_iterator=h))
        return ArrayIterator(arr)
class PackagesDumper(BaseSubelementDumper, BaseQueryDumper):
    """Dumps the <rhn-packages> container; one _PackageDumper per package row."""
    tag_name = 'rhn-packages'
    subelement_dumper_class = _PackageDumper
    def set_iterator(self):
        # Rows come from the SQL cursor prepared by BaseQueryDumper.
        return BaseQueryDumper.set_iterator(self)
##
class ShortPackageEntryDumper(BaseChecksumRowDumper):
    """Dumps one <rhn-package-short> element (abbreviated package metadata)."""
    tag_name = 'rhn-package-short'
    def set_attributes(self):
        """Build the XML attribute dict for the current package row."""
        row = self._row
        attr = {
            'id': "rhn-package-%s" % row['id'],
            'name': row['name'],
            'version': row['version'],
            'release': row['release'],
            'epoch': row['epoch'] or "",
            'package-arch': row['package_arch'],
            'package-size': row['package_size'],
            'last-modified': _dbtime2timestamp(row['last_modified']),
            'org-id': row['org_id'] or "",
        }
        # Older satellites only understand an md5sum attribute, so emit it
        # whenever the stored checksum happens to be MD5.
        if row['checksum_type'] == 'md5':
            attr['md5sum'] = row['checksum']
        return attr
class ShortPackagesDumper(BaseSubelementDumper, BaseQueryDumper):
    """Dumps the <rhn-packages-short> container of ShortPackageEntryDumper items."""
    tag_name = 'rhn-packages-short'
    subelement_dumper_class = ShortPackageEntryDumper
    def set_iterator(self):
        # Rows come from the SQL cursor prepared by BaseQueryDumper.
        return BaseQueryDumper.set_iterator(self)
##
class SourcePackagesDumper(BaseQueryDumper):
    """Dumps <rhn-source-packages>; one empty <rhn-source-package/> per row."""
    tag_name = 'rhn-source-packages'
    def dump_subelement(self, data):
        """Emit a single <rhn-source-package/> element for one DB row."""
        copied_columns = [
            "id", "source_rpm", "package_group", "rpm_version",
            "payload_size", "build_host", "sigchecksum_type", "sigchecksum", "vendor",
            "cookie", "package_size", "checksum_type", "checksum"
        ]
        # DB columns use underscores while XML attributes use dashes.
        attributes = dict(
            (column.replace('_', '-'), data[column]) for column in copied_columns)
        attributes['id'] = "rhn-source-package-%s" % data['id']
        # Time columns are YYYYMMDDHH24MISS strings; export epoch seconds.
        attributes['build-time'] = _dbtime2timestamp(data['build_time'])
        attributes['last-modified'] = _dbtime2timestamp(data['last_modified'])
        EmptyDumper(self._writer, 'rhn-source-package',
                    attributes=attributes).dump()
##
class _ChecksumDumper(BaseDumper):
    """Dumps a <checksums> container of empty <checksum type=... value=...> tags."""
    tag_name = 'checksums'
    def dump_subelement(self, data):
        # Each row carries the checksum algorithm name and its digest value.
        c = EmptyDumper(self._writer, 'checksum', attributes={
            'type': data['type'],
            'value': data['value'],
        })
        c.dump()
##
class _ChangelogEntryDumper(BaseRowDumper):
    """Dumps one <rhn-package-changelog-entry> (author name, text, timestamp)."""
    tag_name = 'rhn-package-changelog-entry'
    def set_iterator(self):
        """Return an iterator over the child dumpers for this changelog row."""
        dumpers = [
            SimpleDumper(self._writer, tag, self._row[column])
            for tag, column in (
                ('rhn-package-changelog-entry-name', 'name'),
                ('rhn-package-changelog-entry-text', 'text'),
            )
        ]
        # Time is stored as a YYYYMMDDHH24MISS string; export epoch seconds.
        dumpers.append(SimpleDumper(self._writer, 'rhn-package-changelog-entry-time',
                                    _dbtime2timestamp(self._row['time'])))
        return ArrayIterator(dumpers)
class _ChangelogDumper(BaseSubelementDumper):
    """Container dumper for a package's changelog entries."""
    tag_name = 'rhn-package-changelog'
    subelement_dumper_class = _ChangelogEntryDumper
##
class _DependencyDumper(BaseDumper):
    """Dumps one dependency container (requires/provides/obsoletes/...).

    The container and entry tag names are passed in by the caller, so a single
    class serves every rhnPackage* dependency table.
    """
    def __init__(self, writer, data_iterator, container_name, entry_name):
        self.tag_name = container_name
        self.entry_name = entry_name
        BaseDumper.__init__(self, writer, data_iterator=data_iterator)
    def dump_subelement(self, data):
        # 'sense' carries the RPM dependency comparison flags for the version.
        d = EmptyDumper(self._writer, self.entry_name, attributes={
            'name': data['name'],
            'version': data['version'] or "",
            'sense': data['sense'],
        })
        d.dump()
# Files
class _PackageFilesDumper(BaseDumper):
    """Dumps the <rhn-package-files> container; one <rhn-package-file/> per row."""
    tag_name = 'rhn-package-files'
    def dump_subelement(self, data):
        # Normalize nullable columns in place: the whole (mutated) row dict is
        # then used directly as the element's attribute set.
        data['mtime'] = _dbtime2timestamp(data['mtime'])
        data['checksum-type'] = data['checksum-type'] or ""
        data['checksum'] = data['checksum'] or ""
        if data['checksum-type'] in ('md5', ''):
            # generate md5="..." attribute
            # for compatibility with older satellites
            data['md5'] = data['checksum']
        data['linkto'] = data['linkto'] or ""
        data['lang'] = data['lang'] or ""
        d = EmptyDumper(self._writer, 'rhn-package-file',
                        attributes=data)
        d.dump()
# Errata
class _ErratumDumper(BaseRowDumper):
    """Dumps one <rhn-erratum> element.

    Attributes reference the related channels, packages and CVE names; child
    elements carry the erratum text fields, keywords, bugs and errata files.
    """
    tag_name = 'rhn-erratum'
    def set_attributes(self):
        # Labels of the channels this erratum is published in.
        h = rhnSQL.prepare("""
            select c.label
            from rhnChannelErrata ec, rhnChannel c
            where ec.channel_id = c.id
            and ec.errata_id = :errata_id
        """)
        h.execute(errata_id=self._row['id'])
        channels = [x['label'] for x in h.fetchall_dict() or []]
        # Packages shipped by this erratum, referenced as "rhn-package-<id>".
        h = rhnSQL.prepare("""
            select ep.package_id
            from rhnErrataPackage ep
            where ep.errata_id = :errata_id
        """)
        h.execute(errata_id=self._row['id'])
        packages = ["rhn-package-%s" % x['package_id'] for x in
                    h.fetchall_dict() or []]
        # CVE names addressed by this erratum.
        h = rhnSQL.prepare("""
            select c.name cve
            from rhnErrataCVE ec, rhnCVE c
            where ec.errata_id = :errata_id
            and ec.cve_id = c.id
        """)
        h.execute(errata_id=self._row['id'])
        cves = [x['cve'] for x in h.fetchall_dict() or []]
        # Multi-valued attributes are serialized as space-separated lists.
        return {
            'id': 'rhn-erratum-%s' % self._row['id'],
            'org_id': self._row['org_id'] or "",
            'advisory': self._row['advisory'],
            'channels': ' '.join(channels),
            'packages': ' '.join(packages),
            'cve-names': ' '.join(cves),
        }
    # Extra SQL restriction substituted into the errata-file query below;
    # subclasses may override it to filter by file type.
    type_id_column = ""
    def set_iterator(self):
        arr = []
        # (tag name, row column, maximum field length) for simple text children.
        mappings = [
            ('rhn-erratum-advisory-name', 'advisory_name', 100),
            ('rhn-erratum-advisory-rel', 'advisory_rel', 32),
            ('rhn-erratum-advisory-type', 'advisory_type', 32),
            ('rhn-erratum-product', 'product', 64),
            ('rhn-erratum-description', 'description', 4000),
            ('rhn-erratum-synopsis', 'synopsis', 4000),
            ('rhn-erratum-topic', 'topic', 4000),
            ('rhn-erratum-solution', 'solution', 4000),
            ('rhn-erratum-refers-to', 'refers_to', 4000),
            ('rhn-erratum-notes', 'notes', 4000),
            ('rhn-erratum-errata-from', 'errata_from', 127),
        ]
        for k, v, b in mappings:
            arr.append(SimpleDumper(self._writer, k, self._row[v] or "", b))
        # Date columns are YYYYMMDDHH24MISS strings; export epoch seconds.
        arr.append(SimpleDumper(self._writer, 'rhn-erratum-issue-date',
                                _dbtime2timestamp(self._row['issue_date'])))
        arr.append(SimpleDumper(self._writer, 'rhn-erratum-update-date',
                                _dbtime2timestamp(self._row['update_date'])))
        arr.append(SimpleDumper(self._writer, 'rhn-erratum-last-modified',
                                _dbtime2timestamp(self._row['last_modified'])))
        # Free-form keywords attached to the erratum.
        h = rhnSQL.prepare("""
            select keyword
            from rhnErrataKeyword
            where errata_id = :errata_id
        """)
        h.execute(errata_id=self._row['id'])
        arr.append(_ErratumKeywordDumper(self._writer, data_iterator=h))
        # Bugzilla entries fixed by the erratum.
        h = rhnSQL.prepare("""
            select bug_id, summary, href
            from rhnErrataBuglist
            where errata_id = :errata_id
        """)
        h.execute(errata_id=self._row['id'])
        arr.append(_ErratumBuglistDumper(self._writer, data_iterator=h))
        # Files (RPM/SRPM) shipped with the erratum; the outer joins keep files
        # that have no package association. type_id_column narrows the set.
        _query_errata_file_info = """
            select ef.id errata_file_id, c.checksum_type, c.checksum,
            ef.filename, eft.label as type,
            efp.package_id, efps.package_id as source_package_id
            from rhnErrataFile ef left outer join rhnErrataFilePackage efp on ef.id = efp.errata_file_id
            left outer join rhnErrataFilePackageSource efps on ef.id = efps.errata_file_id,
            rhnErrataFileType eft, rhnChecksumView c
            where ef.errata_id = :errata_id
            and ef.type = eft.id
            and ef.checksum_id = c.id
            %s
        """
        h = rhnSQL.prepare(_query_errata_file_info % self.type_id_column)
        h.execute(errata_id=self._row['id'])
        arr.append(_ErratumFilesDumper(self._writer, data_iterator=h))
        return ArrayIterator(arr)
class ErrataDumper(BaseSubelementDumper):
    """Container dumper for <rhn-errata>; subclasses must supply the iterator."""
    tag_name = 'rhn-errata'
    subelement_dumper_class = _ErratumDumper
    def set_iterator(self):
        # Guard clause: without an iterator there is nothing sensible to dump.
        if not self._iterator:
            raise NotImplementedError("To be overridden in a child class")
        return self._iterator
class _ErratumKeywordDumper(BaseDumper):
    """Dumps <rhn-erratum-keywords>; one simple element per keyword row."""
    tag_name = 'rhn-erratum-keywords'
    def dump_subelement(self, data):
        d = SimpleDumper(self._writer, 'rhn-erratum-keyword', data['keyword'])
        d.dump()
class _ErratumBugDumper(BaseRowDumper):
    """Dumps one <rhn-erratum-bug> with its id, summary and URL children."""
    tag_name = 'rhn-erratum-bug'
    def set_iterator(self):
        # NOTE(review): summary is normalized to "" on NULL but href is not --
        # confirm href is guaranteed non-null in rhnErrataBuglist.
        children = (
            ('rhn-erratum-bug-id', self._row['bug_id']),
            ('rhn-erratum-bug-summary', self._row['summary'] or ""),
            ('rhn-erratum-bug-href', self._row['href']),
        )
        return ArrayIterator(
            [SimpleDumper(self._writer, tag, value) for tag, value in children])
class _ErratumBuglistDumper(BaseSubelementDumper):
    """Container dumper for an erratum's Bugzilla entries."""
    tag_name = 'rhn-erratum-bugs'
    subelement_dumper_class = _ErratumBugDumper
class _ErratumFileEntryDumper(BaseChecksumRowDumper):
    """Dumps one <rhn-erratum-file> element (checksum, channels, package refs)."""
    tag_name = 'rhn-erratum-file'
    def set_attributes(self):
        attributes = {
            'filename': self._row['filename'][:4000],
            'type': self._row['type'],
        }
        # Older satellites only understand an md5sum attribute.
        if self._row['checksum_type'] == 'md5':
            attributes['md5sum'] = self._row['checksum']
        # Compute the channels for this file
        h = rhnSQL.prepare("""
            select c.label
            from rhnErrataFileChannel efc, rhnChannel c
            where efc.errata_file_id = :errata_file_id
            and efc.channel_id = c.id
        """)
        h.execute(errata_file_id=self._row['errata_file_id'])
        channels = ' '.join(
            [x['label'] for x in h.fetchall_dict() or []])
        if channels:
            attributes['channels'] = channels
        # Get the package id or source_package_id
        if self._row['type'] == 'RPM':
            package_id = self._row['package_id']
            if package_id is not None:
                attributes['package'] = 'rhn-package-%s' % package_id
        elif self._row['type'] == 'SRPM':
            package_id = self._row['source_package_id']
            if package_id is not None:
                attributes['source-package'] = 'rhn-package-source-%s' % package_id
        return attributes
class _ErratumFilesDumper(BaseSubelementDumper):
    """Container dumper for an erratum's files."""
    tag_name = 'rhn-erratum-files'
    subelement_dumper_class = _ErratumFileEntryDumper
# Arches
class BaseArchesDumper(BaseDumper):
    """Base class for dumping an arch table as empty <tag id/label/name/> tags.

    Subclasses set table_name (the SQL table to read) and subelement_tag
    (the XML element emitted for each row).
    """
    table_name = 'foo'
    subelement_tag = 'foo'
    def set_iterator(self):
        h = rhnSQL.prepare("""
            select id, label, name
            from %s
        """ % self.table_name)
        h.execute()
        return h
    def dump_subelement(self, data):
        attributes = {
            # Synthetic id like "rhn-cpu-arch-id-<n>".
            'id': "%s-id-%s" % (self.subelement_tag, data['id']),
            'label': data['label'],
            'name': data['name'],
        }
        EmptyDumper(self._writer, self.subelement_tag, attributes).dump()
class RestrictedArchesDumper(BaseArchesDumper):
    """Arch dumper that can optionally restrict rows to the 'rpm' arch type
    and also exports the arch-type label/name of each row."""
    def __init__(self, writer, data_iterator=None, rpm_arch_type_only=0):
        BaseArchesDumper.__init__(self, writer=writer,
                                  data_iterator=data_iterator)
        # When true, only arches whose rhnArchType label is 'rpm' are dumped.
        self.rpm_arch_type_only = rpm_arch_type_only
    def set_iterator(self):
        query_templ = """
            select aa.id, aa.label, aa.name,
            at.label arch_type_label, at.name arch_type_name
            from %s aa,
            rhnArchType at
            where aa.arch_type_id = at.id
            %s
        """
        if self.rpm_arch_type_only:
            h = rhnSQL.prepare(query_templ % (self.table_name, "and at.label = 'rpm'"))
        else:
            h = rhnSQL.prepare(query_templ % (self.table_name, ""))
        h.execute()
        return h
    def dump_subelement(self, data):
        attributes = {
            'id': "%s-id-%s" % (self.subelement_tag, data['id']),
            'label': data['label'],
            'name': data['name'],
            'arch-type-label': data['arch_type_label'],
            'arch-type-name': data['arch_type_name'],
        }
        EmptyDumper(self._writer, self.subelement_tag, attributes).dump()
class ChannelArchesDumper(RestrictedArchesDumper):
    """Dumps the channel arches known to the server."""
    tag_name = 'rhn-channel-arches'
    subelement_tag = 'rhn-channel-arch'
    table_name = 'rhnChannelArch'
class PackageArchesDumper(RestrictedArchesDumper):
    """Dumps the package arches known to the server."""
    tag_name = 'rhn-package-arches'
    subelement_tag = 'rhn-package-arch'
    table_name = 'rhnPackageArch'
class ServerArchesDumper(RestrictedArchesDumper):
    """Dumps the server arches known to the server."""
    tag_name = 'rhn-server-arches'
    subelement_tag = 'rhn-server-arch'
    table_name = 'rhnServerArch'
class CPUArchesDumper(BaseArchesDumper):
    """Dumps the CPU arches known to the server (no arch-type restriction)."""
    tag_name = 'rhn-cpu-arches'
    subelement_tag = 'rhn-cpu-arch'
    table_name = 'rhnCPUArch'
class RestrictedArchCompatDumper(BaseArchesDumper):
    """Base class for arch-compatibility-map dumpers.

    Subclasses provide the two queries (rpm-only and unrestricted) and the
    per-row element tag.
    """
    _query_rpm_arch_type_only = ""
    _query_arch_type_all = ""
    _subelement_tag = ""
    def __init__(self, writer, data_iterator=None, rpm_arch_type_only=0, virt_filter=0):
        BaseArchesDumper.__init__(self, writer=writer,
                                  data_iterator=data_iterator)
        self.rpm_arch_type_only = rpm_arch_type_only
        self.virt_filter = virt_filter
    def set_iterator(self):
        _virt_filter_sql = ""
        if self.virt_filter:
            # Exclude virt-* server group types when virtualization is filtered.
            _virt_filter_sql = """and sgt.label not like 'virt%'"""
        # Only the server-group/server-arch subclass has a %s placeholder for
        # the virt filter in its queries; the others use plain statements.
        if self._subelement_tag == 'rhn-server-group-server-arch-compat':
            if self.rpm_arch_type_only:
                h = rhnSQL.prepare(self._query_rpm_arch_type_only % _virt_filter_sql)
            else:
                h = rhnSQL.prepare(self._query_arch_type_all % _virt_filter_sql)
        else:
            if self.rpm_arch_type_only:
                h = rhnSQL.prepare(self._query_rpm_arch_type_only)
            else:
                h = rhnSQL.prepare(self._query_arch_type_all)
        h.execute()
        return h
    def dump_subelement(self, data):
        # The row's columns map one-to-one onto the element's attributes.
        EmptyDumper(self._writer, self._subelement_tag, data).dump()
class ServerPackageArchCompatDumper(RestrictedArchCompatDumper):
    """Dumps the server-arch to package-arch compatibility map."""
    tag_name = 'rhn-server-package-arch-compatibility-map'
    _subelement_tag = 'rhn-server-package-arch-compat'
    # Variant restricted to arches whose arch type is 'rpm'.
    _query_rpm_arch_type_only = rhnSQL.Statement("""
        select sa.label "server-arch",
        pa.label "package-arch",
        spac.preference
        from rhnServerPackageArchCompat spac,
        rhnServerArch sa,
        rhnPackageArch pa,
        rhnArchType aas,
        rhnArchType aap
        where spac.server_arch_id = sa.id
        and spac.package_arch_id = pa.id
        and sa.arch_type_id = aas.id
        and aas.label = 'rpm'
        and pa.arch_type_id = aap.id
        and aap.label = 'rpm'
    """)
    # Unrestricted variant: all arch types.
    _query_arch_type_all = rhnSQL.Statement("""
        select sa.label "server-arch",
        pa.label "package-arch",
        spac.preference
        from rhnServerPackageArchCompat spac,
        rhnServerArch sa,
        rhnPackageArch pa
        where spac.server_arch_id = sa.id
        and spac.package_arch_id = pa.id
    """)
class ServerChannelArchCompatDumper(RestrictedArchCompatDumper):
    """Dumps the server-arch to channel-arch compatibility map."""
    tag_name = 'rhn-server-channel-arch-compatibility-map'
    _subelement_tag = 'rhn-server-channel-arch-compat'
    # Variant restricted to arches whose arch type is 'rpm'.
    _query_rpm_arch_type_only = rhnSQL.Statement("""
        select sa.label "server-arch",
        ca.label "channel-arch"
        from rhnServerChannelArchCompat scac,
        rhnServerArch sa,
        rhnChannelArch ca,
        rhnArchType aas,
        rhnArchType aac
        where scac.server_arch_id = sa.id
        and scac.channel_arch_id = ca.id
        and sa.arch_type_id = aas.id
        and aas.label = 'rpm'
        and ca.arch_type_id = aac.id
        and aac.label = 'rpm'
    """)
    # Unrestricted variant: all arch types.
    _query_arch_type_all = rhnSQL.Statement("""
        select sa.label "server-arch",
        ca.label "channel-arch"
        from rhnServerChannelArchCompat scac,
        rhnServerArch sa,
        rhnChannelArch ca
        where scac.server_arch_id = sa.id
        and scac.channel_arch_id = ca.id
    """)
class ChannelPackageArchCompatDumper(RestrictedArchCompatDumper):
    """Dumps the channel-arch to package-arch compatibility map."""
    tag_name = 'rhn-channel-package-arch-compatibility-map'
    _subelement_tag = 'rhn-channel-package-arch-compat'
    # Variant restricted to arches whose arch type is 'rpm'.
    _query_rpm_arch_type_only = rhnSQL.Statement("""
        select ca.label "channel-arch",
        pa.label "package-arch"
        from rhnChannelPackageArchCompat cpac,
        rhnChannelArch ca,
        rhnPackageArch pa,
        rhnArchType aac,
        rhnArchType aap
        where cpac.channel_arch_id = ca.id
        and cpac.package_arch_id = pa.id
        and ca.arch_type_id = aac.id
        and aac.label = 'rpm'
        and pa.arch_type_id = aap.id
        and aap.label = 'rpm'
    """)
    # Unrestricted variant: all arch types.
    _query_arch_type_all = rhnSQL.Statement("""
        select ca.label "channel-arch",
        pa.label "package-arch"
        from rhnChannelPackageArchCompat cpac,
        rhnChannelArch ca,
        rhnPackageArch pa
        where cpac.channel_arch_id = ca.id
        and cpac.package_arch_id = pa.id
    """)
class ServerGroupTypeServerArchCompatDumper(RestrictedArchCompatDumper):
    """Dumps the server-group-type to server-arch compatibility map.

    Unlike its siblings, the queries here are plain strings with a %s
    placeholder: the base class substitutes the optional virt filter into them.
    """
    tag_name = 'rhn-server-group-server-arch-compatibility-map'
    _subelement_tag = 'rhn-server-group-server-arch-compat'
    _query_rpm_arch_type_only = """
        select sgt.label "server-group-type",
        sa.label "server-arch"
        from rhnServerGroupType sgt,
        rhnServerArch sa,
        rhnArchType aas,
        rhnServerServerGroupArchCompat ssgac
        where ssgac.server_arch_id = sa.id
        and sa.arch_type_id = aas.id
        and aas.label = 'rpm'
        and ssgac.server_group_type = sgt.id
        %s
    """
    # Kept as a plain string (not rhnSQL.Statement) so the virt filter can be
    # interpolated before preparation.
    #_query_arch_type_all = rhnSQL.Statement("""
    _query_arch_type_all = """
        select sgt.label "server-group-type",
        sa.label "server-arch"
        from rhnServerGroupType sgt,
        rhnServerArch sa,
        rhnServerServerGroupArchCompat ssgac
        where ssgac.server_arch_id = sa.id
        and ssgac.server_group_type = sgt.id
        %s
    """
class BlacklistObsoletesDumper(BaseDumper):
    """Dumps a deliberately empty <rhn-blacklist-obsoletes/> element."""
    tag_name = 'rhn-blacklist-obsoletes'
    def dump(self):
        # Older consumers require the file to exist, so write an explanatory
        # XML comment followed by the empty tag.
        note = """\n<!-- This file is intentionally left empty.
Older Satellites and Spacewalks require this file to exist in the dump. -->\n"""
        self._writer.stream.write(note)
        self._writer.empty_tag(self.tag_name)
class _KickstartableTreeDumper(BaseRowDumper):
    """Dumps one <rhn-kickstartable-tree> element plus its file list."""
    tag_name = 'rhn-kickstartable-tree'
    def set_attributes(self):
        # The query's columns are already attribute-named; reuse the row dict
        # minus the numeric id (which only keys the file lookup below).
        row_dict = self._row.copy()
        del row_dict['id']
        last_modified = row_dict['last-modified']
        row_dict['last-modified'] = _dbtime2timestamp(last_modified)
        return row_dict
    def set_iterator(self):
        kstree_id = self._row['id']
        h = rhnSQL.prepare("""
            select relative_filename,
            c.checksum_type,
            c.checksum,
            file_size,
            TO_CHAR(last_modified, 'YYYYMMDDHH24MISS') "last-modified"
            from rhnKSTreeFile, rhnChecksumView c
            where kstree_id = :kstree_id
            and checksum_id = c.id
        """)
        h.execute(kstree_id=kstree_id)
        return ArrayIterator([_KickstartFilesDumper(self._writer, h)])
class KickstartableTreesDumper(BaseSubelementDumper, BaseQueryDumper):
    """Dumps <rhn-kickstartable-trees>; only org-less (vendor) trees are exported."""
    tag_name = 'rhn-kickstartable-trees'
    subelement_dumper_class = _KickstartableTreeDumper
    iterator_query = """
        select kt.id,
        c.label channel,
        kt.base_path "base-path",
        kt.label,
        kt.boot_image "boot-image",
        ktt.name "kstree-type-name",
        ktt.label "kstree-type-label",
        kit.name "install-type-name",
        kit.label "install-type-label",
        TO_CHAR(kt.last_modified, 'YYYYMMDDHH24MISS') "last-modified"
        from rhnKickstartableTree kt,
        rhnKSTreeType ktt,
        rhnKSInstallType kit,
        rhnChannel c
        where kt.channel_id = c.id
        and ktt.id = kt.kstree_type
        and kit.id = kt.install_type
        and kt.org_id is NULL
    """
    def set_iterator(self):
        # Rows come from iterator_query via BaseQueryDumper.
        return BaseQueryDumper.set_iterator(self)
class _KickstartFileEntryDumper(BaseChecksumRowDumper):
    """Dumps one <rhn-kickstart-file> element for a kickstart tree file."""
    tag_name = 'rhn-kickstart-file'
    def set_attributes(self):
        attr = {
            'relative-path': self._row['relative_filename'],
            'file-size': self._row['file_size'],
            'last-modified': _dbtime2timestamp(self._row['last-modified']),
        }
        # Older satellites only understand an md5sum attribute.
        if self._row['checksum_type'] == 'md5':
            attr['md5sum'] = self._row['checksum']
        return attr
class _KickstartFilesDumper(BaseSubelementDumper):
    """Container dumper for a kickstart tree's files."""
    tag_name = 'rhn-kickstart-files'
    subelement_dumper_class = _KickstartFileEntryDumper
def _dbtime2timestamp(val):
    """Convert a DB 'YYYYMMDDHH24MISS' time string to integer epoch seconds."""
    return int(rhnLib.timestamp(val))
class ProductNamesDumper(BaseDumper):
    """Dumps <rhn-product-names>; one empty <rhn-product-name/> per row."""
    tag_name = "rhn-product-names"
    def dump_subelement(self, data):
        # The row's columns map directly onto the element's attributes.
        EmptyDumper(self._writer, 'rhn-product-name', data).dump()
| ogajduse/spacewalk | backend/satellite_tools/exporter/exportLib.py | Python | gpl-2.0 | 53,448 |
"""
[2017-04-07] Challenge #309 [Hard] Patterns overlap
https://www.reddit.com/r/dailyprogrammer/comments/641zpj/20170407_challenge_309_hard_patterns_overlap/
Taken from practice problem for google code jam (which starts tonight)
Input consists of 2 strings, where:
* each string may include `*` wildcard(s)
* `*` wildcards may be substituted with any string of length 0 to 4
The challenge is to return `True` if there exists a substitution of `*`s in both strings that make the 2 strings
identical.
Sample:
Shakes*e
S*speare
**output:**
True - 1st string can replace `*` with `pear` and 2nd string can replace `*` with `hake`
**sample 2:**
a*baa**ba**aa
*ca*b**a*baac
can be quickly determined false in that the first string cannot be made to end in c.
a*baa**ba**aa
*ca*b**a*baaa
True: both strings can be made into `acabaabaaa`
**Challenges:**
bb*aaaaa*ba**
*baabb*b*aaaa
dnKeeuCCyHOnobnDYMGoXDdNWhTsaoedbPifJ*ki*wWfXjIUwqItTmGqtAItoNWpDeUnNCWgZsKWbuQxKaqemXuFXDylQubuZWhMyDsXvDSwYjui*LviGAEkyQbtR*cELfxiAbbYyJRGtcsoJZppINgJGYeZKGeWLbenBEKaoCgheYwOxLeFZJPGhTFRAjNn
d*eeuCCyHOnobnDYMGoXDdNWhTsaoedbP*ijrwWfXjIUwqItTmGqtAItoNWpDeUnNCWgZs*WbuQxKaqemXuFXDylQubuZWhMyDsXvDSwYjuijkLviGAEkyQbtRUsncELfxiAbbYyJRG*soJZppINgJGYeZKGeWLbenBEKaoCghe*YwOxLeFZJPGhTFRAjN
THAkZYrkUWgcTpZ*SsNQKsEnvdUveZxssEtCEQuoMqToJjMdCatMs*v*GyMlROpiIDUZyJjhwmjxFWpEwDgRLlLsJYebMSkwxEUvoDcLPLIwHY*GvoRhgcfkdsenObSjWGNYRDJAzRzavAGRoZZ*fDXIRlJkufqHDjLMJKEjLAkRRyQqTrUaWRIndS
*THAkZYrkUWgcTpZSsNQKsEnvdUveZxssEtCEQuoMqToJjMdCatMsYa*nBvIFuGyMlROpiIDUZyJjh*FWpEwDgRLlLsJYebMSkw*oDcLPLIwHYbeBGvoRhgcfkdsenObSjWGNYRDJAzRzavAGRoZZvbEfDXIRlJkufqHDjLMJKEjLAkRRyQqTrU*aWRIndS
jEAmXdDUtthXNLbIZFeWdiQPGEvyCEeLI**EyficABUH*YiSZRREvniDexKJSjLXMYfsw*YlbTSZBlYSecorJsWidfALQYzOdrKNrJZRdrQEDoyhPMYAfTiHZIuqGtEkKqYBzxtCOJhRYfZNSYNxRWFrfahlSLvdBTebrXDgGlZEqxRIvGhN*mfhLLSExNHaHLAZ
jEAmXdDUtthXNLbIZFeWdiQPGEvyCEeL**BUHYiSZRREvniDexKJSjLXMYfswlaYlbTSZBlYSecorJsWidfALQYzOdrKNrJZ*EDoyhPMYAfTiHZIuqGtEkKqYBzxtC*YfZNSYNxRWFrfahlSLvdBT*ebrXDgGlZEqxRIvGhNcmfhLLSExNHaHLAZ
"""
def main():
    # Placeholder entry point: the pattern-overlap solver described in the
    # module docstring is not implemented yet.
    pass
if __name__ == "__main__":
    main()
| DayGitH/Python-Challenges | DailyProgrammer/DP20170407C.py | Python | mit | 2,146 |
# -*- coding: utf-8 -*-
__author__ = 'jb'
class FieldRegistry(object):
    """Process-wide registry mapping models to their tracked fields.

    The registry dict lives on the class, so every instance (and subclass)
    shares the same mapping.
    """
    _registry = {}

    def add_field(self, model, field):
        """Append *field* to the list registered for *model*."""
        self.__class__._registry.setdefault(model, []).append(field)

    def get_fields(self, model):
        """Return the fields registered for *model* (empty list if none)."""
        return self.__class__._registry.get(model, [])

    def __contains__(self, model):
        return model in self.__class__._registry
| wnt-zhp/hufce | current_user/registration.py | Python | gpl-3.0 | 393 |
import os
import sys
from pathlib import Path
from setuptools import find_packages, setup
from setuptools.command.install import install
# Single source of truth for the release version, read from quilt3/VERSION.
VERSION = Path(Path(__file__).parent, "quilt3", "VERSION").read_text().strip()
def readme():
    """Return the short rST description used as the package long_description."""
    return """
    Quilt manages data like code (with packages, repositories, browsing and
    revision history) so that teams can experiment faster in machine learning,
    biotech, and other data-driven domains.
    The `quilt3` PyPi package allows you to build, push, and install data packages.
    Visit the `documentation quickstart <https://docs.quiltdata.com/quickstart>`_
    to learn more.
    """
class VerifyVersionCommand(install):
    """Custom command to verify that the git tag matches our version"""
    description = 'verify that the git tag matches our version'
    def run(self):
        # CircleCI exposes the tag that triggered the build; the release is
        # aborted unless it equals the version shipped in quilt3/VERSION.
        tag = os.getenv('CIRCLE_TAG')
        if tag != VERSION:
            info = "Git tag: {0} does not match the version of this app: {1}".format(
                tag, VERSION
            )
            sys.exit(info)
# Package metadata and dependency declarations for the quilt3 distribution.
setup(
    name="quilt3",
    version=VERSION,
    packages=find_packages(),
    description='Quilt: where data comes together',
    long_description=readme(),
    python_requires='>=3.6',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    author='quiltdata',
    author_email='contact@quiltdata.io',
    license='Apache-2.0',
    url='https://github.com/quiltdata/quilt',
    keywords='',
    install_requires=[
        'appdirs>=1.4.0',
        'aws-requests-auth>=0.4.2',
        'boto3>=1.10.0',
        'dnspython>=1.16.0',
        'flask',
        'flask_cors',
        'flask_json',
        'jsonlines==1.2.0',
        'PyYAML>=5.1',
        'requests>=2.12.4',
        'tenacity>=5.1.1',
        'tqdm>=4.32',
        'requests_futures==1.0.0',
        'jsonschema==3.*',
    ],
    extras_require={
        'pyarrow': [
            'numpy>=1.14.0',  # required by pandas, but missing from its dependencies.
            'pandas>=0.19.2',
            'pyarrow>=0.14.1',  # as of 7/5/19: linux/circleci bugs on 0.14.0
        ],
        'tests': [
            # BUG FIX: the urllib3 pin previously had no trailing comma, so
            # Python's implicit string-literal concatenation fused it with the
            # numpy line into the invalid requirement
            # 'urllib3<1.26,>=1.25.4numpy>=1.14.0'.
            'urllib3<1.26,>=1.25.4',  # https://github.com/quiltdata/quilt/pull/1903
            'numpy>=1.14.0',  # required by pandas, but missing from its dependencies.
            'pandas>=0.19.2',
            'pyarrow>=0.14.1',  # as of 7/5/19: linux/circleci bugs on 0.14.0
            'pytest<5.1.0',  # TODO: Fix pytest.ensuretemp in conftest.py
            'pytest-cov',
            'pytest-env',
            'pytest-subtests',
            'responses',
            'git-pylint-commit-hook',
        ],
    },
    include_package_data=True,
    entry_points={
        'console_scripts': ['quilt3=quilt3.main:main'],
    },
    cmdclass={
        'verify': VerifyVersionCommand,
    }
)
| quiltdata/quilt-compiler | api/python/setup.py | Python | apache-2.0 | 3,232 |
from django.urls import path
from ..views import (
MyRequestsView,
DraftRequestsView,
FoiProjectListView,
FollowingRequestsView,
RequestSubscriptionsView,
)
# Account-area routes: each view lists a different slice of the user's
# FOI-request-related objects (own requests, drafts, projects, follows,
# and notification subscriptions).
urlpatterns = [
    path("requests/", MyRequestsView.as_view(), name="account-requests"),
    path("drafts/", DraftRequestsView.as_view(), name="account-drafts"),
    path("projects/", FoiProjectListView.as_view(), name="account-projects"),
    path("following/", FollowingRequestsView.as_view(), name="account-following"),
    path(
        "subscriptions/",
        RequestSubscriptionsView.as_view(),
        name="account-subscriptions",
    ),
]
| fin/froide | froide/foirequest/urls/account_urls.py | Python | mit | 630 |
#
# Copyright (C) 2011-2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""
Module providing the LangLocaleHandler class that could be used as a mixin for
screens handling languages or locales configuration.
"""
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("Pango", "1.0")
gi.require_version("GdkPixbuf", "2.0")
from gi.repository import Gtk, Pango, GdkPixbuf
from pyanaconda import localization
from pyanaconda.core.util import strip_accents
from pyanaconda.ui.gui.utils import set_treeview_selection, timed_action, override_cell_property
class LangLocaleHandler(object):
    """
    Class that could be used as a mixin for screens handling languages or
    locales configuration.
    """
    def __init__(self, payload):
        # the class inheriting from this class is responsible for populating
        # these items with actual objects
        self._languageStore = None
        self._languageStoreFilter = None
        self._languageEntry = None
        self._langSelection = None
        self._langSelectedRenderer = None
        self._langSelectedColumn = None
        self._langView = None
        self._localeView = None
        self._localeStore = None
        self._localeSelection = None
        self._right_arrow = None
        self._left_arrow = None
        self.payload = payload
    def initialize(self):
        """Load arrow icons, populate the language list and hook up filtering."""
        # Load arrows from resources. Unfortunately, Gtk.Image.new_from_resource does not
        # work for some reason, so we should use GdkPixbuf.Pixbuf.new_from_resource instead.
        resource_path = "/org/fedoraproject/anaconda/widgets/"
        self._right_arrow = GdkPixbuf.Pixbuf.new_from_resource(resource_path + "right-arrow-icon.png")
        self._left_arrow = GdkPixbuf.Pixbuf.new_from_resource(resource_path + "left-arrow-icon.png")
        # Render an arrow for the chosen language.
        override_cell_property(self._langSelectedColumn, self._langSelectedRenderer,
                               "pixbuf", self._render_lang_selected)
        # fill the list with available translations
        langs = localization.get_available_translations()
        langs = self._filter_languages(langs)
        for lang in langs:
            self._add_language(self._languageStore,
                               localization.get_native_name(lang),
                               localization.get_english_name(lang), lang)
        # make filtering work
        self._languageStoreFilter.set_visible_func(self._matches_entry, None)
    def _matches_entry(self, model, itr, *args):
        """Visibility predicate: does the row match the search entry's text?"""
        # Nothing in the text entry? Display everything.
        entry = self._languageEntry.get_text().strip()
        if not entry:
            return True
        # Need to strip out the pango markup before attempting to match.
        # Otherwise, starting to type "span" for "spanish" will match everything
        # due to the enclosing span tag.
        # (success, attrs, native, accel)
        native = Pango.parse_markup(model[itr][0], -1, "_")[2]
        english = model[itr][1]
        # Otherwise, filter the list showing only what is matched by the
        # text entry. Either the English or native names can match.
        lowered = entry.lower()
        translit = strip_accents(native).lower()
        if lowered in native.lower() or lowered in english.lower() or lowered in translit:
            return True
        else:
            return False
    def _render_lang_selected(self, column, renderer, model, itr, user_data=None):
        """Cell-data callback: return an arrow pixbuf for the selected language."""
        (lang_store, sel_itr) = self._langSelection.get_selected()
        # The arrow should point towards the locale list, so honor the
        # current text direction (LTR vs RTL).
        if Gtk.get_locale_direction() == Gtk.TextDirection.LTR:
            _arrow = self._right_arrow
        else:
            _arrow = self._left_arrow
        if sel_itr and lang_store[sel_itr][2] == model[itr][2]:
            return _arrow
        else:
            return None
    def _filter_languages(self, langs):
        """Keep only languages the payload can install; may be overridden."""
        return list(filter(self.payload.is_language_supported, langs))
    def _add_language(self, store, native, english, lang):
        """Override this method with a valid implementation"""
        raise NotImplementedError()
    def _filter_locales(self, lang, locales):
        """Keep only locales the payload supports for lang; may be overridden."""
        return list(filter(lambda l: self.payload.is_locale_supported(lang, l), locales))
    def _add_locale(self, store, native, locale):
        """Override this method with a valid implementation"""
        raise NotImplementedError()
    def _select_locale(self, locale):
        """
        Try to select the given locale in the language and locale
        treeviews. This method tries to find the best match for the given
        locale.
        :return: a pair of selected iterators (language and locale)
        :rtype: a pair of GtkTreeIter or None objects
        """
        # get lang and select it
        language = localization.get_language_id(locale)
        if not language:
            # invalid locale, cannot select
            return (None, None)
        lang_itr = set_treeview_selection(self._langView, language, col=2)
        # find matches and use the one with the highest rank
        locales = localization.get_language_locales(locale)
        locale_itr = set_treeview_selection(self._localeView, locales[0], col=1)
        return (lang_itr, locale_itr)
    def _refresh_locale_store(self, lang):
        """Refresh the localeStore with locales for the given language."""
        self._localeStore.clear()
        locales = localization.get_language_locales(lang)
        locales = self._filter_locales(lang, locales)
        for locale in locales:
            self._add_locale(self._localeStore,
                             localization.get_native_name(locale),
                             locale)
        # select the first locale (with the highest rank)
        set_treeview_selection(self._localeView, locales[0], col=1)
    def on_lang_selection_changed(self, selection):
        """Signal handler: language selection changed -> repopulate locales."""
        (store, selected) = selection.get_selected_rows()
        if selected:
            lang = store[selected[0]][2]
            self._refresh_locale_store(lang)
        else:
            self._localeStore.clear()
    def on_clear_icon_clicked(self, entry, icon_pos, event):
        """Signal handler: clear the search entry when its clear icon is hit."""
        if icon_pos == Gtk.EntryIconPosition.SECONDARY:
            entry.set_text("")
    @timed_action()
    def on_entry_changed(self, *args):
        """Signal handler (debounced): search text changed -> refilter list."""
        self._languageStoreFilter.refilter()
| atodorov/anaconda | pyanaconda/ui/gui/spokes/lib/lang_locale_handler.py | Python | gpl-2.0 | 7,380 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointsOperations(object):
"""PrivateEndpointsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send HTTP requests.
        self._client = client
        # Serializer/deserializer for request parameters and response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Client configuration (subscription id, credentials, policies).
        self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
private_endpoint_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
private_endpoint_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified private endpoint.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param private_endpoint_name: The name of the private endpoint.
:type private_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
private_endpoint_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.PrivateEndpoint"
"""Gets the specified private endpoint by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param private_endpoint_name: The name of the private endpoint.
:type private_endpoint_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpoint, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.PrivateEndpoint
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpoint"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
private_endpoint_name, # type: str
parameters, # type: "_models.PrivateEndpoint"
**kwargs # type: Any
):
# type: (...) -> "_models.PrivateEndpoint"
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpoint"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateEndpoint')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpoint', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PrivateEndpoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
private_endpoint_name, # type: str
parameters, # type: "_models.PrivateEndpoint"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PrivateEndpoint"]
"""Creates or updates an private endpoint in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param private_endpoint_name: The name of the private endpoint.
:type private_endpoint_name: str
:param parameters: Parameters supplied to the create or update private endpoint operation.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.PrivateEndpoint
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpoint or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_02_01.models.PrivateEndpoint]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpoint"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PrivateEndpointListResult"]
"""Gets all private endpoints in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.PrivateEndpointListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateEndpointListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints'} # type: ignore
def list_by_subscription(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PrivateEndpointListResult"]
"""Gets all private endpoints in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.PrivateEndpointListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateEndpointListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/privateEndpoints'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/operations/_private_endpoints_operations.py | Python | mit | 24,580 |
from dragonfly import (ActionBase)
from caster.lib import utilities, control
if control.nexus().dep.NATLINK:
import natlink
class SelectiveAction(ActionBase):
    """Wrap another Dragonfly action so it only fires for (or only outside of)
    a given set of executables.

    action: another Dragonfly action
    executables: an array of strings, each of which is the name of an executable
    negate: if True, the action should not occur during any of the listed
        executables; if False, the opposite
    """

    def __init__(self, action, executables, negate=True):
        ActionBase.__init__(self)
        self.action = action
        self.executables = executables
        self.negate = negate

    def _execute(self, data=None):
        if not control.nexus().dep.NATLINK:
            # Without natlink we cannot identify the foreground window;
            # warn and fall back to always running the wrapped action.
            utilities.availability_message("SelectiveAction", "natlink")
            self.action._execute()
            return
        active_exe = utilities.get_active_window_path(natlink).split("\\")[-1]
        listed = active_exe in self.executables
        # Fire exactly when membership and negation disagree:
        # listed-and-not-negated, or negated-and-not-listed.
        if listed != bool(self.negate):
            self.action._execute()
| j127/caster | caster/lib/dfplus/additions.py | Python | lgpl-3.0 | 1,102 |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides the web interface for adding and editing sheriff rotations."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import json
from google.appengine.api import users
from google.appengine.ext import ndb
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.common import xsrf
from dashboard.models import table_config
class CreateHealthReportHandler(request_handler.RequestHandler):
  """Request handler backing the "create health report" page.

  GET renders the static form; POST implements a small JSON protocol for
  fetching an XSRF token, listing/inspecting table configs, and creating one.
  """

  def get(self):
    """Renders the UI with the form fields."""
    self.RenderStaticHtml('create_health_report.html')

  def post(self):
    """Dispatches the JSON sub-requests and writes the result to the response."""
    user = users.get_current_user()
    if not user:
      self.response.out.write(json.dumps({'error': 'User not logged in.'}))
      return
    if not utils.IsInternalUser():
      self.response.out.write(
          json.dumps({
              'error':
                  'Unauthorized access, please use chromium account to login.'
          }))
      return
    if self.request.get('getToken') == 'true':
      values = {}
      self.GetDynamicVariables(values)
      self.response.out.write(
          json.dumps({
              'xsrf_token': values['xsrf_token'],
          }))
      return
    config_details = self.request.get('getTableConfigDetails')
    if self.request.get('getTableConfigList'):
      self._GetTableConfigList()
    elif config_details:
      self._GetTableConfigDetails(config_details)
    else:
      self._CreateTableConfig()

  def _GetTableConfigList(self):
    """Writes the ids of every stored TableConfig entity as JSON."""
    config_keys = table_config.TableConfig.query().fetch(keys_only=True)
    self.response.out.write(json.dumps({
        'table_config_list': [key.id() for key in config_keys],
    }))

  def _GetTableConfigDetails(self, config_name):
    """Writes the bots, tests and layout of the named config, or an error."""
    config_entity = ndb.Key('TableConfig', config_name).get()
    if not config_entity:
      self.response.out.write(json.dumps({'error': 'Invalid config name.'}))
      return
    master_bot_list = [
        bot.parent().string_id() + '/' + bot.string_id()
        for bot in config_entity.bots
    ]
    self.response.out.write(
        json.dumps({
            'table_name': config_name,
            'table_bots': master_bot_list,
            'table_tests': config_entity.tests,
            'table_layout': config_entity.table_layout
        }))

  def _CreateTableConfig(self):
    """Creates a table config. Writes a valid name or an error message."""
    self._ValidateToken()
    name = self.request.get('tableName')
    master_bot = self.request.get('tableBots').splitlines()
    tests = self.request.get('tableTests').splitlines()
    table_layout = self.request.get('tableLayout')
    override = int(self.request.get('override'))
    user = users.get_current_user()
    if not all((name, master_bot, tests, table_layout, user)):
      self.response.out.write(
          json.dumps({'error': 'Please fill out the form entirely.'}))
      return
    try:
      created_table = table_config.CreateTableConfig(
          name=name,
          bots=master_bot,
          tests=tests,
          layout=table_layout,
          username=user.email(),
          override=override)
    except table_config.BadRequestError as e:
      self.response.out.write(json.dumps({'error': str(e)}))
      logging.error('BadRequestError: %r', str(e))
      return
    if not created_table:
      self.response.out.write(
          json.dumps({'error': 'Could not create table.'}))
      logging.error('Could not create table.')
      return
    self.response.out.write(json.dumps({'name': name}))

  def _ValidateToken(self):
    """Aborts with HTTP 403 unless the request carries a valid XSRF token."""
    user = users.get_current_user()
    token = str(self.request.get('xsrf_token'))
    # NOTE(review): relies on a private helper of the xsrf module, as the
    # original code did.
    if not user or not xsrf._ValidateToken(token, user):
      self.abort(403)
| catapult-project/catapult | dashboard/dashboard/create_health_report.py | Python | bsd-3-clause | 4,261 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from onixcheck.__main__ import main
from onixcheck import data
def test_main_o3_ref_valid(capsys):
    """main() reports VALID and exits 0 for a valid ONIX 3 reference file.

    Bug fix: this test previously checked data.VALID_ONIX2_REF, making it an
    exact duplicate of test_main_o2_ref_valid and leaving the plain ONIX 3
    fixture uncovered (the namespaced one is covered by test_main_has_ns_valid).
    """
    argv = [data.VALID_ONIX3_REF]
    exit_code = main(argv)
    out, err = capsys.readouterr()
    assert 'VALID' in out
    assert 'INVALID' not in err
    assert exit_code == 0
def test_main_o2_ref_valid(capsys):
    """main() reports VALID and exits 0 for a valid ONIX 2 reference file."""
    exit_code = main([data.VALID_ONIX2_REF])
    captured_out, captured_err = capsys.readouterr()
    assert 'VALID' in captured_out
    assert 'INVALID' not in captured_err
    assert exit_code == 0
def test_main_has_ns_valid(capsys):
    """main() handles a namespaced ONIX 3 file and exits 0."""
    exit_code = main([data.VALID_ONIX3_REF_NS])
    captured_out, captured_err = capsys.readouterr()
    assert 'VALID' in captured_out
    assert 'INVALID' not in captured_err
    assert exit_code == 0
def test_main_plain_invalid(capsys):
    """main() reports INVALID on stderr and exits 1 for a broken file."""
    exit_code = main([data.INVALID_ONIX3_REF])
    _, captured_err = capsys.readouterr()
    assert 'INVALID' in captured_err
    assert exit_code == 1
def test_main_debug(capsys):
    """The -d flag turns on DEBUG output without changing the exit code."""
    exit_code = main([data.VALID_ONIX3_REF, '-d'])
    captured_out, _ = capsys.readouterr()
    assert 'DEBUG' in captured_out
    assert exit_code == 0
def test_console_ouptup_encoding(capsys):
    """Regression test: non-ASCII output must not raise UnicodeEncodeError.

    (The function name's "ouptup" typo is kept so existing -k selections and
    CI references keep matching.)
    """
    exit_code = main([data.WIN_CONSOLE_ISSUE])
    assert exit_code == 1
    captured_out, captured_err = capsys.readouterr()
    assert 'UnicodeEncodeError' not in captured_out
    assert 'UnicodeEncodeError' not in captured_err
| titusz/onixcheck | tests/test_onixcheck.py | Python | bsd-2-clause | 1,414 |
'''
Author: Hans Erik Heggem
Email: hans.erik.heggem@gmail.com
Project: Master's Thesis - Autonomous Inspection Of Wind Blades
Repository: Master's Thesis - CV (Computer Vision)
'''
################### UNIT TEST ########################
import unittest
from Settings.TestData import TestData
from TestUnits.Test_main import Test_main
'''
@brief Test unit for DroneVision
'''
class Test_DroneVision(unittest.TestCase, Test_main, TestData):
    """Unit-test wrapper for the DroneVision package."""

    def setUp(self):
        '''
        @brief Prepare keys, test data and lazy module imports for the test.
        '''
        self.SetAllKey()
        self.InitTestData()
        # Imports happen here so keys/test data are configured first.
        from src.DroneVision import DroneVision
        from Settings import Settings
        self.DroneVision = DroneVision
        self.Settings = Settings

    def tearDown(self):
        '''
        @brief Tear-down steps; runs even when the test fails.
        '''
        pass

    def test_DroneVision(self):
        '''
        @brief Main start test function - delegates to the master/slave tests.
        '''
        print('Tested by the DroneMaster/DroneSlave test')
########################### | hansehe/Wind-Blade-Inspection | TestUnits/Test_src/Test_DroneVision/Test_DroneVision.py | Python | mit | 1,087 |
from fabric.api import run, env, hosts, task, runs_once
from fabric.state import output
from fabric.utils import fastprint
from certcheck import get_hosts, get_certs_to_check, check_certs, get_weblist, check_on_web
env.hosts = get_hosts()
output.running = False
output.status = False
output.stdout = False
@task
def check_cert():
    """Check the certificates configured for the current fabric host."""
    current_host = env.host
    fastprint("checking on %s " % (current_host,))
    check_certs(get_certs_to_check(current_host))
@task
@runs_once
def check_web():
    """Check the certificates of every site on the web list (runs only once)."""
    sites = get_weblist()
    check_on_web(sites)
| tomaslibal/when-expires | fabfile.py | Python | gpl-3.0 | 492 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# load admin modules
from django.contrib import admin
admin.autodiscover()
# load api router
from .apps.storage import views as storage_view
# from .apps.sharing import views as storage_view
from rest_framework.routers import DefaultRouter
# Create a router and register our viewsets with it.
# DRF router: exposes list/detail routes for the registered viewsets
# under the /api/v1/ prefix below.
router = DefaultRouter()
router.register(r'storages', storage_view.StorageViewSet)
router.register(r'resources', storage_view.ResourceViewSet)
urls = (
    url(r'^$', TemplateView.as_view(template_name='base.html')),
    # Examples:
    # url(r'^$', 'cassetto.views.home', name='home'),
    # url(r'^cassetto/', include('cassetto.foo.urls')),
    url(r'^api/v1/', include(router.urls)),
    # Direct file download: /download/<username>/<storage-code>/<path-in-storage>
    url(r'^download/(?P<username>[\w.@+-]+)/(?P<storage>[-a-zA-Z0-9_]+)/(?P<path>.+)',
        storage_view.download_view, name='resource-download'),
    # url(r'^api/v1/storages$', storage_view.StorageList.as_view(actions={'get': 'list', 'post': 'create'}), name='storage-list'),
    # url(r'^api/v1/storages/(?P<username>[\w.@+-]+)$', storage_view.StorageList.as_view(actions={'get': 'list', 'post': 'create'}), name='storage-user-list'),
    # url(r'^api/v1/storages/(?P<username>[\w.@+-]+)/(?P<code>[-a-zA-Z0-9_]+)$', storage_view.StorageDetail.as_view(actions={'get': 'retrieve', 'post': 'create'}), name='storage-detail'),
    # url(r'^api/v1/storages/(?P<username>[\w.@+-]+)/(?P<code>[-a-zA-Z0-9_]+)/(?P<path>.+)$', storage_view.ResourceDetail.as_view(actions={'get': 'retrieve', 'post': 'create'}), name='storage-resource-detail'),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^admin/', include(admin.site.urls)),
)
# NOTE(review): patterns() is the pre-Django-1.10 API; on upgrade this becomes
# a plain list of url()/path() entries.
urlpatterns = patterns('', *urls)
# static and media urls not works with DEBUG = True, see static function.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    import debug_toolbar
    # Mount django-debug-toolbar only in development.
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
| openpolis/op-cassetto | project/cassetto/urls.py | Python | bsd-3-clause | 2,320 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Team:
# J Phani Mahesh <phanimahesh@gmail.com>
# Barneedhar (jokerdino) <barneedhar@ubuntu.com>
# Amith KK <amithkumaran@gmail.com>
# Georgi Karavasilev <motorslav@gmail.com>
# Sam Tran <samvtran@gmail.com>
# Sam Hewitt <hewittsamuel@gmail.com>
#
# Description:
# A One-stop configuration tool for Unity.
#
# Legal Stuff:
#
# This file is a part of Unity Tweak Tool
#
# Unity Tweak Tool is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# Unity Tweak Tool is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <https://www.gnu.org/licenses/gpl-3.0.txt>
''' Run this file from the top devel dir for testing purposes. '''
import os
import sys

# os.path.dirname(__name__) is '' for a top-level script (__name__ ==
# "__main__"), so this effectively appends the current working directory --
# hence the "run from the top devel dir" note above.
# NOTE(review): possibly intended to be __file__; confirm before changing.
sys.path.append(os.path.dirname(__name__))

# Launch the real entry point that sits next to this file.
os.system(os.path.join(os.path.dirname(__file__), 'unity-tweak-tool'))
| samvtran/unity-tweak-tool | develtest.py | Python | gpl-3.0 | 1,222 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import time
from flask import request
from markupsafe import escape
from wtforms.fields import BooleanField, HiddenField, SelectField, StringField, TextAreaField
from wtforms.fields.html5 import IntegerField
from wtforms.validators import DataRequired, NumberRange, Optional
from indico.core.db import db
from indico.modules.events.surveys.models.surveys import Survey
from indico.util.i18n import _
from indico.util.placeholders import get_missing_placeholders, render_placeholder_info
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import EmailListField, FileField, IndicoDateTimeField
from indico.web.forms.validators import HiddenUnless, LinkedDateTime, UsedIf, ValidationError
from indico.web.forms.widgets import CKEditorWidget, SwitchWidget
class SurveyForm(IndicoForm):
    """Settings form for creating/editing a survey of an event."""

    # Field names grouped under the notification section of the settings UI.
    _notification_fields = ('notifications_enabled', 'notify_participants', 'start_notification_emails',
                            'new_submission_emails')
    title = StringField(_('Title'), [DataRequired()], description=_('The title of the survey'))
    introduction = TextAreaField(_('Introduction'), description=_('An introduction to be displayed before the survey'))
    anonymous = BooleanField(_('Anonymous submissions'), widget=SwitchWidget(),
                             description=_('User information will not be attached to submissions'))
    require_user = BooleanField(_('Only logged-in users'), [HiddenUnless('anonymous')], widget=SwitchWidget(),
                                description=_('Require users to be logged in for submitting the survey'))
    limit_submissions = BooleanField(_('Limit submissions'), widget=SwitchWidget(),
                                     description=_('Whether there is a submission cap'))
    submission_limit = IntegerField(_('Capacity'),
                                    [HiddenUnless('limit_submissions'), DataRequired(), NumberRange(min=1)],
                                    description=_('Maximum number of submissions accepted'))
    private = BooleanField(_('Private survey'), widget=SwitchWidget(),
                           description=_('Only selected people can answer the survey.'))
    partial_completion = BooleanField(_('Partial completion'), widget=SwitchWidget(),
                                      description=_('Allow to save answers without submitting the survey.'))
    notifications_enabled = BooleanField(_('Enabled'), widget=SwitchWidget(),
                                         description=_('Send email notifications for specific events related to the '
                                                       'survey.'))
    notify_participants = BooleanField(_('Participants'), [HiddenUnless('notifications_enabled', preserve_data=True)],
                                       widget=SwitchWidget(),
                                       description=_('Notify participants of the event when this survey starts.'))
    start_notification_emails = EmailListField(_('Start notification recipients'),
                                               [HiddenUnless('notifications_enabled', preserve_data=True)],
                                               description=_('Email addresses to notify about the start of the survey'))
    new_submission_emails = EmailListField(_('New submission notification recipients'),
                                           [HiddenUnless('notifications_enabled', preserve_data=True)],
                                           description=_('Email addresses to notify when a new submission is made'))

    def __init__(self, *args, **kwargs):
        # The owning event is needed by validate_title for the uniqueness check.
        self.event = kwargs.pop('event')
        super().__init__(*args, **kwargs)

    def validate_title(self, field):
        # Case-insensitive uniqueness among the event's non-deleted surveys;
        # `Survey.title != field.object_data` lets an edit keep its own title.
        query = (Survey.query.with_parent(self.event)
                 .filter(db.func.lower(Survey.title) == field.data.lower(),
                         Survey.title != field.object_data,
                         ~Survey.is_deleted))
        if query.count():
            raise ValidationError(_('There is already a survey named "{}" on this event').format(escape(field.data)))

    def post_validate(self):
        # Non-anonymous submissions must be tied to an account, so force
        # require_user on whenever anonymity is disabled.
        if not self.anonymous.data:
            self.require_user.data = True
class ScheduleSurveyForm(IndicoForm):
    """Form for scheduling when a survey opens and closes."""

    start_dt = IndicoDateTimeField(_('Start'), [UsedIf(lambda form, field: form.allow_reschedule_start), Optional()],
                                   default_time=time(0, 0),
                                   description=_('Moment when the survey will open for submissions'))
    end_dt = IndicoDateTimeField(_('End'), [Optional(), LinkedDateTime('start_dt')],
                                 default_time=time(23, 59),
                                 description=_('Moment when the survey will close'))
    resend_start_notification = BooleanField(_('Resend start notification'), widget=SwitchWidget(),
                                             description=_('Resend the survey start notification.'))

    def __init__(self, *args, **kwargs):
        survey = kwargs.pop('survey')
        self.allow_reschedule_start = kwargs.pop('allow_reschedule_start')
        # Datetimes are interpreted in the event's timezone.
        self.timezone = survey.event.timezone
        super().__init__(*args, **kwargs)
        # Offering to resend only makes sense if a start notification was
        # already sent and the start time can actually be rescheduled.
        if not survey.start_notification_sent or not self.allow_reschedule_start:
            del self.resend_start_notification
class SectionForm(IndicoForm):
    """Form for a survey section, optionally displayed with title/description."""

    display_as_section = BooleanField(_('Display as section'), widget=SwitchWidget(), default=True,
                                      description=_('Whether this is going to be displayed as a section or standalone'))
    # Title/description are only required/shown when displaying as a section.
    title = StringField(_('Title'), [HiddenUnless('display_as_section', preserve_data=True), DataRequired()],
                        description=_('The title of the section.'))
    description = TextAreaField(_('Description'), [HiddenUnless('display_as_section', preserve_data=True)],
                                description=_('The description text of the section.'))
class TextForm(IndicoForm):
    """Form for a static text item shown inside a survey."""

    description = TextAreaField(_('Text'),
                                description=_('The text that should be displayed.'))
class ImportQuestionnaireForm(IndicoForm):
    """Form for importing a previously exported questionnaire (JSON file)."""

    json_file = FileField(_('File'), accepted_file_types='application/json,.json',
                          description=_('Choose a previously exported survey content to import. '
                                        'Existing sections will be preserved.'))
class InvitationForm(IndicoForm):
    """Form for sending survey invitation emails."""

    from_address = SelectField(_('From'), [DataRequired()])
    subject = StringField(_('Subject'), [DataRequired()])
    body = TextAreaField(_('Email body'), [DataRequired()], widget=CKEditorWidget(simple=True))
    recipients = EmailListField(_('Recipients'), [DataRequired()], description=_('One email address per line.'))
    copy_for_sender = BooleanField(_('Send copy to me'), widget=SwitchWidget())
    # Hidden marker used by `is_submitted` below.
    submitted = HiddenField()

    def __init__(self, *args, **kwargs):
        event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
        self.from_address.choices = list(event.get_allowed_sender_emails().items())
        # Show the available email placeholders in the body field's help text.
        self.body.description = render_placeholder_info('survey-link-email', event=None, survey=None)

    def is_submitted(self):
        # Only treat the form as submitted when its hidden marker field is in
        # the request -- presumably to distinguish it from other forms posting
        # to the same endpoint; confirm against the views/templates.
        return super().is_submitted() and 'submitted' in request.form

    def validate_body(self, field):
        # Reject bodies that omit any required placeholder.
        missing = get_missing_placeholders('survey-link-email', field.data, event=None, survey=None)
        if missing:
            raise ValidationError(_('Missing placeholders: {}').format(', '.join(missing)))
| ThiefMaster/indico | indico/modules/events/surveys/forms.py | Python | mit | 7,764 |
"""
Django settings for screencapper project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import dj_database_url
from decouple import Csv, config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Three dirname() calls: this settings module lives two directory levels
# below the project root.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', cast=bool)
DEBUG_PROPAGATE_EXCEPTIONS = config('DEBUG_PROPAGATE_EXCEPTIONS', default=False, cast=bool)
# TEMPLATE_DEBUG deliberately reads the same DEBUG env var.
TEMPLATE_DEBUG = config('DEBUG', default=DEBUG, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = [
    # Project specific apps
    'screencapper.base',
    'screencapper.api',
    'screencapper.receiver',
    # Third party apps
    'django_nose',
    # Django apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# Extra apps can be enabled via a comma-separated EXTRA_APPS env var.
for app in config('EXTRA_APPS', default='', cast=Csv()):
    INSTALLED_APPS.append(app)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'csp.middleware.CSPMiddleware',
)
ROOT_URLCONF = 'screencapper.urls'
WSGI_APPLICATION = 'screencapper.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# DATABASE_URL is a URL-style DSN parsed into a Django config dict by
# dj-database-url.
DATABASES = {
    'default': config(
        'DATABASE_URL',
        cast=dj_database_url.parse
    )
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
# All of these may be overridden via environment variables.
LANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')
TIME_ZONE = config('TIME_ZONE', default='UTC')
USE_I18N = config('USE_I18N', default=True, cast=bool)
USE_L10N = config('USE_L10N', default=True, cast=bool)
USE_TZ = config('USE_TZ', default=True, cast=bool)
# Static and media file locations/URLs, all overridable via environment.
STATIC_ROOT = config('STATIC_ROOT', default=os.path.join(BASE_DIR, 'static'))
STATIC_URL = config('STATIC_URL', '/static/')
MEDIA_ROOT = config('MEDIA_ROOT', default=os.path.join(BASE_DIR, 'media'))
MEDIA_URL = config('MEDIA_URL', '/media/')
# NOTE: a `SESSION_COOKIE_SECURE = config('SESSION_COOKIE_SECURE', ...)`
# assignment used to live here, but it was dead code: the module
# unconditionally re-assigns `SESSION_COOKIE_SECURE = not DEBUG` further down,
# so the env var was never honoured. The dead read has been removed; the
# effective value is unchanged.
TEMPLATE_LOADERS = (
    'jingo.Loader',
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
# Django-CSP: Content-Security-Policy source whitelists.
CSP_DEFAULT_SRC = (
    "'self'",
)
CSP_FONT_SRC = (
    "'self'",
    'http://*.mozilla.net',
    'https://*.mozilla.net'
)
CSP_IMG_SRC = (
    "'self'",
    'http://*.mozilla.net',
    'https://*.mozilla.net',
)
CSP_SCRIPT_SRC = (
    "'self'",
    'http://www.mozilla.org',
    'https://www.mozilla.org',
    'http://*.mozilla.net',
    'https://*.mozilla.net',
)
CSP_STYLE_SRC = (
    "'self'",
    "'unsafe-inline'",
    'http://www.mozilla.org',
    'https://www.mozilla.org',
    'http://*.mozilla.net',
    'https://*.mozilla.net',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Only send the session cookie over HTTPS when not running with DEBUG.
SESSION_COOKIE_SECURE = not DEBUG
ALLIGATOR_CONN = config('ALLIGATOR_CONN', default='redis://localhost:6379/0')
| peterbe/django-screencapper | screencapper/settings/base.py | Python | mpl-2.0 | 3,786 |
import numpy as np
from .utils import get_func, check_boolean, isstr, aggregate_common_doc
from .utils_numpy import aliasing, minimum_dtype, minimum_dtype_scalar
from .aggregate_numpy import _aggregate_base
def _anynan(group_idx, a, size, fill_value, dtype=None):
    """Group-wise test: True for every group containing at least one NaN."""
    nan_mask = np.isnan(a)
    return _any(group_idx, nan_mask, size, fill_value=fill_value, dtype=dtype)
def _allnan(group_idx, a, size, fill_value, dtype=None):
    """Group-wise test: True for every group whose values are all NaN."""
    nan_mask = np.isnan(a)
    return _all(group_idx, nan_mask, size, fill_value=fill_value, dtype=dtype)
def _any(group_idx, a, size, fill_value, dtype=None):
    """Group-wise logical OR; groups with no entries keep fill_value."""
    check_boolean(fill_value)
    out = np.full(size, fill_value, dtype=bool)
    if fill_value:
        # Occupied groups must start the OR from False so the result
        # reflects only the data, not the fill value.
        out[group_idx] = False
    np.logical_or.at(out, group_idx, a)
    return out
def _all(group_idx, a, size, fill_value, dtype=None):
    """Group-wise logical AND; groups with no entries keep fill_value."""
    check_boolean(fill_value)
    out = np.full(size, fill_value, dtype=bool)
    if not fill_value:
        # Occupied groups must start the AND from True so the result
        # reflects only the data, not the fill value.
        out[group_idx] = True
    np.logical_and.at(out, group_idx, a)
    return out
def _sum(group_idx, a, size, fill_value, dtype=None):
    """Group-wise sum accumulated unbuffered via np.add.at."""
    out_dtype = minimum_dtype_scalar(fill_value, dtype, a)
    out = np.full(size, fill_value, dtype=out_dtype)
    if fill_value != 0:
        # Occupied groups accumulate from the additive identity, not fill_value.
        out[group_idx] = 0
    np.add.at(out, group_idx, a)
    return out
def _len(group_idx, a, size, fill_value, dtype=None):
    # Group sizes: summing the scalar 1 once per index counts each group's
    # members; the values in `a` are deliberately ignored.
    return _sum(group_idx, 1, size, fill_value, dtype=int)
def _prod(group_idx, a, size, fill_value, dtype=None):
    """Group-wise product (same behaviour as the aggregate_numpy.py version)."""
    out_dtype = minimum_dtype_scalar(fill_value, dtype, a)
    out = np.full(size, fill_value, dtype=out_dtype)
    if fill_value != 1:
        # Occupied groups accumulate from the multiplicative identity.
        out[group_idx] = 1
    np.multiply.at(out, group_idx, a)
    return out
def _min(group_idx, a, size, fill_value, dtype=None):
    """Group-wise minimum via np.minimum.at (same as aggregate_numpy.py)."""
    out_dtype = minimum_dtype(fill_value, dtype or a.dtype)
    if issubclass(a.dtype.type, np.integer):
        identity = np.iinfo(a.dtype).max
    else:
        identity = np.finfo(a.dtype).max
    out = np.full(size, fill_value, dtype=out_dtype)
    if fill_value != identity:
        # Seed occupied groups with the largest representable value so any
        # data point can lower the running minimum.
        out[group_idx] = identity
    np.minimum.at(out, group_idx, a)
    return out
def _max(group_idx, a, size, fill_value, dtype=None):
    """Group-wise maximum via np.maximum.at (same as aggregate_numpy.py)."""
    out_dtype = minimum_dtype(fill_value, dtype or a.dtype)
    if issubclass(a.dtype.type, np.integer):
        identity = np.iinfo(a.dtype).min
    else:
        identity = np.finfo(a.dtype).min
    out = np.full(size, fill_value, dtype=out_dtype)
    if fill_value != identity:
        # Seed occupied groups with the smallest representable value so any
        # data point can raise the running maximum.
        out[group_idx] = identity
    np.maximum.at(out, group_idx, a)
    return out
# Dispatch table mapping aggregation names to their ufunc.at implementations.
_impl_dict = dict(min=_min, max=_max, sum=_sum, prod=_prod, all=_all, any=_any,
                  allnan=_allnan, anynan=_anynan, len=_len)
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
              dtype=None, axis=None, **kwargs):
    # Resolve aliases to a canonical name; only named (string) functions from
    # _impl_dict are supported here -- arbitrary callables cannot be routed
    # through ufunc.at, hence the NotImplementedError.
    func = get_func(func, aliasing, _impl_dict)
    if not isstr(func):
        raise NotImplementedError("No such ufunc available")
    return _aggregate_base(group_idx, a, size=size, fill_value=fill_value,
                           order=order, dtype=dtype, func=func, axis=axis,
                           _impl_dict=_impl_dict, _nansqueeze=False, **kwargs)
aggregate.__doc__ = """
Unlike ``aggregate_numpy``, which in most cases does some custom
optimisations, this version simply uses ``numpy``'s ``ufunc.at``.
As of version 1.14 this gives fairly poor performance. There should
normally be no need to use this version, it is intended to be used in
testing and benchmarking only.
""" + aggregate_common_doc
| ml31415/numpy-groupies | numpy_groupies/aggregate_numpy_ufunc.py | Python | bsd-2-clause | 3,703 |
#!/usr/bin/python
# encoding: utf-8
"""
sifts.py test code.
Created by Shane O'Connor 2016
"""
import sys
import os
import time
import pprint
sys.path.insert(0, os.path.join('..', '..'))
from klab import colortext
from klab.bio.sifts import SIFTS
# A broader set of PDB entries is kept in the commented-out list for manual
# regression runs; by default only '2pnr' is exercised.
#for pdb_id in ['1AQT', '1lmb', '1utx', '2gzu', '2pnr', '1y8p', '2q8i', '1y8n', '1y8o', '1oax', '3dvn', '1mnu', '1mcl', '2p4a', '1s78', '1i8k']:
for pdb_id in ['2pnr']:
    print('\n')
    colortext.message(pdb_id)
    # Retrieves the SIFTS mapping for the entry (served from cache_dir when
    # available), requiring at least a 70% sequence match.
    s = SIFTS.retrieve(pdb_id, cache_dir = '/kortemmelab/data/oconchus/SIFTS', acceptable_sequence_percentage_match = 70.0)
    colortext.warning(pprint.pformat(s.region_mapping))
    colortext.warning(pprint.pformat(s.region_map_coordinate_systems))
    colortext.warning(pprint.pformat(s.pfam_scop_mapping))
    colortext.warning(pprint.pformat(s.scop_pfam_mapping))
    print('\n')
print('\n\n')
| Kortemme-Lab/klab | .test/bio/sifts.py | Python | mit | 867 |
#!/usr/bin/env python
###############################################################################
# Copyright (C) 1994 - 2007, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
#
# Created by NJG on Wed, Apr 18, 2007
#
# Queueing model of an email-spam analyzer system comprising a
# battery of SMP servers essentially running in batch mode.
# Each node was a 4-way SMP server.
# The performance metric of interest was the mean queue length.
#
# This simple M/M/4 model gave results that were in surprisingly
# good agreement with monitored queue lengths.
#
# $Id: spamcan2.py,v 1.2 2009/03/31 00:48:34 pfeller Exp $
import pdq
# Measured performance parameters
cpusPerServer = 4
emailThruput = 678 # emails per hour
scannerTime = 12.0 # seconds per email
pdq.Init("Spam Farm Model")
# Timebase is SECONDS ...
# NOTE(review): the return values below appear unused -- later PDQ calls
# reference the stream/node by name ("Email", "spamCan"); confirm against
# the PDQ API before removing the assignments.
nstreams = pdq.CreateOpen("Email", float(emailThruput)/3600)
nnodes = pdq.CreateNode("spamCan", int(cpusPerServer), pdq.MSQ)
pdq.SetDemand("spamCan", "Email", scannerTime)
pdq.Solve(pdq.CANON)
pdq.Report()
| peterlharding/PDQ | examples/Linux Magazine/spamcan2.py | Python | mit | 2,040 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Derek Carter<goozbach@friocorte.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: selinux
short_description: Change policy and state of SELinux
description:
- Configures the SELinux mode and policy. A reboot may be required after usage. Ansible will not issue this reboot but will let you know when it is required.
version_added: "0.7"
options:
policy:
description:
- "name of the SELinux policy to use (example: C(targeted)) will be required if state is not C(disabled)"
required: false
default: null
state:
description:
- The SELinux mode
required: true
default: null
choices: [ "enforcing", "permissive", "disabled" ]
conf:
description:
- path to the SELinux configuration file, if non-standard
required: false
default: "/etc/selinux/config"
notes:
- Not tested on any debian based system
requirements: [ libselinux-python ]
author: "Derek Carter (@goozbach) <goozbach@friocorte.com>"
'''
EXAMPLES = '''
# Enable SELinux
- selinux:
policy: targeted
state: enforcing
# Put SELinux in permissive mode, logging actions that would be blocked.
- selinux:
policy: targeted
state: permissive
# Disable SELinux
- selinux:
state: disabled
'''
import os
import re
try:
import selinux
HAS_SELINUX = True
except ImportError:
HAS_SELINUX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts import get_file_lines
# getter subroutines
def get_config_state(configfile):
    """Return the value of the SELINUX= line in *configfile*, or None."""
    for line in get_file_lines(configfile):
        if re.match(r'^SELINUX=.*$', line):
            return line.split('=')[1].strip()
def get_config_policy(configfile):
    """Return the value of the SELINUXTYPE= line in *configfile*, or None."""
    for line in get_file_lines(configfile):
        if re.match(r'^SELINUXTYPE=.*$', line):
            return line.split('=')[1].strip()
# setter subroutines
def set_config_state(state, configfile):
    """Rewrite *configfile* so its SELINUX= line carries *state*.

    Example resulting line: SELINUX=permissive
    """
    replacement = 'SELINUX=%s' % state
    original_lines = get_file_lines(configfile)
    with open(configfile, "w") as conf:
        for original in original_lines:
            conf.write(re.sub(r'^SELINUX=.*', replacement, original))
def set_state(module, state):
    """Apply the requested SELinux mode to the running kernel.

    'disabled' is a deliberate no-op here: it can only take effect via the
    config file after a reboot. Any other value is a programming error and
    fails the module.
    """
    if state == 'enforcing':
        selinux.security_setenforce(1)
    elif state == 'permissive':
        selinux.security_setenforce(0)
    elif state == 'disabled':
        pass
    else:
        msg = 'trying to set invalid runtime state %s' % state
        module.fail_json(msg=msg)
def set_config_policy(policy, configfile):
    """Rewrite *configfile* so its SELINUXTYPE= line carries *policy*.

    Example resulting line: SELINUXTYPE=targeted
    """
    replacement = 'SELINUXTYPE=%s' % policy
    original_lines = get_file_lines(configfile)
    with open(configfile, "w") as conf:
        for original in original_lines:
            conf.write(re.sub(r'^SELINUXTYPE=.*', replacement, original))
def main():
    """Compare requested SELinux policy/state against runtime and config
    file, apply what can be applied, and report whether a reboot is needed.

    In check mode the module exits at the first detected change.
    """
    module = AnsibleModule(
        argument_spec=dict(
            policy=dict(required=False),
            state=dict(choices=['enforcing', 'permissive', 'disabled'], required=True),
            configfile=dict(aliases=['conf', 'file'], default='/etc/selinux/config')
        ),
        supports_check_mode=True
    )
    if not HAS_SELINUX:
        module.fail_json(msg='libselinux-python required for this module')
    # global vars
    changed = False
    msgs = []
    configfile = module.params['configfile']
    policy = module.params['policy']
    state = module.params['state']
    # Current runtime situation, read via the libselinux bindings.
    runtime_enabled = selinux.is_selinux_enabled()
    runtime_policy = selinux.selinux_getpolicytype()[1]
    runtime_state = 'disabled'
    if runtime_enabled:
        # enabled means 'enforcing' or 'permissive'
        if selinux.security_getenforce():
            runtime_state = 'enforcing'
        else:
            runtime_state = 'permissive'
    if not os.path.isfile(configfile):
        module.fail_json(msg="Unable to find file {0}".format(configfile),
                         details="Please install SELinux-policy package, "
                                 "if this package is not installed previously.")
    config_policy = get_config_policy(configfile)
    config_state = get_config_state(configfile)
    # check to see if policy is set if state is not 'disabled'
    if state != 'disabled':
        if not policy:
            module.fail_json(msg='policy is required if state is not \'disabled\'')
    else:
        # When disabling, fall back to whatever policy the config file has.
        if not policy:
            policy = config_policy
    # check changed values and run changes
    if policy != runtime_policy:
        if module.check_mode:
            module.exit_json(changed=True)
        # cannot change runtime policy
        msgs.append('reboot to change the loaded policy')
        changed = True
    if policy != config_policy:
        if module.check_mode:
            module.exit_json(changed=True)
        msgs.append('config policy changed from \'%s\' to \'%s\'' % (config_policy, policy))
        set_config_policy(policy, configfile)
        changed = True
    if state != runtime_state:
        if module.check_mode:
            module.exit_json(changed=True)
        if runtime_enabled:
            if state == 'disabled':
                if runtime_state != 'permissive':
                    # Temporarily set state to permissive
                    set_state(module, 'permissive')
                    msgs.append('runtime state temporarily changed from \'%s\' to \'permissive\', state change will take effect next reboot' % (runtime_state))
                else:
                    msgs.append('state change will take effect next reboot')
            else:
                set_state(module, state)
                msgs.append('runtime state changed from \'%s\' to \'%s\'' % (runtime_state, state))
        else:
            msgs.append('state change will take effect next reboot')
        changed = True
    if state != config_state:
        if module.check_mode:
            module.exit_json(changed=True)
        msgs.append('config state changed from \'%s\' to \'%s\'' % (config_state, state))
        set_config_state(state, configfile)
        changed = True
    module.exit_json(changed=changed, msg=', '.join(msgs), configfile=configfile, policy=policy, state=state)
#################################################
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| randynobx/ansible | lib/ansible/modules/system/selinux.py | Python | gpl-3.0 | 7,214 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest import test
class VirtualInterfacesNegativeTestJSON(base.BaseV2ComputeTest):
    """Negative tests for the compute virtual-interfaces API."""

    @classmethod
    def setup_credentials(cls):
        # For this test no network resources are needed
        cls.set_network_resources()
        super(VirtualInterfacesNegativeTestJSON, cls).setup_credentials()

    @classmethod
    def setup_clients(cls):
        super(VirtualInterfacesNegativeTestJSON, cls).setup_clients()
        cls.client = cls.servers_client

    @test.attr(type=['negative'])
    @test.idempotent_id('64ebd03c-1089-4306-93fa-60f5eb5c803c')
    @test.services('network')
    def test_list_virtual_interfaces_invalid_server_id(self):
        # Negative test: Should not be able to GET virtual interfaces
        # for an invalid server_id
        # A freshly generated UUID will not match any existing server.
        invalid_server_id = str(uuid.uuid4())
        self.assertRaises(lib_exc.NotFound,
                          self.client.list_virtual_interfaces,
                          invalid_server_id)
| hayderimran7/tempest | tempest/api/compute/servers/test_virtual_interfaces_negative.py | Python | apache-2.0 | 1,691 |
# -*- coding: utf-8 -*-
#
# config.py - Global Mycodo settings
#
import binascii
import sys
from datetime import timedelta
import os
from flask_babel import lazy_gettext
# Append proper path for other software reading this config file
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from config_translations import TRANSLATIONS
MYCODO_VERSION = '8.12.9'
# Alembic database schema revision identifier expected by this release.
ALEMBIC_VERSION = 'b354722c9b8b'
# FORCE_UPGRADE_MASTER
# Set True to enable upgrading to the master branch of the Mycodo repository.
# Set False to enable upgrading to the latest Release version (default).
# Do not use this feature unless you know what you're doing or have been
# instructed to do so, as it can really mess up your system.
FORCE_UPGRADE_MASTER = False
# Final release for each major version number
# Used to determine proper upgrade page to display
FINAL_RELEASES = ['5.7.3', '6.4.7', '7.10.0']
# ENABLE FLASK PROFILER
# Accessed at https://127.0.0.1/mycodo-flask-profiler
ENABLE_FLASK_PROFILER = False
# Supported UI translations: locale code -> display name.
LANGUAGES = {
    'en': 'English',
    'de': 'Deutsche (German)',
    'es': 'Español (Spanish)',
    'fr': 'Français (French)',
    'it': 'Italiano (Italian)',
    'nl': 'Nederlands (Dutch)',
    'nb': 'Norsk (Norwegian)',
    'pl': 'Polski (Polish)',
    'pt': 'Português (Portuguese)',
    'ru': 'русский язык (Russian)',
    'sr': 'српски (Serbian)',
    'sv': 'Svenska (Swedish)',
    'zh': '中文 (Chinese)'
}
# (value, label) choice tuples for dashboard widgets; the first entry is the
# empty-value "Add ..." prompt.
DASHBOARD_WIDGETS = [
    ('', "{} {} {}".format(lazy_gettext('Add'), lazy_gettext('Dashboard'), lazy_gettext('Widget'))),
    ('spacer', lazy_gettext('Spacer')),
    ('graph', lazy_gettext('Graph')),
    ('gauge', lazy_gettext('Gauge')),
    ('indicator', TRANSLATIONS['indicator']['title']),
    ('measurement', TRANSLATIONS['measurement']['title']),
    ('output', TRANSLATIONS['output']['title']),
    ('output_pwm_slider', '{}: {}'.format(
        TRANSLATIONS['output']['title'], lazy_gettext('PWM Slider'))),
    ('pid_control', lazy_gettext('PID Control')),
    ('python_code', lazy_gettext('Python Code')),
    ('camera', TRANSLATIONS['camera']['title'])
]
# Camera info
# Per-backend metadata: display name, installable dependencies as
# (install type, import/test name, package spec) tuples, and whether the
# backend can capture still images and/or video streams.
CAMERA_INFO = {
    'fswebcam': {
        'name': 'fswebcam',
        'dependencies_module': [
            ('apt', 'fswebcam', 'fswebcam')
        ],
        'capable_image': True,
        'capable_stream': False
    },
    'libcamera': {
        'name': 'libcamera',
        'dependencies_module': [
            ('apt', 'libcamera-apps-lite', 'libcamera-apps-lite')
        ],
        'capable_image': True,
        'capable_stream': False
    },
    'opencv': {
        'name': 'OpenCV',
        'dependencies_module': [
            ('pip-pypi', 'imutils', 'imutils'),
            ('apt', 'python3-opencv', 'python3-opencv'),
        ],
        'capable_image': True,
        'capable_stream': True
    },
    'picamera': {
        'name': 'PiCamera (deprecated)',
        'dependencies_module': [
            ('pip-pypi', 'picamera', 'picamerab==1.13b1')
        ],
        'capable_image': True,
        'capable_stream': True
    },
    'raspistill': {
        'name': 'raspistill (deprecated)',
        'dependencies_module': [],
        'capable_image': True,
        'capable_stream': False
    },
    'http_address': {
        'name': 'URL (urllib)',
        'dependencies_module': [
            ('pip-pypi', 'imutils', 'imutils'),
            ('apt', 'python3-opencv', 'python3-opencv'),
        ],
        'capable_image': True,
        'capable_stream': True
    },
    'http_address_requests': {
        'name': 'URL (requests)',
        'dependencies_module': [
            ('pip-pypi', 'imutils', 'imutils'),
            ('apt', 'python3-opencv', 'python3-opencv'),
        ],
        'capable_image': True,
        'capable_stream': False
    },
}
# LCD info
# Per-display metadata: display name, optional user-facing message,
# dependencies as (install type, import/test name, package spec) tuples,
# and the supported bus interfaces.
LCD_INFO = {
    '16x2_generic': {
        'name': '16x2 LCD',
        'dependencies_module': [],
        'interfaces': ['I2C']
    },
    '20x4_generic': {
        'name': '20x4 LCD',
        'dependencies_module': [],
        'interfaces': ['I2C']
    },
    '16x2_grove_lcd_rgb': {
        'name': '16x2 Grove LCD RGB',
        'dependencies_module': [],
        'interfaces': ['I2C']
    },
    '128x32_pioled_circuit_python': {
        'name': '128x32 OLED (SD1306, CircuitPython)',
        'message': "This module uses the newer Adafruit CircuitPython library. The older Adafruit_SSD1306 library is deprecated and not recommended to be used.",
        'dependencies_module': [
            ('apt', 'libjpeg-dev', 'libjpeg-dev'),
            ('pip-pypi', 'PIL', 'Pillow==8.1.2'),
            ('pip-pypi', 'usb.core', 'pyusb==1.1.1'),
            ('pip-pypi', 'adafruit_extended_bus', 'Adafruit-extended-bus==1.0.2'),
            ('pip-pypi', 'adafruit_framebuf', 'adafruit-circuitpython-framebuf'),
            ('pip-pypi', 'adafruit_ssd1306', 'Adafruit-Circuitpython-SSD1306')
        ],
        'interfaces': ['I2C', 'SPI']
    },
    '128x64_pioled_circuit_python': {
        'name': '128x64 OLED (SD1306, CircuitPython)',
        'message': "This module uses the newer Adafruit CircuitPython library. The older Adafruit_SSD1306 library is deprecated and not recommended to be used.",
        'dependencies_module': [
            ('apt', 'libjpeg-dev', 'libjpeg-dev'),
            ('pip-pypi', 'PIL', 'Pillow==8.1.2'),
            ('pip-pypi', 'usb.core', 'pyusb==1.1.1'),
            ('pip-pypi', 'adafruit_extended_bus', 'Adafruit-extended-bus==1.0.2'),
            ('pip-pypi', 'adafruit_framebuf', 'adafruit-circuitpython-framebuf'),
            ('pip-pypi', 'adafruit_ssd1306', 'Adafruit-Circuitpython-SSD1306')
        ],
        'interfaces': ['I2C', 'SPI']
    },
    '128x32_pioled': {
        'name': '128x32 OLED (SD1306, Adafruit_SSD1306)',
        'message': "This module uses the older Adafruit_SSD1306 library that is deprecated and is not recommended to be used. It is recommended to use the other module that uses the newer Adafruit CircuitPython library.",
        'dependencies_module': [
            ('apt', 'libjpeg-dev', 'libjpeg-dev'),
            ('pip-pypi', 'PIL', 'Pillow==8.1.2'),
            ('pip-pypi', 'Adafruit_GPIO', 'Adafruit-GPIO==1.0.3'),
            ('pip-pypi', 'Adafruit_PureIO', 'Adafruit-PureIO==1.1.8'),
            ('pip-pypi', 'Adafruit_SSD1306', 'git+https://github.com/adafruit/Adafruit_Python_SSD1306.git')
        ],
        'interfaces': ['I2C', 'SPI']
    },
    '128x64_pioled': {
        'name': '128x64 OLED (SD1306, Adafruit_SSD1306)',
        'message': "This module uses the older Adafruit_SSD1306 library that is deprecated and is not recommended to be used. It is recommended to use the other module that uses the newer Adafruit CircuitPython library.",
        'dependencies_module': [
            ('apt', 'libjpeg-dev', 'libjpeg-dev'),
            ('pip-pypi', 'PIL', 'Pillow==8.1.2'),
            ('pip-pypi', 'Adafruit_GPIO', 'Adafruit-GPIO==1.0.3'),
            ('pip-pypi', 'Adafruit_PureIO', 'Adafruit-PureIO==1.1.8'),
            ('pip-pypi', 'Adafruit_SSD1306', 'git+https://github.com/adafruit/Adafruit_Python_SSD1306.git')
        ],
        'interfaces': ['I2C', 'SPI']
    }
}
# LCD form dropdown (the previous "Math form dropdown" comment was a
# copy-paste leftover -- these are the LCD_INFO choices).
LCDS = [
    ('16x2_generic', LCD_INFO['16x2_generic']['name']),
    ('20x4_generic', LCD_INFO['20x4_generic']['name']),
    ('16x2_grove_lcd_rgb', LCD_INFO['16x2_grove_lcd_rgb']['name']),
    ('128x32_pioled', LCD_INFO['128x32_pioled']['name']),
    ('128x64_pioled', LCD_INFO['128x64_pioled']['name']),
    ('128x32_pioled_circuit_python', LCD_INFO['128x32_pioled_circuit_python']['name']),
    ('128x64_pioled_circuit_python', LCD_INFO['128x64_pioled_circuit_python']['name'])
]
# Math info
# Per-controller metadata: display name, dependencies, which measurement
# selection mode the UI enables, and fixed output channels under 'measure'
# (empty dict when channels are user-configured).
MATH_INFO = {
    'average': {
        'name': "{} ({}, {})".format(lazy_gettext('Average'), lazy_gettext('Last'), lazy_gettext('Multiple Channels')),
        'dependencies_module': [],
        'enable_measurements_select': True,
        'measure': {}
    },
    'average_single': {
        'name': "{} ({}, {})".format(lazy_gettext('Average'), lazy_gettext('Past'), lazy_gettext('Single Channel')),
        'dependencies_module': [],
        'enable_measurements_select': False,
        'enable_measurements_convert': True,
        'measure': {}
    },
    'sum': {
        'name': "{} ({}, {})".format(lazy_gettext('Sum'), lazy_gettext('Last'), lazy_gettext('Multiple Channels')),
        'dependencies_module': [],
        'enable_measurements_select': True,
        'measure': {}
    },
    'sum_single': {
        'name': "{} ({}, {})".format(lazy_gettext('Sum'), lazy_gettext('Past'), lazy_gettext('Single Channel')),
        'dependencies_module': [],
        'enable_measurements_select': False,
        'enable_measurements_convert': True,
        'measure': {}
    },
    'difference': {
        'name': lazy_gettext('Difference'),
        'dependencies_module': [],
        'enable_measurements_select': True,
        'measure': {}
    },
    'equation': {
        'name': lazy_gettext('Equation'),
        'dependencies_module': [],
        'enable_measurements_select': True,
        'measure': {}
    },
    'humidity': {
        'name': "{} ({})".format(lazy_gettext('Humidity'), lazy_gettext('Wet/Dry-Bulb')),
        'dependencies_module': [],
        'enable_measurements_convert': True,
        'measure': {
            0: {
                'measurement': 'humidity',
                'unit': 'percent'
            },
            1: {
                'measurement': 'humidity_ratio',
                'unit': 'kg_kg'
            },
            2: {
                'measurement': 'specific_enthalpy',
                'unit': 'kJ_kg'
            },
            3: {
                'measurement': 'specific_volume',
                'unit': 'm3_kg'
            }
        }
    },
    'redundancy': {
        'name': lazy_gettext('Redundancy'),
        'dependencies_module': [],
        'enable_measurements_select': True,
        'measure': {}
    },
    'statistics': {
        'name': lazy_gettext('Statistics'),
        'dependencies_module': [],
        'enable_single_measurement_select': True,
        'measure': {
            0: {
                'measurement': '',
                'unit': '',
                'name': 'Mean'
            },
            1: {
                'measurement': '',
                'unit': '',
                'name': 'Median'
            },
            2: {
                'measurement': '',
                'unit': '',
                'name': 'Minimum'
            },
            3: {
                'measurement': '',
                'unit': '',
                'name': 'Maximum'
            },
            4: {
                'measurement': '',
                'unit': '',
                'name': 'Standard Deviation'
            },
            5: {
                'measurement': '',
                'unit': '',
                'name': 'St. Dev. of Mean (upper)'
            },
            6: {
                'measurement': '',
                'unit': '',
                'name': 'St. Dev. of Mean (lower)'
            }
        }
    },
    'verification': {
        'name': lazy_gettext('Verification'),
        'dependencies_module': [],
        'enable_measurements_select': True,
        'measure': {}
    },
    'vapor_pressure_deficit': {
        'name': lazy_gettext('Vapor Pressure Deficit'),
        'dependencies_module': [],
        'enable_measurements_select': False,
        'measure': {
            0: {
                'measurement': 'vapor_pressure_deficit',
                'unit': 'Pa'
            }
        }
    }
}
# Dependencies shared by every method type: unzip plus the Highcharts JS
# assets fetched/installed via shell commands.
METHOD_DEP_BASE = [
    ('apt', 'unzip', 'unzip'),
    ('bash-commands',
     [
         '/var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/highcharts-9.1.2.js'
     ],
     [
         'wget --no-clobber https://code.highcharts.com/zips/Highcharts-9.1.2.zip',
         'unzip Highcharts-9.1.2.zip -d Highcharts-9.1.2',
         'cp -rf Highcharts-9.1.2/code/highcharts.js /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/highcharts-9.1.2.js',
         'cp -rf Highcharts-9.1.2/code/highcharts.js.map /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/highcharts.js.map',
         'rm -rf Highcharts-9.1.2'
     ])
]
# Method info
# Display names and dependencies for each setpoint-method type.
METHOD_INFO = {
    'Date': {
        'name': lazy_gettext('Time/Date'),
        'dependencies_module': METHOD_DEP_BASE
    },
    'Duration': {
        'name': lazy_gettext('Duration'),
        'dependencies_module': METHOD_DEP_BASE
    },
    'Daily': {
        'name': "{} ({})".format(lazy_gettext('Daily'), lazy_gettext('Time-Based')),
        'dependencies_module': METHOD_DEP_BASE
    },
    'DailySine': {
        'name': "{} ({})".format(lazy_gettext('Daily'), lazy_gettext('Sine Wave')),
        'dependencies_module': METHOD_DEP_BASE
    },
    'DailyBezier': {
        'name': "{} ({})".format(lazy_gettext('Daily'), lazy_gettext('Bezier Curve')),
        'dependencies_module': [
            ('apt', 'libatlas-base-dev', 'libatlas-base-dev'),
            ('apt', 'python3-numpy', 'python3-numpy')
        ] + METHOD_DEP_BASE
    },
    'Cascade': {
        'name': lazy_gettext('Method Cascade'),
        'dependencies_module': METHOD_DEP_BASE
    }
}
# Method form dropdown
METHODS = [
    ('Date', METHOD_INFO['Date']['name']),
    ('Duration', METHOD_INFO['Duration']['name']),
    ('Daily', METHOD_INFO['Daily']['name']),
    ('DailySine', METHOD_INFO['DailySine']['name']),
    ('DailyBezier', METHOD_INFO['DailyBezier']['name']),
    ('Cascade', METHOD_INFO['Cascade']['name'])
]
PID_INFO = {
'measure': {
0: {
'measurement': '',
'unit': '',
'name': '{}'.format(TRANSLATIONS['setpoint']['title']),
'measurement_type': 'setpoint'
},
1: {
'measurement': '',
'unit': '',
'name': '{} ({})'.format(
TRANSLATIONS['setpoint']['title'], lazy_gettext('Band Min')),
'measurement_type': 'setpoint'
},
2: {
'measurement': '',
'unit': '',
'name': '{} ({})'.format(
TRANSLATIONS['setpoint']['title'], lazy_gettext('Band Max')),
'measurement_type': 'setpoint'
},
3: {
'measurement': 'pid_p_value',
'unit': 'pid_value',
'name': 'P-value'
},
4: {
'measurement': 'pid_i_value',
'unit': 'pid_value',
'name': 'I-value'
},
5: {
'measurement': 'pid_d_value',
'unit': 'pid_value',
'name': 'D-value'
},
6: {
'measurement': 'duration_time',
'unit': 's',
'name': '{} ({})'.format(
TRANSLATIONS['output']['title'], TRANSLATIONS['duration']['title'])
},
7: {
'measurement': 'duty_cycle',
'unit': 'percent',
'name': '{} ({})'.format(
TRANSLATIONS['output']['title'], TRANSLATIONS['duty_cycle']['title'])
},
8: {
'measurement': 'volume',
'unit': 'ml',
'name': '{} ({})'.format(
TRANSLATIONS['output']['title'], TRANSLATIONS['volume']['title'])
},
9: {
'measurement': 'unitless',
'unit': 'none',
'name': '{} ({})'.format(
TRANSLATIONS['output']['title'], TRANSLATIONS['value']['title'])
}
}
}
DEPENDENCIES_GENERAL = {
'highstock': {
'name': 'Highstock',
'dependencies_module': [
('apt', 'unzip', 'unzip'),
('bash-commands',
[
'/var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/highstock-9.1.2.js',
'/var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/highcharts-more-9.1.2.js',
'/var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/data-9.1.2.js',
'/var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/exporting-9.1.2.js',
'/var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/export-data-9.1.2.js',
'/var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/offline-exporting-9.1.2.js'
],
[
'wget --no-clobber https://code.highcharts.com/zips/Highcharts-Stock-9.1.2.zip',
'unzip Highcharts-Stock-9.1.2.zip -d Highcharts-Stock-9.1.2',
'cp -rf Highcharts-Stock-9.1.2/code/highstock.js /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/highstock-9.1.2.js',
'cp -rf Highcharts-Stock-9.1.2/code/highstock.js.map /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/highstock.js.map',
'cp -rf Highcharts-Stock-9.1.2/code/highcharts-more.js /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/highcharts-more-9.1.2.js',
'cp -rf Highcharts-Stock-9.1.2/code/highcharts-more.js.map /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/highcharts-more.js.map',
'cp -rf Highcharts-Stock-9.1.2/code/modules/data.js /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/data-9.1.2.js',
'cp -rf Highcharts-Stock-9.1.2/code/modules/data.js.map /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/data.js.map',
'cp -rf Highcharts-Stock-9.1.2/code/modules/exporting.js /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/exporting-9.1.2.js',
'cp -rf Highcharts-Stock-9.1.2/code/modules/exporting.js.map /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/exporting.js.map',
'cp -rf Highcharts-Stock-9.1.2/code/modules/export-data.js /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/export-data-9.1.2.js',
'cp -rf Highcharts-Stock-9.1.2/code/modules/export-data.js.map /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/export-data.js.map',
'cp -rf Highcharts-Stock-9.1.2/code/modules/offline-exporting.js /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/offline-exporting-9.1.2.js',
'cp -rf Highcharts-Stock-9.1.2/code/modules/offline-exporting.js.map /var/mycodo-root/mycodo/mycodo_flask/static/js/user_js/offline-exporting.js.map',
'rm -rf Highcharts-Stock-9.1.2'
])
]
}
}
# Conditional controllers
CONDITIONAL_CONDITIONS = [
('measurement', "{} ({}, {})".format(
TRANSLATIONS['measurement']['title'],
TRANSLATIONS['single']['title'],
TRANSLATIONS['last']['title'])),
('measurement_past_average', "{} ({}, {}, {})".format(
TRANSLATIONS['measurement']['title'],
TRANSLATIONS['single']['title'],
TRANSLATIONS['past']['title'],
TRANSLATIONS['average']['title'])),
('measurement_past_sum', "{} ({}, {}, {})".format(
TRANSLATIONS['measurement']['title'],
TRANSLATIONS['single']['title'],
TRANSLATIONS['past']['title'],
TRANSLATIONS['sum']['title'])),
('measurement_dict', "{} ({}, {})".format(
TRANSLATIONS['measurement']['title'],
TRANSLATIONS['multiple']['title'],
TRANSLATIONS['past']['title'])),
('gpio_state', lazy_gettext('GPIO State')),
('output_state', lazy_gettext('Output State')),
('output_duration_on', lazy_gettext('Output Duration On')),
('controller_status', lazy_gettext("Controller Running")),
]
FUNCTION_INFO = {
'function_actions': {
'name': lazy_gettext('Execute Actions'),
'dependencies_module': []
},
'conditional_conditional': {
'name': '{} {}'.format(
TRANSLATIONS['conditional']['title'],
TRANSLATIONS['controller']['title']),
'dependencies_module': [
('pip-pypi', 'pylint', 'pylint==2.12.2')
]
},
'pid_pid': {
'name': '{} {}'.format(
TRANSLATIONS['pid']['title'],
TRANSLATIONS['controller']['title']),
'dependencies_module': []
},
'trigger_edge': {
'name': '{}: {}'.format(
TRANSLATIONS['trigger']['title'],
TRANSLATIONS['edge']['title']),
'dependencies_module': []
},
'trigger_output': {
'name': '{}: {} ({}/{})'.format(
TRANSLATIONS['trigger']['title'],
TRANSLATIONS['output']['title'],
TRANSLATIONS['on']['title'],
TRANSLATIONS['off']['title']),
'dependencies_module': []
},
'trigger_output_pwm': {
'name': '{}: {} ({})'.format(
TRANSLATIONS['trigger']['title'],
TRANSLATIONS['output']['title'],
TRANSLATIONS['pwm']['title']),
'dependencies_module': []
},
'trigger_timer_daily_time_point': {
'name': lazy_gettext('Trigger: Timer (Daily Point)'),
'dependencies_module': []
},
'trigger_timer_daily_time_span': {
'name': '{}: {} ({})'.format(
TRANSLATIONS['trigger']['title'],
TRANSLATIONS['timer']['title'],
lazy_gettext('Daily Span')),
'dependencies_module': []
},
'trigger_timer_duration': {
'name': '{}: {} ({})'.format(
TRANSLATIONS['trigger']['title'],
TRANSLATIONS['timer']['title'],
TRANSLATIONS['duration']['title']),
'dependencies_module': []
},
'trigger_run_pwm_method': {
'name': '{}: {}'.format(
TRANSLATIONS['trigger']['title'],
lazy_gettext('Run PWM Method')),
'dependencies_module': []
},
'trigger_sunrise_sunset': {
'name': '{}: {}'.format(
TRANSLATIONS['trigger']['title'],
lazy_gettext('Sunrise/Sunset')),
'dependencies_module': []
}
}
FUNCTIONS = [
('function_actions', FUNCTION_INFO['function_actions']['name']),
('conditional_conditional', FUNCTION_INFO['conditional_conditional']['name']),
('pid_pid', FUNCTION_INFO['pid_pid']['name']),
('trigger_edge', FUNCTION_INFO['trigger_edge']['name']),
('trigger_output', FUNCTION_INFO['trigger_output']['name']),
('trigger_output_pwm', FUNCTION_INFO['trigger_output_pwm']['name']),
('trigger_timer_daily_time_point', FUNCTION_INFO['trigger_timer_daily_time_point']['name']),
('trigger_timer_daily_time_span', FUNCTION_INFO['trigger_timer_daily_time_span']['name']),
('trigger_timer_duration', FUNCTION_INFO['trigger_timer_duration']['name']),
('trigger_run_pwm_method', FUNCTION_INFO['trigger_run_pwm_method']['name']),
('trigger_sunrise_sunset', FUNCTION_INFO['trigger_sunrise_sunset']['name'])
]
# User Roles
# Default role definitions seeded into the database: each role is a dict of
# boolean permissions consumed by the flask app's access checks.
USER_ROLES = [
    dict(id=1, name='Admin',
         edit_settings=True, edit_controllers=True, edit_users=True,
         view_settings=True, view_camera=True, view_stats=True, view_logs=True,
         reset_password=True),
    dict(id=2, name='Editor',
         edit_settings=True, edit_controllers=True, edit_users=False,
         view_settings=True, view_camera=True, view_stats=True, view_logs=True,
         reset_password=True),
    dict(id=3, name='Monitor',
         edit_settings=False, edit_controllers=False, edit_users=False,
         view_settings=True, view_camera=True, view_stats=True, view_logs=True,
         reset_password=True),
    dict(id=4, name='Guest',
         edit_settings=False, edit_controllers=False, edit_users=False,
         view_settings=False, view_camera=False, view_stats=False, view_logs=False,
         reset_password=False),
    dict(id=5, name='Kiosk',
         edit_settings=False, edit_controllers=False, edit_users=False,
         view_settings=False, view_camera=True, view_stats=True, view_logs=False,
         reset_password=False)
]
# Web UI themes
THEMES = [
    ('cerulean', 'Cerulean'),
    ('cosmo', 'Cosmo'),
    ('cyborg', 'Cyborg'),
    ('darkly', 'Darkly'),
    ('flatly', 'Flatly'),
    ('journal', 'Journal'),
    ('literia', 'Literia'),
    ('lumen', 'Lumen'),
    ('lux', 'Lux'),
    ('materia', 'Materia'),
    ('minty', 'Minty'),
    ('pulse', 'Pulse'),
    ('sandstone', 'Sandstone'),
    ('simplex', 'Simplex'),
    ('slate', 'Slate'),
    ('solar', 'Solar'),
    ('spacelab', 'Spacelab'),
    ('superhero', 'Superhero'),
    ('united', 'United'),
    ('yeti', 'Yeti')
]
# Subset of THEMES that use a dark palette (used to pick contrasting assets)
THEMES_DARK = ['cyborg', 'darkly', 'slate', 'solar', 'superhero']
# Install path (the parent directory of this script)
INSTALL_DIRECTORY = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + '/..')
# Database
DATABASE_NAME = "mycodo.db"
ALEMBIC_PATH = os.path.join(INSTALL_DIRECTORY, 'alembic_db')
DATABASE_PATH = os.path.join(INSTALL_DIRECTORY, 'databases')
ALEMBIC_UPGRADE_POST = os.path.join(ALEMBIC_PATH, 'alembic_post_upgrade_versions')
SQL_DATABASE_MYCODO = os.path.join(DATABASE_PATH, DATABASE_NAME)
MYCODO_DB_PATH = 'sqlite:///' + SQL_DATABASE_MYCODO
# Misc paths
PATH_1WIRE = '/sys/bus/w1/devices/'
PATH_CONTROLLERS = os.path.join(INSTALL_DIRECTORY, 'mycodo/controllers')
PATH_FUNCTIONS = os.path.join(INSTALL_DIRECTORY, 'mycodo/functions')
PATH_FUNCTION_ACTIONS = os.path.join(INSTALL_DIRECTORY, 'mycodo/function_actions')
PATH_INPUTS = os.path.join(INSTALL_DIRECTORY, 'mycodo/inputs')
PATH_OUTPUTS = os.path.join(INSTALL_DIRECTORY, 'mycodo/outputs')
PATH_WIDGETS = os.path.join(INSTALL_DIRECTORY, 'mycodo/widgets')
PATH_FUNCTIONS_CUSTOM = os.path.join(PATH_FUNCTIONS, 'custom_functions')
PATH_FUNCTION_ACTIONS_CUSTOM = os.path.join(PATH_FUNCTION_ACTIONS, 'custom_function_actions')
PATH_INPUTS_CUSTOM = os.path.join(PATH_INPUTS, 'custom_inputs')
PATH_OUTPUTS_CUSTOM = os.path.join(PATH_OUTPUTS, 'custom_outputs')
PATH_WIDGETS_CUSTOM = os.path.join(PATH_WIDGETS, 'custom_widgets')
PATH_USER_SCRIPTS = os.path.join(INSTALL_DIRECTORY, 'mycodo/user_scripts')
PATH_HTML_USER = os.path.join(INSTALL_DIRECTORY, 'mycodo/mycodo_flask/templates/user_templates')
PATH_PYTHON_CODE_USER = os.path.join(INSTALL_DIRECTORY, 'mycodo/user_python_code')
PATH_MEASUREMENTS_BACKUP = os.path.join(INSTALL_DIRECTORY, 'mycodo/backup_measurements')
PATH_SETTINGS_BACKUP = os.path.join(INSTALL_DIRECTORY, 'mycodo/backup_settings')
USAGE_REPORTS_PATH = os.path.join(INSTALL_DIRECTORY, 'output_usage_reports')
DEPENDENCY_INIT_FILE = os.path.join(INSTALL_DIRECTORY, '.dependency')
UPGRADE_INIT_FILE = os.path.join(INSTALL_DIRECTORY, '.upgrade')
BACKUP_PATH = '/var/Mycodo-backups'  # Where Mycodo backups are stored
# Log files
LOG_PATH = '/var/log/mycodo'  # Where generated logs are stored
LOGIN_LOG_FILE = os.path.join(LOG_PATH, 'login.log')
DAEMON_LOG_FILE = os.path.join(LOG_PATH, 'mycodo.log')
KEEPUP_LOG_FILE = os.path.join(LOG_PATH, 'mycodokeepup.log')
BACKUP_LOG_FILE = os.path.join(LOG_PATH, 'mycodobackup.log')
DEPENDENCY_LOG_FILE = os.path.join(LOG_PATH, 'mycododependency.log')
UPGRADE_LOG_FILE = os.path.join(LOG_PATH, 'mycodoupgrade.log')
UPGRADE_TMP_LOG_FILE = '/tmp/mycodoupgrade.log'
RESTORE_LOG_FILE = os.path.join(LOG_PATH, 'mycodorestore.log')
HTTP_ACCESS_LOG_FILE = '/var/log/nginx/access.log'
HTTP_ERROR_LOG_FILE = '/var/log/nginx/error.log'
# Lock files
LOCK_PATH = '/var/lock'
LOCK_FILE_STREAM = os.path.join(LOCK_PATH, 'mycodo-camera-stream.pid')
# Run files
RUN_PATH = '/var/run'
FRONTEND_PID_FILE = os.path.join(RUN_PATH, 'mycodoflask.pid')
DAEMON_PID_FILE = os.path.join(RUN_PATH, 'mycodo.pid')
# Remote admin
STORED_SSL_CERTIFICATE_PATH = os.path.join(
    INSTALL_DIRECTORY, 'mycodo/mycodo_flask/ssl_certs/remote_admin')
# Cameras
PATH_CAMERAS = os.path.join(INSTALL_DIRECTORY, 'cameras')
# Notes
PATH_NOTE_ATTACHMENTS = os.path.join(INSTALL_DIRECTORY, 'note_attachments')
# Determine if running in a Docker container
DOCKER_CONTAINER = os.environ.get('DOCKER_CONTAINER', False) == 'TRUE'
# Pyro5 URI/host, used by mycodo_client.py
if DOCKER_CONTAINER:
    PYRO_URI = 'PYRO:mycodo.pyro_server@mycodo_daemon:9080'
else:
    PYRO_URI = 'PYRO:mycodo.pyro_server@127.0.0.1:9080'
# Influx sensor/device measurement database
INFLUXDB_HOST = 'localhost' if not DOCKER_CONTAINER else 'influxdb'
INFLUXDB_PORT = 8086
INFLUXDB_USER = 'mycodo'
# NOTE(review): credentials below are hard-coded and ship with every install
INFLUXDB_PASSWORD = 'mmdu77sj3nIoiajjs'
INFLUXDB_DATABASE = 'mycodo_db'
# Anonymous statistics
STATS_INTERVAL = 86400
STATS_HOST = 'fungi.kylegabriel.com'
STATS_PORT = 8086
STATS_USER = 'mycodo_stats'
STATS_PASSWORD = 'Io8Nasr5JJDdhPOj32222'
STATS_DATABASE = 'mycodo_stats'
STATS_CSV = os.path.join(INSTALL_DIRECTORY, 'statistics.csv')
ID_FILE = os.path.join(INSTALL_DIRECTORY, 'statistics.id')
# Login restrictions
LOGIN_ATTEMPTS = 5
LOGIN_BAN_SECONDS = 600  # 10 minutes
# Check for upgrade every 2 days (if enabled)
UPGRADE_CHECK_INTERVAL = 172800
RELEASE_URL = 'https://api.github.com/repos/kizniche/Mycodo/tags'
class ProdConfig(object):
    """Production configuration for the Mycodo Flask application.

    All attributes are plain Flask/extension settings. The class body also
    creates the Flask secret-key file on first run and loads it into
    SECRET_KEY (this runs once, at import time).
    """
    SQL_DATABASE_MYCODO = os.path.join(DATABASE_PATH, DATABASE_NAME)
    MYCODO_DB_PATH = 'sqlite:///{}'.format(SQL_DATABASE_MYCODO)
    SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(SQL_DATABASE_MYCODO)
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    FLASK_PROFILER = {
        "enabled": True,
        "storage": {
            "engine": "sqlalchemy",
            "db_url": 'sqlite:///{}'.format(os.path.join(DATABASE_PATH, 'profile.db'))
        },
        "basicAuth": {
            "enabled": True,
            "username": "admin231",
            "password": "admin421378956"
        },
        "ignore": [
            "^/static/.*",
            "/login",
            "/settings/users"
        ],
        "endpointRoot": "mycodo-flask-profiler"
    }
    WTF_CSRF_TIME_LIMIT = 60 * 60 * 24 * 7  # 1 week expiration
    REMEMBER_COOKIE_DURATION = timedelta(days=90)
    SESSION_TYPE = "filesystem"
    # Ensure the file containing the Flask secret_key exists; generate a
    # random key on first run.
    FLASK_SECRET_KEY_PATH = os.path.join(DATABASE_PATH, 'flask_secret_key')
    if not os.path.isfile(FLASK_SECRET_KEY_PATH):
        secret_key = binascii.hexlify(os.urandom(32)).decode()
        if not os.path.exists(DATABASE_PATH):
            os.makedirs(DATABASE_PATH)
        # 'key_file' instead of 'file' to avoid shadowing the builtin
        with open(FLASK_SECRET_KEY_PATH, 'w') as key_file:
            key_file.write(secret_key)
    # Read via a context manager: the original open(...).read() left the
    # file handle unclosed for the life of the process.
    with open(FLASK_SECRET_KEY_PATH, 'rb') as key_file:
        SECRET_KEY = key_file.read()
class TestConfig(object):
    """Testing Configuration."""
    SQLALCHEMY_DATABASE_URI = 'sqlite://'  # in-memory db only. tests drop the tables after they run
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    PRESERVE_CONTEXT_ON_EXCEPTION = False
    # Rate limiting disabled so tests can hammer endpoints freely
    RATELIMIT_ENABLED = False
    SECRET_KEY = '1234'
    SESSION_TYPE = "filesystem"
    TESTING = True
    DEBUG = True
| kizniche/Mycodo | mycodo/config.py | Python | gpl-3.0 | 30,449 |
import os
import shutil
from additional.signals import signal_err, signal_ok, signal_done
def uninstall():
    """Interactively uninstall Sanelotto.

    Asks the user to confirm, then removes the /usr/local/bin/sanelotto
    link and the /usr/share/Sanelotto directory. Returns False on refusal
    or on any failure; progress is reported via the signal_* helpers.
    """
    sl_dir = '/usr/share/'
    question = input('For uninstall Sanelotto enter "uninstall": ')
    if question != 'uninstall':
        print('Not uninstalled')
        return False
    if not os.access('/usr/local/bin/sanelotto', os.W_OK):
        signal_err('Permission denied')
        return False
    try:
        os.remove('/usr/local/bin/sanelotto')
        signal_ok('Link was deleted')
    except OSError:
        # Narrowed from a bare except: removal fails with OSError
        # (missing file or insufficient permissions)
        signal_err('Failed to delete link. Permission denied or link not exist.')
        return False
    try:
        # os.path.join avoids the '/usr/share//Sanelotto' double slash
        shutil.rmtree(os.path.join(sl_dir, 'Sanelotto'))
        signal_ok('Sanelotto directory was deleted')
    except OSError:
        signal_err('Failed to delete Sanelotto directory')
        return False
    signal_done('Sanelotto was deleted')
# | Vladimir37/Sanelotto | routes/uninstall.py | Python | mit | 862 |
import cPickle as pickle
import matplotlib.pyplot as plt
import numpy as np
# Training-set size and where the learned model is pickled to.
nr_examples= 200
modelfilename = '../data/nnfromscratch_eq' + str(nr_examples) + '.pkl'
def activation(x):
    # Hidden-layer activation; alternatives tried are kept commented out.
    return np.tanh(x) #1/ (1 + np.exp(-x)) -0.5 #np.tanh(x) # np.log(1 + np.exp(x))
def the_function(x):
    # Target function the network is trained to approximate.
    return np.sin(2*x)
nn_input_dim = 1 # input layer dimensionality
# Random training inputs drawn uniformly from [-3, 3).
inputs = 6 * np.random.random((nr_examples, nn_input_dim)) - 3
targets = the_function(inputs)
y = targets[...,0].reshape(nr_examples,1)
num_examples = len(targets) # training set size
nn_output_dim = 1 # output layer dimensionality
# Gradient descent parameters (I picked these by hand)
reg_lambda = 0.0 # 0.01 # regularization strength
# Helper function to evaluate the total loss on the dataset
def calculate_loss(model, X = inputs):
    # NOTE(review): the loss always compares against the global training
    # targets `y`, even if a different X is passed in — confirm intended.
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation to calculate our predictions
    z1 = X.dot(W1) + b1
    a1 = activation(z1)
    z2 = a1.dot(W2) + b2
    #exp_scores = np.exp(z2)
    #probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    # Calculating the loss
    #corect_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sqrt(np.sum((z2-y)**2))
    #data_loss = np.sum(corect_logprobs)
    # Add regulatization term to loss (optional)
    #data_loss += reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
    return 1./num_examples * data_loss
# Helper function to run the forward pass and return the raw regression
# output (this is a regression net, not a 0/1 classifier); also prints
# the mean absolute error against `res`.
def predict(model, x=inputs, res = y):
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation
    z1 = x.dot(W1) + b1
    a1 = activation(z1)
    z2 = a1.dot(W2) + b2
    #exp_scores = np.exp(z2)
    #probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    print("mean error " + str(np.mean(np.abs(z2-res))))
    return z2
# This function learns parameters for the neural network and returns the model.
# - nn_hdim: Number of nodes in the hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss every 10000 iterations
# Training stops early if the loss increases between iterations; the model
# is pickled to modelfilename when save is True.
def build_model(nn_hdim, X = inputs, num_passes=50000, print_loss=False,save =True , epsilon = 0.001):
    # Initialize the parameters to random values. We need to learn these.
    np.random.seed(0)
    W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)
    b1 = np.ones((1, nn_hdim))
    W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)
    b2 = np.ones((1, nn_output_dim))
    # This is what we return at the end
    model = {}
    old_loss = 1000.
    # Gradient descent. For each batch...
    for i in xrange(0, num_passes + 1):
        # Forward propagation
        z1 = X.dot(W1) + b1
        a1 = activation(z1)
        z2 = a1.dot(W2) + b2
        #exp_scores = np.exp(z2)
        #probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
        # Backpropagation
        delta3 = z2-y
        #delta3[range(num_examples), len(y)] -= 1
        dW2 = (a1.T).dot(delta3)
        db2 = np.sum(delta3, axis=0, keepdims=True)
        # (1 - a1^2) is the derivative of tanh
        delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
        #print('delta2 = ' + str(delta2))
        dW1 = np.dot(X.T, delta2)
        db1 = np.sum(delta2, axis=0)
        # Add regularization terms (b1 and b2 don't have regularization terms)
        dW2 += reg_lambda * W2
        dW1 += reg_lambda * W1
        # Gradient descent parameter update
        W1 += -epsilon * dW1
        b1 += -epsilon * db1
        W2 += -epsilon * dW2
        b2 += -epsilon * db2
        # Assign new parameters to the model
        model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
        # Optionally print the loss.
        # This is expensive because it uses the whole dataset, so we don't want to do it too often.
        loss = calculate_loss(model)
        if print_loss and i % 10000 == 0:
          print 'Loss after iteration ' +str(i) + ': ' + str(loss)
        if loss > (old_loss):
            # Early stop: diverging loss
            print(str(i)+' loss = ' + str(loss) +' old_loss = ' + str(old_loss))
            break
        old_loss = np.min((loss, old_loss))
    if save:
        gg = open(modelfilename, 'wb')
        pickle.dump(model, gg, protocol=pickle.HIGHEST_PROTOCOL)
        gg.close()
    return model
def load_params(filename):
    """Load and return a pickled model dictionary from *filename*.

    Uses a context manager so the file handle is closed even if
    unpickling raises.
    """
    with open(filename, 'rb') as model_file:
        return pickle.load(model_file)
# Build (and save) a model with a 20-node hidden layer, then reload it.
model = build_model(20, print_loss=True)
model = load_params(modelfilename)
pred = predict(model)
# Evaluate on a regular grid over [-6, 6] — twice the training range,
# so the plot also shows extrapolation behaviour.
test_nr = 100
#inputs_test = 12 * np.random.random((test_nr, nn_input_dim)) - 6
inputs_test = np.arange(-6, 6, 12./test_nr).reshape(test_nr,1)
targets_test = the_function(inputs_test)
y_test = targets_test[...,0].reshape(test_nr,1)
pred = predict(model, x=inputs_test, res = y_test)
# Sort by x so the line plots are drawn left-to-right.
sortx = np.sort(inputs_test[...,0])
sortedarrA = np.asarray(sorted(zip(inputs_test, pred[...,0])))
sortedarrB = np.asarray(sorted(zip(inputs_test,targets_test[...,0])))
predicplot, = plt.plot(sortx, sortedarrA[...,1], label='Prediction')
actualplot, = plt.plot(sortx, sortedarrB[...,1], label='Actual')
plt.title('One hidden layer')
plt.legend(handles=[predicplot, actualplot])
plt.show()
#print(pred)
| laputian/dml | equation/nnfromscratch_eq_func.py | Python | mit | 5,297 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-22 22:46
from __future__ import unicode_literals
from django.db import migrations, models
import tests.mockapp.models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the MockAllField model, which carries
    # one field of every basic Django type exercised by the test suite.
    dependencies = [
        ('mockapp', '0004_load_fixtures'),
    ]
    operations = [
        migrations.CreateModel(
            name='MockAllField',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('charfield', models.CharField(max_length=100)),
                ('integerfield', models.IntegerField()),
                ('floatfield', models.FloatField()),
                ('decimalfield', models.DecimalField(decimal_places=2, max_digits=5)),
                ('boolfield', models.BooleanField(default=False)),
                # Date/datetime defaults are callables so each row gets a
                # fresh random value from the mockapp helpers.
                ('datefield', models.DateField(default=tests.mockapp.models.get_random_date)),
                ('datetimefield', models.DateTimeField(default=tests.mockapp.models.get_random_datetime)),
            ],
        ),
    ]
| jannon/drf-haystack | tests/mockapp/migrations/0006_mockallfield.py | Python | mit | 1,079 |
# -*- coding: utf-8 -*-
import thesportsdb
import sqlite3 as lite
import os
import sys
import xbmc
import xbmcaddon
import urllib
#TODO avoid repetition here!######
# Kodi addon boilerplate: resolve the addon's install and profile paths
# and make the bundled libraries importable.
addon_id = 'script.sportscenter'
settings = xbmcaddon.Addon(id=addon_id)
addonpath = settings.getAddonInfo('path').decode('utf-8')
sys.path.append(os.path.join(addonpath,'resources','lib'))
profilepath= xbmc.translatePath(settings.getAddonInfo('profile')).decode('utf-8')
########
# NOTE(review): star import presumably provides tsdbkey used below — verify.
from centerutils.common_variables import *
# The sqlite DB lives in the addon's user profile folder; SQL column
# templates ship next to this module.
sc_database = os.path.join(profilepath,'sc_database.db')
templatefolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'templates')
class Creator:
#variables
def __init__(self,):
self.team_template = os.path.join(templatefolder,'team.txt')
self.league_template = os.path.join(templatefolder,'league.txt')
self.event_template = os.path.join(templatefolder,'event.txt')
return
#all
def create_table_all(self,):
self.create_table_league()
self.create_table_team()
self.create_table_event()
return
def drop_and_create_table_all(self,):
self.drop_and_create_table_league()
self.drop_and_create_table_team()
self.drop_and_create_table_event()
return
#league
def create_table_league(self,):
con = lite.connect(sc_database)
lines = []
with open(self.league_template) as f:
lines = f.readlines()
query_string = ''
for line in lines:
query_string = query_string + line
cur = con.cursor()
cur.execute("CREATE TABLE League("+query_string+")")
if con:
print "SportsCenter: Table League created successfully!"
con.close()
return
def drop_table_league(self,):
con = lite.connect(sc_database)
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS League")
if con:
print "SportsCenter: Table League dropped successfully!"
con.close()
return
def drop_and_create_table_league(self,):
self.drop_table_league()
self.create_table_league()
return
#team
def create_table_team(self,):
con = lite.connect(sc_database)
lines = []
with open(self.team_template) as f:
lines = f.readlines()
query_string = ''
for line in lines:
query_string = query_string + line
cur = con.cursor()
cur.execute("CREATE TABLE Team("+query_string+")")
if con:
print "SportsCenter: Table Team created successfully!"
con.close()
return
def drop_table_team(self,):
con = lite.connect(sc_database)
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS Team")
if con:
print "SportsCenter: Table Team dropped successfully!"
con.close()
return
def drop_and_create_table_team(self,):
self.drop_table_team()
self.create_table_team()
return
#event
def create_table_event(self,):
con = lite.connect(sc_database)
lines = []
with open(self.event_template) as f:
lines = f.readlines()
query_string = ''
for line in lines:
query_string = query_string + line
cur = con.cursor()
cur.execute("CREATE TABLE Event("+query_string+")")
if con:
print "SportsCenter: Table Event created successfully!"
con.close()
return
def drop_table_event(self,):
con = lite.connect(sc_database)
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS Event")
if con:
print "SportsCenter: Table Event dropped successfully!"
con.close()
return
def drop_and_create_table_event(self,):
self.drop_table_event()
self.create_table_event()
return
class Checker:
    """Existence checks for the SportsCenter database tables."""
    def check_if_table_exists(self,table):
        # Probe the table with a SELECT; a sqlite error means it's absent.
        con = lite.connect(sc_database)
        cur = con.cursor()
        try:
            cur.execute("SELECT * FROM "+table)
            con.close()
            return True
        except lite.Error:
            # Narrowed from a bare except: only sqlite failures mean
            # "table missing"; other exceptions should surface.
            con.close()
            return False
    def create_table_if_not_exists(self,table):
        # Only the three managed tables are ever created; anything else
        # is silently ignored (same behaviour as the old if/pass chain).
        if table not in ('League', 'Team', 'Event'):
            return
        if not self.check_if_table_exists(table):
            if table == 'League': Creator().create_table_league()
            elif table == 'Team': Creator().create_table_team()
            else: Creator().create_table_event()
        return
class Inserter:
    # Inserts rows from thesportsdb lookup dictionaries into the local
    # League/Team/Event tables.
    def __init__(self,):
        pass
    def global_inserter(self,table,dictionary,file_folder=None):
        # Build and execute an INSERT for *table* from *dictionary*.
        # NOTE(review): values are appended in sorted(dictionary.keys())
        # order while the column list keeps table order — this assumes the
        # two orders coincide; verify against the table templates.
        con = lite.connect(sc_database)
        cur = con.cursor()
        cur.execute('select * from '+table)
        colums = list(map(lambda x: x[0], cur.description))
        totalcolums = len(colums)
        # Row count is used to synthesize the next primary-key value
        totalitems = len(cur.fetchall())
        next = totalitems + 1
        key_array = '('
        key_array_add = ''
        i=0
        # Comma-separated column-name list, quotes stripped
        for key in colums:
            if i != (totalcolums-1): key_array_add = key_array_add + key.replace('"','').replace("'","") + ','
            else: key_array_add = key_array_add + key.replace('"','').replace("'","") + ')'
            i+=1
        key_array = key_array + key_array_add
        values_array_tmp = []
        # Collect values for keys that match a column; empty/None -> 'null',
        # strSport is URL-quoted lowercase so it can be used in lookups.
        for key in sorted(dictionary.keys()):
            if key in colums:
                if dictionary[key] == None or dictionary[key] == '': values_array_tmp.append('null')
                else:
                    if key != 'strSport': values_array_tmp.append(dictionary[key].replace('"','').replace("'",""))
                    else: values_array_tmp.append(urllib.quote(dictionary[key].replace('"','').replace("'","").lower()))
        values_array = '('+str(next)+','
        i=0
        # Event rows carry an extra trailing column with the strm file/folder
        for key in values_array_tmp:
            if i != (len(values_array_tmp)-1):
                values_array = values_array + "'"+key +"'"+ ','
            else:
                if table != 'Event':
                    values_array = values_array +"'"+ key +"'"+')'
                else:
                    values_array = values_array +"'"+ key +"','"+file_folder+"')"
            i+=1
        # NOTE(review): SQL is assembled by string concatenation; values come
        # from thesportsdb responses — consider parameterized queries.
        sql_string = "INSERT INTO "+table+" "+key_array+" VALUES "+values_array+";"
        cur.execute(sql_string)
        if con:
            con.commit()
            con.close()
            print "SportsCenter: added to " + table + "!"
        return
    def insert_team(self,_team_id_or_dict_):
        # Accept either a team id string or an already-fetched dictionary
        if type(_team_id_or_dict_) == str:
            team_dictionary = thesportsdb.Lookups(tsdbkey).lookupteam(_team_id_or_dict_)["teams"][0]
        elif type(_team_id_or_dict_) == dict:
            team_dictionary = _team_id_or_dict_
        else: team_dictionary = None
        if team_dictionary:
            #here we check if the table exists if not we create it
            Checker().create_table_if_not_exists('Team')
            #send the dictionary to global inserter
            self.global_inserter('Team',team_dictionary)
        return
    def insert_league(self,_league_id_or_dict_):
        if type(_league_id_or_dict_) == str:
            league_dictionary = thesportsdb.Lookups(tsdbkey).lookupleague(_league_id_or_dict_)["leagues"][0]
        elif type(_league_id_or_dict_) == dict:
            league_dictionary = _league_id_or_dict_
        else: league_dictionary = None
        if league_dictionary:
            #here we check if the table exists if not we create it
            Checker().create_table_if_not_exists('League')
            #send the dictionary to global inserter
            self.global_inserter('League',league_dictionary)
        return
    def insert_event(self,_event_id_or_dict_,folder_file):
        # folder_file: path stored alongside the event row (extra column)
        if type(_event_id_or_dict_) == str:
            event_dictionary = thesportsdb.Lookups(tsdbkey).lookupevent(_event_id_or_dict_)["events"][0]
        elif type(_event_id_or_dict_) == dict:
            event_dictionary = _event_id_or_dict_
        else: event_dictionary = None
        if event_dictionary:
            #here we check if the table exists if not we create it
            Checker().create_table_if_not_exists('Event')
            #send the dictionary to global inserter
            self.global_inserter('Event',event_dictionary,folder_file)
        return
class Remover:
def __init__(self,):
pass
def global_remover(self,table,db_key,sc_id):
con = lite.connect(sc_database)
cur = con.cursor()
cur.execute("delete from "+table+" where "+db_key+" = '%s' " % sc_id)
print "SportsCenter: "+db_key+" = "+sc_id+" removed from " + table +"!"
if con:
con.commit()
con.close()
return
def remove_team(self,_team_id_or_dict_):
if type(_team_id_or_dict_) == str:
sc_id = _team_id_or_dict_
elif type(_event_id_or_dict_) == dict:
sc_id = thesportsdb.Teams().get_id(_team_id_or_dict_)
else: sc_id = None
if sc_id:
self.global_remover('Team','idTeam',sc_id)
return
def remove_league(self,_league_id_or_dict_):
if type(_league_id_or_dict_) == str:
sc_id = _league_id_or_dict_
elif type(_league_id_or_dict_) == dict:
sc_id = thesportsdb.Leagues().get_id(_league_id_or_dict_)
else: sc_id = None
if sc_id:
self.global_remover('League','idLeague',sc_id)
return
def remove_event(self,_event_id_or_dict_):
if type(_event_id_or_dict_) == str:
sc_id = _event_id_or_dict_
elif type(_event_id_or_dict_) == dict:
sc_id = thesportsdb.Events().get_eventid(_event_id_or_dict_)
else: sc_id = None
if sc_id:
self.global_remover('Event','idEvent',sc_id)
return
class Updater:
    """Refreshes a stored row by removing it and re-inserting it."""
    def __init__(self,):
        pass
    def update_team(self,_team_id_or_dict_):
        Remover().remove_team(_team_id_or_dict_)
        Inserter().insert_team(_team_id_or_dict_)
        return
    def update_league(self,_league_id_or_dict_):
        Remover().remove_league(_league_id_or_dict_)
        Inserter().insert_league(_league_id_or_dict_)
        return
    def update_event(self,_event_id_or_dict_,folder_file=None):
        # BUGFIX: insert_event requires a folder_file argument; the old
        # code omitted it and always raised TypeError. The parameter is
        # added with a default so existing one-argument callers still run.
        Remover().remove_event(_event_id_or_dict_)
        Inserter().insert_event(_event_id_or_dict_,folder_file)
        return
class Retriever:
    """Reads League/Team/Event rows back as lists of column->value dicts.

    Each get_all_* method picks a SQL statement based on which filters are
    given (None/empty filters are ignored), then maps every row into a
    dictionary keyed by column name.
    """
    def __init__(self,):
        pass
    def get_all_teams(self,sport,league,team):
        teams = []
        #decide which sql_string to use here
        if not sport and not league and not team:
            sql_cmd = "SELECT * FROM Team"
        elif sport and not league and not team:
            sql_cmd = "SELECT * FROM Team where strSport = '"+sport+"'"
        elif sport and league and not team:
            # BUGFIX: the league value was previously closed with a
            # double-quote character ( +'"' ), producing malformed SQL.
            sql_cmd = "SELECT * FROM Team where strSport = '"+sport+"' AND idLeague = '"+league+"'"
        else:
            sql_cmd = "SELECT * FROM Team where idTeam = '"+team+"'"
        #All looks the same below
        con = lite.connect(sc_database)
        with con:
            cur = con.cursor()
            cur.execute(sql_cmd)
            colums = list(map(lambda x: x[0], cur.description))
            rows = cur.fetchall()
            # Convert each row tuple into a {column: value} dict
            for row in rows:
                row_dict = {}
                i=0
                for info in row:
                    row_dict[colums[i]] = info
                    i +=1
                if row_dict: teams.append(row_dict)
        if con:
            con.close()
        return teams
    def get_all_leagues(self,sport,league):
        leagues = []
        #decide which sql_string to use here
        if not sport and not league:
            sql_cmd = "SELECT * FROM League"
        elif sport and not league:
            sql_cmd = "SELECT * FROM League where strSport = '"+sport+"'"
        elif sport and league:
            sql_cmd = "SELECT * FROM League where strSport = '"+sport+"' AND idLeague = '"+league+"'"
        #All looks the same below
        con = lite.connect(sc_database)
        with con:
            cur = con.cursor()
            cur.execute(sql_cmd)
            colums = list(map(lambda x: x[0], cur.description))
            rows = cur.fetchall()
            for row in rows:
                row_dict = {}
                i=0
                for info in row:
                    row_dict[colums[i]] = info
                    i +=1
                if row_dict:
                    leagues.append(row_dict)
        if con:
            con.close()
        return leagues
    def get_all_events(self,sport,season,league,team):
        # A team filter matches events where the team is either home or away.
        events = []
        #decide which sql_string to use here
        if not sport and not season and not league and not team:
            sql_cmd = "SELECT * FROM Event"
        elif sport and league and season and not team:
            sql_cmd = "SELECT * FROM Event where strSport = '"+sport+"' AND idLeague = '"+league+"' AND strSeason = '"+season+"'"
        elif sport and not season and not league and not team:
            sql_cmd = "SELECT * FROM Event where strSport = '"+sport+"'"
        elif sport and season and not league and not team:
            sql_cmd = "SELECT * FROM Event where strSport = '"+sport+"' AND strSeason = '"+season+"'"
        elif sport and league and not season and not team:
            sql_cmd = "SELECT * FROM Event where strSport = '"+sport+"' AND idLeague = '"+league+"'"
        elif sport and not league and not season and team:
            sql_cmd = "SELECT * FROM Event where (strSport = '"+sport+"' AND idAwayTeam = '"+team+"') OR (strSport = '"+sport+"' AND idHomeTeam = '"+team+"')"
        elif sport and league and not season and team:
            sql_cmd = "SELECT * FROM Event where (strSport = '"+sport+"' AND idAwayTeam = '"+team+"' AND idLeague = '"+league+"') OR (strSport = '"+sport+"' AND idHomeTeam = '"+team+"' AND idLeague = '"+league+"')"
        elif sport and league and season and team:
            sql_cmd = "SELECT * FROM Event where (strSport = '"+sport+"' AND idAwayTeam = '"+team+"' AND idLeague = '"+league+"' AND strSeason = '"+season+"') OR (strSport = '"+sport+"' AND idHomeTeam = '"+team+"' AND idLeague = '"+league+"' AND strSeason = '"+season+"')"
        #All looks the same below
        con = lite.connect(sc_database)
        with con:
            cur = con.cursor()
            cur.execute(sql_cmd)
            colums = list(map(lambda x: x[0], cur.description))
            rows = cur.fetchall()
            for row in rows:
                row_dict = {}
                i=0
                for info in row:
                    row_dict[colums[i]] = info
                    i +=1
                if row_dict: events.append(row_dict)
        if con:
            con.close()
        return events
| enen92/script.sportscenter | resources/lib/centerutils/database/sc_database.py | Python | gpl-2.0 | 12,616 |
import json
import os
import os.path as opath
import shutil
import subprocess
from codegen.datatypes import build_datatype_py, write_datatype_py
from codegen.compatibility import (
write_deprecated_datatypes,
write_graph_objs_graph_objs,
DEPRECATED_DATATYPES,
)
from codegen.figure import write_figure_classes
from codegen.utils import (
TraceNode,
PlotlyNode,
LayoutNode,
FrameNode,
write_init_py,
ElementDefaultsNode,
build_from_imports_py,
)
from codegen.validators import (
write_validator_py,
write_data_validator_py,
get_data_validator_instance,
)
# Import notes
# ------------
# Nothing from the plotly/ package should be imported during code
# generation. This introduces a lot of complexity regarding when imports
# happen relative to when various stages of code generation occur. Instead,
# helpers that are only needed during code generation should reside in the
# codegen/ package, and helpers used both during code generation and at
# runtime should reside in the _plotly_utils/ package.
# ----------------------------------------------------------------------------
def preprocess_schema(plotly_schema):
    """
    Central location to make changes to schema before it's seen by the
    PlotlyNode classes
    """
    # Build a codegen-friendly "template" entry and attach it to the layout
    # attributes in place.
    layout_attrs = plotly_schema["layout"]["layoutAttributes"]

    trace_containers = {}
    for trace in plotly_schema["traces"]:
        trace_containers[trace + "s"] = {"items": {trace: {}}, "role": "object"}

    layout_attrs["template"] = {
        "data": trace_containers,
        "layout": {},
        "description": """\
Default attributes to be applied to the plot.
This should be a dict with format: `{'layout': layoutTemplate, 'data':
{trace_type: [traceTemplate, ...], ...}}` where `layoutTemplate` is a dict
matching the structure of `figure.layout` and `traceTemplate` is a dict
matching the structure of the trace with type `trace_type` (e.g. 'scatter').
Alternatively, this may be specified as an instance of
plotly.graph_objs.layout.Template.
Trace templates are applied cyclically to
traces of each type. Container arrays (eg `annotations`) have special
handling: An object ending in `defaults` (eg `annotationdefaults`) is
applied to each array item. But if an item has a `templateitemname`
key we look in the template array for an item with matching `name` and
apply that instead. If no matching `name` is found we mark the item
invisible. Any named template item not referenced is appended to the
end of the array, so this can be used to add a watermark annotation or a
logo image, for example. To omit one of these items on the plot, make
an item with matching `templateitemname` and `visible: false`.""",
    }

    # Sankey link colorscales were historically named `concentrationscales`;
    # expose them under the conventional `colorscale` key instead.
    colorscale_items = plotly_schema["traces"]["sankey"]["attributes"]["link"][
        "colorscales"
    ]["items"]
    if "concentrationscales" in colorscale_items:
        colorscale_items["colorscale"] = colorscale_items.pop("concentrationscales")
def perform_codegen():
    """Generate plotly's ``validators`` and ``graph_objs`` packages.

    Reads ``codegen/resources/plot-schema.json``, preprocesses it, builds
    the PlotlyNode trees (traces, layout, frames), then writes validator
    modules, datatype modules, Figure classes, ``__init__.py`` files and
    the ``graph_objects`` alias package, finally running ``black`` over
    the generated output.  All output is written under
    ``packages/python/plotly/plotly``.
    """
    # Set root codegen output directory
    # ---------------------------------
    # (relative to project root)
    abs_file_path = opath.realpath(__file__)
    packages_py = opath.dirname(opath.dirname(opath.dirname(abs_file_path)))
    outdir = opath.join(packages_py, "plotly", "plotly")

    # Delete prior codegen output
    # ---------------------------
    validators_pkgdir = opath.join(outdir, "validators")
    if opath.exists(validators_pkgdir):
        shutil.rmtree(validators_pkgdir)

    graph_objs_pkgdir = opath.join(outdir, "graph_objs")
    if opath.exists(graph_objs_pkgdir):
        shutil.rmtree(graph_objs_pkgdir)

    # plotly/datatypes is not used anymore, but was at one point so we'll
    # still delete it if we find it in case a developer is upgrading from an
    # older version
    datatypes_pkgdir = opath.join(outdir, "datatypes")
    if opath.exists(datatypes_pkgdir):
        shutil.rmtree(datatypes_pkgdir)

    # Load plotly schema
    # ------------------
    plot_schema_path = opath.join(
        packages_py, "plotly", "codegen", "resources", "plot-schema.json"
    )

    with open(plot_schema_path, "r") as f:
        plotly_schema = json.load(f)

    # Preprocess Schema
    # -----------------
    preprocess_schema(plotly_schema)

    # Build node lists
    # ----------------
    # ### TraceNode ###
    base_traces_node = TraceNode(plotly_schema)
    compound_trace_nodes = PlotlyNode.get_all_compound_datatype_nodes(
        plotly_schema, TraceNode
    )
    all_trace_nodes = PlotlyNode.get_all_datatype_nodes(plotly_schema, TraceNode)

    # ### LayoutNode ###
    compound_layout_nodes = PlotlyNode.get_all_compound_datatype_nodes(
        plotly_schema, LayoutNode
    )
    layout_node = compound_layout_nodes[0]
    all_layout_nodes = PlotlyNode.get_all_datatype_nodes(plotly_schema, LayoutNode)

    # Layout children that plotly.js treats as subplot objects (xaxis, etc.)
    subplot_nodes = [
        node
        for node in layout_node.child_compound_datatypes
        if node.node_data.get("_isSubplotObj", False)
    ]

    # Layout array elements that carry x/y references (annotations, shapes, ...)
    layout_array_nodes = [
        node
        for node in layout_node.child_compound_datatypes
        if node.is_array_element and node.has_child("xref") and node.has_child("yref")
    ]

    # ### FrameNode ###
    compound_frame_nodes = PlotlyNode.get_all_compound_datatype_nodes(
        plotly_schema, FrameNode
    )
    frame_node = compound_frame_nodes[0]
    all_frame_nodes = PlotlyNode.get_all_datatype_nodes(plotly_schema, FrameNode)

    # ### All nodes ###
    all_datatype_nodes = all_trace_nodes + all_layout_nodes + all_frame_nodes

    all_compound_nodes = [
        node
        for node in all_datatype_nodes
        if node.is_compound and not isinstance(node, ElementDefaultsNode)
    ]

    # Write out validators
    # --------------------
    # # ### Layout ###
    for node in all_layout_nodes:
        write_validator_py(outdir, node)

    # ### Trace ###
    for node in all_trace_nodes:
        write_validator_py(outdir, node)

    # ### Frames ###
    for node in all_frame_nodes:
        write_validator_py(outdir, node)

    # ### Data (traces) validator ###
    write_data_validator_py(outdir, base_traces_node)

    # Alls
    # ----
    # NOTE(review): `alls` is never populated anywhere in this function, so
    # the `__all__` writing loop below is currently a no-op — confirm whether
    # this is leftover scaffolding.
    alls = {}

    # Write out datatypes
    # -------------------
    for node in all_compound_nodes:
        write_datatype_py(outdir, node)

    # ### Deprecated ###
    # These are deprecated legacy datatypes like graph_objs.Marker
    write_deprecated_datatypes(outdir)

    # Write figure class to graph_objs
    # --------------------------------
    data_validator = get_data_validator_instance(base_traces_node)
    layout_validator = layout_node.get_validator_instance()
    frame_validator = frame_node.get_validator_instance()

    write_figure_classes(
        outdir,
        base_traces_node,
        data_validator,
        layout_validator,
        frame_validator,
        subplot_nodes,
        layout_array_nodes,
    )

    # Write validator __init__.py files
    # ---------------------------------
    # ### Write __init__.py files for each validator package ###
    validator_rel_class_imports = {}
    for node in all_datatype_nodes:
        if node.is_mapped:
            continue
        key = node.parent_path_parts
        validator_rel_class_imports.setdefault(key, []).append(
            f"._{node.name_property}.{node.name_validator_class}"
        )

    # Add Data validator
    root_validator_pairs = validator_rel_class_imports[()]
    root_validator_pairs.append("._data.DataValidator")

    # Output validator __init__.py files
    validators_pkg = opath.join(outdir, "validators")
    for path_parts, rel_classes in validator_rel_class_imports.items():
        write_init_py(validators_pkg, path_parts, [], rel_classes)

    # Write datatype __init__.py files
    # --------------------------------
    datatype_rel_class_imports = {}
    datatype_rel_module_imports = {}

    for node in all_compound_nodes:
        key = node.parent_path_parts

        # class import
        datatype_rel_class_imports.setdefault(key, []).append(
            f"._{node.name_undercase}.{node.name_datatype_class}"
        )

        # submodule import
        if node.child_compound_datatypes:
            datatype_rel_module_imports.setdefault(key, []).append(
                f".{node.name_undercase}"
            )

    # ### Write plotly/graph_objs/graph_objs.py ###
    # This if for backward compatibility. It just imports everything from
    # graph_objs/__init__.py
    write_graph_objs_graph_objs(outdir)

    # ### Add Figure and FigureWidget ###
    root_datatype_imports = datatype_rel_class_imports[()]
    root_datatype_imports.append("._figure.Figure")

    # ### Add deprecations ###
    for dep_clas in DEPRECATED_DATATYPES:
        root_datatype_imports.append(f"._deprecations.{dep_clas}")

    # Source snippet injected into root __init__ files: imports FigureWidget
    # eagerly on Python < 3.7, lazily (module __getattr__) otherwise, falling
    # back to the missing_ipywidgets stub when ipywidgets is absent/too old.
    optional_figure_widget_import = f"""
if sys.version_info < (3, 7):
    try:
        import ipywidgets as _ipywidgets
        from distutils.version import LooseVersion as _LooseVersion

        if _LooseVersion(_ipywidgets.__version__) >= _LooseVersion("7.0.0"):
            from ..graph_objs._figurewidget import FigureWidget
        else:
            raise ImportError()
    except Exception:
        from ..missing_ipywidgets import FigureWidget
else:
    __all__.append("FigureWidget")
    orig_getattr = __getattr__

    def __getattr__(import_name):
        if import_name == "FigureWidget":
            try:
                import ipywidgets
                from distutils.version import LooseVersion

                if LooseVersion(ipywidgets.__version__) >= LooseVersion("7.0.0"):
                    from ..graph_objs._figurewidget import FigureWidget

                    return FigureWidget
                else:
                    raise ImportError()
            except Exception:
                from ..missing_ipywidgets import FigureWidget

                return FigureWidget

        return orig_getattr(import_name)
"""

    # ### __all__ ###
    for path_parts, class_names in alls.items():
        if path_parts and class_names:
            filepath = opath.join(outdir, "graph_objs", *path_parts, "__init__.py")
            with open(filepath, "at") as f:
                f.write(f"\n__all__ = {class_names}")

    # ### Output datatype __init__.py files ###
    graph_objs_pkg = opath.join(outdir, "graph_objs")
    for path_parts in datatype_rel_class_imports:
        rel_classes = sorted(datatype_rel_class_imports[path_parts])
        rel_modules = sorted(datatype_rel_module_imports.get(path_parts, []))
        if path_parts == ():
            init_extra = optional_figure_widget_import
        else:
            init_extra = ""
        write_init_py(graph_objs_pkg, path_parts, rel_modules, rel_classes, init_extra)

    # ### Output graph_objects.py alias
    graph_objects_rel_classes = [
        "..graph_objs." + rel_path.split(".")[-1]
        for rel_path in datatype_rel_class_imports[()]
    ]
    graph_objects_rel_modules = [
        "..graph_objs." + rel_module.split(".")[-1]
        for rel_module in datatype_rel_module_imports[()]
    ]

    graph_objects_init_source = build_from_imports_py(
        graph_objects_rel_modules,
        graph_objects_rel_classes,
        init_extra=optional_figure_widget_import,
    )
    graph_objects_path = opath.join(outdir, "graph_objects", "__init__.py")
    os.makedirs(opath.join(outdir, "graph_objects"), exist_ok=True)
    with open(graph_objects_path, "wt") as f:
        f.write(graph_objects_init_source)

    # ### Run black code formatter on output directories ###
    subprocess.call(["black", "--target-version=py27", validators_pkgdir])
    subprocess.call(["black", "--target-version=py27", graph_objs_pkgdir])
    subprocess.call(["black", "--target-version=py27", graph_objects_path])
if __name__ == "__main__":
    # Allow running the code generator directly as a script.
    perform_codegen()
| plotly/plotly.py | packages/python/plotly/codegen/__init__.py | Python | mit | 11,939 |
import dateparser
import random
from collections import namedtuple
import pytest
from cfme.containers.image import Image
from cfme.containers.provider import (ContainersProvider, ContainersTestItem,
refresh_and_navigate)
from cfme.utils.wait import wait_for
from cfme.configure.tasks import delete_all_tasks
from cfme.utils.appliance.implementations.ui import navigate_to
pytestmark = [
pytest.mark.meta(server_roles='+smartproxy'),
pytest.mark.usefixtures('setup_provider'),
pytest.mark.tier(1),
pytest.mark.provider([ContainersProvider], scope='function')]
# (table, attr, verifier): which Details-view table to read, which row to
# look at, and a callable that validates the row's value.
AttributeToVerify = namedtuple('AttributeToVerify', ['table', 'attr', 'verifier'])

TESTED_ATTRIBUTES__openscap_off = (
    AttributeToVerify('configuration', 'OpenSCAP Results', bool),
    AttributeToVerify('configuration', 'OpenSCAP HTML', lambda val: val == 'Available'),
    AttributeToVerify('configuration', 'Last scan', dateparser.parse)
)
# With the OpenSCAP profile assigned we additionally expect compliance data.
TESTED_ATTRIBUTES__openscap_on = TESTED_ATTRIBUTES__openscap_off + (
    AttributeToVerify('compliance', 'Status', lambda val: val.lower() != 'never verified'),
    AttributeToVerify('compliance', 'History', lambda val: val == 'Available')
)

# Parametrization matrix: one run without and one with the OpenSCAP profile.
TEST_ITEMS = (
    pytest.mark.polarion('CMP-9496')(
        ContainersTestItem(Image, 'CMP-9496',
                           is_openscap=False,
                           tested_attr=TESTED_ATTRIBUTES__openscap_off)
    ),
    pytest.mark.polarion('CMP-10064')(
        ContainersTestItem(Image, 'CMP-10064',
                           is_openscap=True,
                           tested_attr=TESTED_ATTRIBUTES__openscap_on)
    )
)

# Number of images sampled from the provider for each test run.
NUM_SELECTED_IMAGES = 1
@pytest.fixture(scope='function')
def delete_all_container_tasks():
    """Start each test with a clean task list so waits on task results do
    not pick up leftovers from earlier tests."""
    delete_all_tasks('AllTasks')
@pytest.fixture(scope='function')
def random_image_instance(appliance):
    """Return one randomly chosen active container image from the
    Red Hat registry on the appliance."""
    collection = appliance.collections.container_images
    # add filter for select only active(not archived) images from redHat registry
    filter_image_collection = collection.filter({'active': True, 'redhat_registry': True})
    return random.sample(filter_image_collection.all(), NUM_SELECTED_IMAGES).pop()
@pytest.mark.polarion('10030')
def test_manage_policies_navigation(random_image_instance):
    """Assign the OpenSCAP policy profile to an image via Manage Policies."""
    random_image_instance.assign_policy_profiles('OpenSCAP profile')
@pytest.mark.polarion('10031')
def test_check_compliance(random_image_instance):
    """Assign the OpenSCAP profile and trigger a compliance check."""
    random_image_instance.assign_policy_profiles('OpenSCAP profile')
    random_image_instance.check_compliance()
def get_table_attr(instance, table_name, attr):
    """Read attribute *attr* from table *table_name* on the Details view.

    Returns ``None`` when the view has no such table or the table has no
    such attribute.
    """
    details_view = refresh_and_navigate(instance, 'Details')
    table = getattr(details_view.entities, table_name, None)
    if not table:
        return None
    return table.read().get(attr)
@pytest.mark.parametrize(('test_item'), TEST_ITEMS)
def test_containers_smartstate_analysis(provider, test_item, soft_assert,
                                        delete_all_container_tasks,
                                        random_image_instance):
    """Run SmartState analysis on a random image and verify the
    Details-view tables contain the expected scan/compliance data."""
    # Profile assignment drives which attribute set we expect afterwards.
    if test_item.is_openscap:
        random_image_instance.assign_policy_profiles('OpenSCAP profile')
    else:
        random_image_instance.unassign_policy_profiles('OpenSCAP profile')

    random_image_instance.perform_smartstate_analysis(wait_for_finish=True)

    view = navigate_to(random_image_instance, 'Details')
    for tbl, attr, verifier in test_item.tested_attr:

        table = getattr(view.entities, tbl)
        # Lower-case keys so the expected attribute check is case-insensitive.
        table_data = {k.lower(): v for k, v in table.read().items()}

        if not soft_assert(attr.lower() in table_data,
                           '{} table has missing attribute \'{}\''.format(tbl, attr)):
            continue
        provider.refresh_provider_relationships()
        # Attribute values can lag behind the refresh; poll up to 2 minutes.
        wait_for_retval = wait_for(lambda: get_table_attr(random_image_instance, tbl, attr),
                                   message='Trying to get attribute "{}" of table "{}"'.format(
                                       attr, tbl),
                                   delay=5, num_sec=120, silent_failure=True)
        if not wait_for_retval:
            soft_assert(False, 'Could not get attribute "{}" for "{}" table.'
                        .format(attr, tbl))
            continue
        value = wait_for_retval.out
        soft_assert(verifier(value),
                    '{}.{} attribute has unexpected value ({})'.format(tbl, attr, value))
| lkhomenk/integration_tests | cfme/tests/containers/test_containers_smartstate_analysis.py | Python | gpl-2.0 | 4,387 |
from base import BabeBase, StreamHeader, StreamFooter
# Get the tokens from https://dev.twitter.com/docs/auth/tokens-devtwittercom
# The consumer keys can be found on your application's Details
# page located at https://dev.twitter.com/apps (under "OAuth settings")
# The access tokens can be found on your applications's Details
# page located at https://dev.twitter.com/apps (located
# under "Your access token")
def flatten_status(u):
    """Flatten nested status attributes in place.

    Copies every attribute of ``u.author`` (except the private ``_api``
    handle) onto ``u`` as ``author_<name>``, and extracts the hashtag
    texts from ``u.entities`` into ``u.hashtags``.
    """
    for prefix in ["author"]:
        nested = getattr(u, prefix)
        for attr_name, attr_value in nested.__dict__.items():
            if attr_name == "_api":
                continue
            setattr(u, "%s_%s" % (prefix, attr_name), attr_value)
    u.hashtags = [entity['text'] for entity in u.entities["hashtags"]]
def build_status_names(u):
    """Return the sorted attribute names of a status object.

    Internal bookkeeping keys (``_api``, ``user``, ``author``) are removed
    when present.  Fixed for Python 3: the old ``u.__dict__.keys()``
    followed by ``.sort()`` only worked on Python 2, where ``keys()``
    returned a list; on Python 3 a ``dict_keys`` view has no ``sort``.
    """
    names = sorted(u.__dict__)
    for bad_key in ["_api", "user", "author"]:
        try:
            names.remove(bad_key)
        except ValueError:
            # Key not present on this status; nothing to strip.
            pass
    return names
def pull_twitter(false_stream, consumer_key=None,
                 consumer_secret=None, access_token=None, access_token_secret=None):
    """Yield a Babe stream of Twitter statuses.

    With OAuth credentials, pulls the authenticated user's timeline;
    without them, pulls the public timeline.  Emits a StreamHeader built
    from the first status' attributes, then each (flattened) status, then
    a StreamFooter.  ``false_stream`` is the upstream placeholder required
    by the Babe operation protocol and is ignored.
    """
    import tweepy

    if consumer_key:
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        api = tweepy.API(auth)
    else:
        # Unauthenticated access: public timeline only.
        api = tweepy.API()

    # If the authentication was successful, you should
    # see the name of the account print out
    #print api.me().name

    # If the application settings are set for "Read and Write" then
    # this line should tweet out the message to your account's
    # timeline. The "Read and Write" setting is on https://dev.twitter.com/apps
    #api.update_status('Updating using OAuth authentication via Tweepy!')
    metainfo = None
    if consumer_key:
        statuses = api.user_timeline(include_entities=True)
    else:
        statuses = api.public_timeline(include_entities=True)
    for u in statuses:
        flatten_status(u)
        if not metainfo:
            # First status defines the field layout for the whole stream.
            names = build_status_names(u)
            metainfo = StreamHeader(typename="Status", fields=names)
            yield metainfo
            # Make Status objects iterable in header field order so Babe
            # can consume them as rows.
            u.__class__.__iter__ = lambda s: iter([getattr(s, key) for key in names])
        yield u
    yield StreamFooter()
BabeBase.register('pull_twitter', pull_twitter)
| fdouetteau/PyBabe | pybabe/twitter.py | Python | bsd-3-clause | 2,291 |
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
import tensorflow as tf
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.simulation.datasets import from_tensor_slices_client_data
from tensorflow_federated.python.simulation.datasets import transforming_client_data
TEST_DATA = {
'CLIENT A':
collections.OrderedDict(
x=[[1, 2], [3, 4], [5, 6]],
y=[4.0, 5.0, 6.0],
z=['a', 'b', 'c'],
),
'CLIENT B':
collections.OrderedDict(
x=[[10, 11]],
y=[7.0],
z=['d'],
),
'CLIENT C':
collections.OrderedDict(
x=[[100, 101], [200, 201]],
y=[8.0, 9.0],
z=['e', 'f'],
),
}
TEST_CLIENT_DATA = from_tensor_slices_client_data.TestClientData(TEST_DATA)
def _make_transform_expanded(client_id):
  """Return a transform offsetting 'x' by 10 * (numeric prefix of client_id)."""
  prefix = tf.strings.split(client_id, sep='_', maxsplit=1)[0]
  offset = tf.cast(tf.strings.to_number(prefix), tf.int32)

  def _apply(data):
    return collections.OrderedDict([('x', data['x'] + 10 * offset),
                                    ('y', data['y']), ('z', data['z'])])

  return _apply
def _make_transform_raw(client_id):
del client_id
def fn(data):
data['x'] = data['x'] + 10
return data
return fn
NUM_EXPANDED_CLIENTS = 3
def test_expand_client_id(client_id):
return [str(i) + '_' + client_id for i in range(NUM_EXPANDED_CLIENTS)]
def test_reduce_client_id(client_id):
  """Strip the expansion prefix, recovering the raw client id."""
  parts = tf.strings.split(client_id, sep='_')
  return parts[1]
TRANSFORMED_CLIENT_DATA = transforming_client_data.TransformingClientData(
TEST_CLIENT_DATA, _make_transform_expanded, test_expand_client_id,
test_reduce_client_id)
class TransformingClientDataTest(tf.test.TestCase):
  """Tests for TransformingClientData built over TEST_CLIENT_DATA."""

  def test_client_ids_property(self):
    """Client ids are strings, sorted, and one per (expansion, raw client)."""
    num_transformed_clients = len(TEST_DATA) * NUM_EXPANDED_CLIENTS
    client_ids = TRANSFORMED_CLIENT_DATA.client_ids
    self.assertLen(client_ids, num_transformed_clients)
    for client_id in client_ids:
      self.assertIsInstance(client_id, str)
    self.assertListEqual(client_ids, sorted(client_ids))

  def test_default_num_transformed_clients(self):
    """Without expand/reduce fns the raw client ids are kept unchanged."""
    transformed_client_data = transforming_client_data.TransformingClientData(
        TEST_CLIENT_DATA, _make_transform_raw)
    client_ids = transformed_client_data.client_ids
    self.assertCountEqual(client_ids, TEST_DATA.keys())

  def test_fail_on_bad_client_id(self):
    """Malformed or unknown expanded ids raise ValueError."""
    # The following three should be valid.
    TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('0_CLIENT A')
    TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('1_CLIENT B')
    TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('0_CLIENT C')

    # This should not be valid: no prefix.
    with self.assertRaisesRegex(ValueError,
                                'is not a client in this ClientData'):
      TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('CLIENT A')

    # This should not be valid: no corresponding client.
    with self.assertRaisesRegex(ValueError,
                                'is not a client in this ClientData'):
      TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('0_CLIENT D')

    # This should not be valid: index out of range.
    with self.assertRaisesRegex(ValueError,
                                'is not a client in this ClientData'):
      TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('3_CLIENT B')

  def test_dataset_computation(self):
    """dataset_computation matches mapping the transform over the base data."""
    for client_id in TRANSFORMED_CLIENT_DATA.client_ids:
      actual_dataset = TRANSFORMED_CLIENT_DATA.dataset_computation(client_id)
      self.assertIsInstance(actual_dataset, tf.data.Dataset)
      # Split '<index>_<raw id>' back into its parts.
      pattern = r'^(\d*)_(.*)$'
      match = re.search(pattern, client_id)
      client = match.group(2)
      base_client_dataset = TEST_CLIENT_DATA.create_tf_dataset_for_client(
          client)
      expected_dataset = base_client_dataset.map(
          _make_transform_expanded(client_id))
      for actual_client_data, expected_client_data in zip(
          actual_dataset.as_numpy_iterator(),
          expected_dataset.as_numpy_iterator()):
        for actual_datum, expected_datum in zip(actual_client_data,
                                                expected_client_data):
          self.assertEqual(actual_datum, expected_datum)

  def test_create_tf_dataset_from_all_clients(self):
    """Flattened dataset covers every expanded client exactly once."""
    # Expands `CLIENT {N}` into N clients which add range(N) to the feature.
    def expand_client_id(client_id):
      return [client_id + '-' + str(i) for i in range(int(client_id[-1]))]

    def make_transform_fn(client_id):
      split_client_id = tf.strings.split(client_id, '-')
      index = tf.cast(tf.strings.to_number(split_client_id[1]), tf.int32)
      return lambda x: x + index

    reduce_client_id = lambda client_id: tf.strings.split(client_id, sep='-')[0]

    # pyformat: disable
    raw_data = {
        'CLIENT 1': [0],         # expanded to [0]
        'CLIENT 2': [1, 3, 5],   # expanded to [1, 3, 5], [2, 4, 6]
        'CLIENT 3': [7, 10]      # expanded to [7, 10], [8, 11], [9, 12]
    }
    # pyformat: enable

    client_data = from_tensor_slices_client_data.TestClientData(raw_data)
    transformed_client_data = transforming_client_data.TransformingClientData(
        client_data, make_transform_fn, expand_client_id, reduce_client_id)

    flat_data = transformed_client_data.create_tf_dataset_from_all_clients()
    self.assertIsInstance(flat_data, tf.data.Dataset)
    all_features = [batch.numpy() for batch in flat_data]
    self.assertCountEqual(all_features, range(13))
if __name__ == '__main__':
  # Tests exercise TFF computations, so a local execution context is needed.
  execution_contexts.set_local_python_execution_context()
  tf.test.main()
| tensorflow/federated | tensorflow_federated/python/simulation/datasets/transforming_client_data_test.py | Python | apache-2.0 | 6,268 |
from demosys.resources.base import ResourceDescription
class DataDescription(ResourceDescription):
    """Describes a data file to load."""
    require_label = True
    default_loader = 'binary'
    resource_type = 'data'

    def __init__(self, path=None, label=None, loader=None, **kwargs):
        # Fold the explicit arguments into the shared kwargs dict the base
        # class stores.
        kwargs.update(path=path, label=label, loader=loader)
        super().__init__(**kwargs)
class ProgramDescription(ResourceDescription):
    """Describes a shader program to load.

    Either a single `path` (combined glsl file) or the individual shader
    stage paths can be supplied; all values are stored in the kwargs dict
    managed by ResourceDescription.
    """
    require_label = True
    default_loader = None
    resource_type = 'programs'

    def __init__(self, path=None, label=None, loader=None, reloadable=False,
                 vertex_shader=None, geometry_shader=None, fragment_shader=None,
                 tess_control_shader=None, tess_evaluation_shader=None, **kwargs):
        """Collect all stage paths and flags into the description kwargs."""
        kwargs.update({
            "path": path,
            "label": label,
            "loader": loader,
            "reloadable": reloadable,
            "vertex_shader": vertex_shader,
            "geometry_shader": geometry_shader,
            "fragment_shader": fragment_shader,
            "tess_control_shader": tess_control_shader,
            "tess_evaluation_shader": tess_evaluation_shader,
        })
        super().__init__(**kwargs)

    @property
    def reloadable(self):
        """Can the program be reloaded at runtime?"""
        return self._kwargs.get('reloadable')

    @reloadable.setter
    def reloadable(self, value):
        self._kwargs['reloadable'] = value

    @property
    def vertex_shader(self):
        """Path to the vertex shader source, if given separately."""
        return self._kwargs.get('vertex_shader')

    @property
    def geometry_shader(self):
        """Path to the geometry shader source, if given separately."""
        return self._kwargs.get('geometry_shader')

    @property
    def fragment_shader(self):
        """Path to the fragment shader source, if given separately."""
        return self._kwargs.get('fragment_shader')

    @property
    def tess_control_shader(self):
        """Path to the tessellation control shader source, if given."""
        return self._kwargs.get('tess_control_shader')

    @property
    def tess_evaluation_shader(self):
        """Path to the tessellation evaluation shader source, if given."""
        return self._kwargs.get('tess_evaluation_shader')
class SceneDescription(ResourceDescription):
    """Describes a scene to load."""
    require_label = True
    default_loader = None
    resource_type = 'scenes'

    def __init__(self, path=None, label=None, **kwargs):
        # Fold the explicit arguments into the shared kwargs dict.
        kwargs.update(path=path, label=label)
        super().__init__(**kwargs)
class TextureDescription(ResourceDescription):
    """Describes a texture to load."""
    require_label = True
    default_loader = '2d'
    resource_type = 'textures'

    def __init__(self, path=None, label=None, loader=None, flip=True, mipmap=True, image=None, **kwargs):
        # Fold the explicit arguments into the shared kwargs dict.
        kwargs.update(path=path, label=label, loader=loader,
                      flip=flip, image=image, mipmap=mipmap)
        super().__init__(**kwargs)

    @property
    def flip(self):
        """Should the texture be flipped vertically on load?"""
        return self._kwargs.get('flip')

    @property
    def image(self):
        """Pre-loaded image object, if one was supplied."""
        return self._kwargs.get('image')

    @property
    def mipmap(self):
        """Should mipmaps be generated for the texture?"""
        return self._kwargs.get('mipmap')
| Contraz/demosys-py | demosys/resources/meta.py | Python | isc | 3,198 |
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Finance: admin page
"""
from treeio.finance.models import Transaction, Liability, Category
from django.contrib import admin
class TransactionAdmin(admin.ModelAdmin):
    """ Transaction admin """
    # Changelist shows name and details; admin search matches on name.
    list_display = ('name', 'details')
    search_fields = ['name']
class LiabilityAdmin(admin.ModelAdmin):
    """ Liability admin """
    # Changelist shows name and details; admin search matches on name.
    list_display = ('name', 'details')
    search_fields = ['name']
class CategoryAdmin(admin.ModelAdmin):
    """ Category admin """
    # Changelist shows name and primary key; admin search matches on name.
    list_display = ('name','id')
    search_fields = ['name']
# Expose the finance models in the Django admin site.
admin.site.register(Transaction, TransactionAdmin)
admin.site.register(Liability, LiabilityAdmin)
admin.site.register(Category, CategoryAdmin)
| rogeriofalcone/treeio | finance/admin.py | Python | mit | 799 |
# coding: utf-8
#
# Copyright 2010 Alexandre Fiori
# based on the original Tornado by Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""`Server-sent events <http://en.wikipedia.org/wiki/Server-sent_events>`_
is a technology for providing push notifications from a server to a browser
client in the form of DOM events.
For more information, check out the `SEE demo
<https://github.com/fiorix/cyclone/tree/master/demos/sse>`_.
"""
from cyclone import escape
from cyclone.web import RequestHandler
from twisted.python import log
class SSEHandler(RequestHandler):
    """Subclass this class and define `bind` and `unbind` to get
    notified when a new client connects or disconnects, respectively.
    Once connected, you may send events to the browser via `sendEvent`.
    """
    def __init__(self, application, request, **kwargs):
        RequestHandler.__init__(self, application, request, **kwargs)
        # Write directly to the transport: SSE is a raw long-lived stream,
        # not a buffered request/response cycle.
        self.transport = request.connection.transport
        # Keep the connection open after the handler method returns.
        self._auto_finish = False

    def sendEvent(self, message, event=None, eid=None, retry=None):
        """
        sendEvent is the single method to send events to clients.
        Parameters:
        message: the event itself
        event: optional event name
        eid: optional event id to be used as Last-Event-ID header or
        e.lastEventId property
        retry: set the retry timeout in ms. default 3 secs.
        """
        # dicts are serialized to JSON; unicode is encoded to UTF-8 bytes
        # (this module targets Python 2: `unicode`/`str` split).
        if isinstance(message, dict):
            message = escape.json_encode(message)
        if isinstance(message, unicode):
            message = message.encode("utf-8")
        assert isinstance(message, str)
        # Emit the SSE wire format: optional id/event/retry fields followed
        # by the data field and a blank line terminating the event.
        if eid:
            self.transport.write("id: %s\n" % eid)
        if event:
            self.transport.write("event: %s\n" % event)
        if retry:
            self.transport.write("retry: %s\n" % retry)
        self.transport.write("data: %s\n\n" % message)

    def _execute(self, transforms, *args, **kwargs):
        """Set up the event-stream response and hand control to `bind`."""
        self._transforms = []  # transforms
        if self.settings.get("debug"):
            log.msg("SSE connection from %s" % self.request.remote_ip)
        # Mandatory SSE headers; flush them immediately so the browser
        # starts listening before any event is sent.
        self.set_header("Content-Type", "text/event-stream")
        self.set_header("Cache-Control", "no-cache")
        self.set_header("Connection", "keep-alive")
        self.flush()
        self.request.connection.setRawMode()
        self.notifyFinish().addCallback(self.on_connection_closed)
        self.bind()

    def on_connection_closed(self, *args, **kwargs):
        # Fired by the notifyFinish deferred when the client goes away.
        if self.settings.get("debug"):
            log.msg("SSE client disconnected %s" % self.request.remote_ip)
        self.unbind()

    def bind(self):
        """Gets called when a new client connects."""
        pass

    def unbind(self):
        """Gets called when an existing client disconnects."""
        pass
| lextoumbourou/cyclone | cyclone/sse.py | Python | apache-2.0 | 3,312 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.