| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
try:
from ez_setup import use_setuptools
except ImportError:
pass
else:
use_setuptools()
from setuptools import setup
setup(
name = "hammerd",
version = "0.1.1",
url = 'http://www.hammerd.org/',
license = 'BSD',
description = "HammerD Service and Helper libs",
author = 'Amit Upadhyay',
author_email = "upadhyay@gmail.com",
py_modules = ["hammer", "hammerlib"],
install_requires = ['amitu-zutils', "eventlet", "argparse"],
entry_points={
'console_scripts': [
'hammerd = hammer:debug_main',
]
},
)
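# For context (illustrative sketch, not part of this file): the console_scripts
# entry point above makes setuptools generate a `hammerd` executable that is
# roughly equivalent to the following wrapper.
#
#     import sys
#     from hammer import debug_main
#     sys.exit(debug_main())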
| amitu/hammerd | setup.py | Python | bsd-3-clause | 595 |
ACTION_CREATE = 0
ACTION_VIEW = 1
ACTION_UPDATE = 2
ACTION_DELETE = 3
ACTIONS = {
ACTION_CREATE: 'Create',
ACTION_VIEW: 'View',
ACTION_UPDATE: 'Update',
ACTION_DELETE: 'Delete',
}
STATIC = 'st'
DYNAMIC = 'dy'
LEVEL_GUEST = 0
LEVEL_USER = 1
LEVEL_ADMIN = 2
LEVELS = {
LEVEL_GUEST: 'Guest',
LEVEL_USER: 'User',
LEVEL_ADMIN: 'Admin',
}
IP_TYPE_4 = '4'
IP_TYPE_6 = '6'
IP_TYPES = {
IP_TYPE_4: 'IPv4',
IP_TYPE_6: 'IPv6'
}
DHCP_OBJECTS = ("workgroup", "vrf", "vlan", "site", "range", "network",
"static_interface", "dynamic_interface", "workgroup_av",
"vrf_av", "vlan_av", "site_av", "range_av", "network_av",
"static_interface_av", "dynamic_interface_av",)
DNS_OBJECTS = ("address_record", "cname", "domain", "mx", "nameserver", "ptr",
"soa", "srv", "sshfp", "txt", "view",)
CORE_OBJECTS = ("ctnr_users", "ctnr", "user", "system")
def get_klasses(obj_type):
from cyder.cydns.address_record.forms import AddressRecordForm
from cyder.cydns.cname.forms import CNAMEForm
from cyder.core.ctnr.forms import CtnrForm
from cyder.cydns.domain.forms import DomainForm
from cyder.cydhcp.interface.dynamic_intr.forms import (DynamicInterfaceForm,
DynamicInterfaceAVForm)
from cyder.cydns.mx.forms import MXForm
from cyder.cydns.nameserver.forms import NameserverForm
from cyder.cydhcp.network.forms import NetworkForm, NetworkAVForm
from cyder.cydns.ptr.forms import PTRForm
from cyder.cydhcp.range.forms import RangeForm, RangeAVForm
from cyder.cydhcp.site.forms import SiteForm, SiteAVForm
from cyder.cydns.soa.forms import SOAForm, SOAAVForm
from cyder.cydns.srv.forms import SRVForm
from cyder.cydns.sshfp.forms import SSHFPForm
from cyder.core.system.forms import SystemForm, SystemAVForm
from cyder.cydhcp.interface.static_intr.forms import (StaticInterfaceForm,
StaticInterfaceAVForm)
from cyder.cydns.txt.forms import TXTForm
from cyder.cydhcp.vlan.forms import VlanForm, VlanAVForm
from cyder.cydhcp.vrf.forms import VrfForm, VrfAVForm
from cyder.cydhcp.workgroup.forms import WorkgroupForm, WorkgroupAVForm
from cyder.models import (
AddressRecord, CNAME, Ctnr, Domain, DynamicInterface, DynamicInterfaceAV,
MX, Nameserver, Network, NetworkAV, PTR, Range, RangeAV, Site, SiteAV, SOA,
SOAAV, SRV, SSHFP, StaticInterface, StaticInterfaceAV, System, SystemAV,
TXT, Vlan, VlanAV, Vrf, VrfAV, Workgroup, WorkgroupAV
)
klasses = {
'address_record': (AddressRecord, AddressRecordForm),
'cname': (CNAME, CNAMEForm),
'ctnr': (Ctnr, CtnrForm),
'domain': (Domain, DomainForm),
'dynamic_interface': (DynamicInterface, DynamicInterfaceForm),
'dynamic_interface_av': (DynamicInterfaceAV, DynamicInterfaceAVForm),
'mx': (MX, MXForm),
'nameserver': (Nameserver, NameserverForm),
'network': (Network, NetworkForm),
'network_av': (NetworkAV, NetworkAVForm),
'ptr': (PTR, PTRForm),
'range': (Range, RangeForm),
'range_av': (RangeAV, RangeAVForm),
'site': (Site, SiteForm),
'site_av': (SiteAV, SiteAVForm),
'soa': (SOA, SOAForm),
'soa_av': (SOAAV, SOAAVForm),
'srv': (SRV, SRVForm),
'sshfp': (SSHFP, SSHFPForm),
'static_interface': (StaticInterface, StaticInterfaceForm),
'static_interface_av': (StaticInterfaceAV, StaticInterfaceAVForm),
'system': (System, SystemForm),
'system_av': (SystemAV, SystemAVForm),
'txt': (TXT, TXTForm),
'vlan': (Vlan, VlanForm),
'vlan_av': (VlanAV, VlanAVForm),
'vrf': (Vrf, VrfForm),
'vrf_av': (VrfAV, VrfAVForm),
'workgroup': (Workgroup, WorkgroupForm),
'workgroup_av': (WorkgroupAV, WorkgroupAVForm),
}
return klasses[obj_type]
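# Illustrative usage sketch (hypothetical caller code, assuming a configured
# Django environment): get_klasses resolves an object-type string to its
# (model, form) pair, so generic create/update views can be written once.
def _example_get_klasses():
    model_cls, form_cls = get_klasses('domain')   # -> (Domain, DomainForm)
    form = form_cls()                             # unbound DomainForm
    return model_cls.objects.all()                # Domain queryset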
| zeeman/cyder | cyder/base/constants.py | Python | bsd-3-clause | 4,037 |
"""Simple example of two-session fMRI model fitting
================================================
Full step-by-step example of fitting a GLM to experimental data and visualizing
the results. This is done on two runs of one subject of the FIAC dataset.
For details on the data, please see:
Dehaene-Lambertz G, Dehaene S, Anton JL, Campagne A, Ciuciu P, Dehaene
G, Denghien I, Jobert A, LeBihan D, Sigman M, Pallier C, Poline
JB. Functional segregation of cortical language areas by sentence
repetition. Hum Brain Mapp. 2006: 27:360--371.
http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2653076#R11
More specifically:
1. A sequence of fMRI volumes is loaded
2. A design matrix describing all the effects related to the data is computed
3. A mask of the useful brain volume is computed
4. A GLM is applied to the dataset (effect/covariance,
then contrast estimation)
Technically, this example shows how to handle two sessions that
contain the same experimental conditions. The model directly returns a
fixed effect of the statistics across the two sessions.
"""
###############################################################################
# Create a write directory to work in;
# it will be a 'results' subdirectory of the current directory.
from os import mkdir, path, getcwd
write_dir = path.join(getcwd(), 'results')
if not path.exists(write_dir):
mkdir(write_dir)
#########################################################################
# Prepare data and analysis parameters
# --------------------------------------
#
# Note that there are two sessions
from nistats import datasets
data = datasets.fetch_fiac_first_level()
fmri_img = [data['func1'], data['func2']]
#########################################################################
# Create a mean image for plotting purposes
from nilearn.image import mean_img
mean_img_ = mean_img(fmri_img[0])
#########################################################################
# The design matrices were pre-computed; we simply load them into a list of DataFrames
design_files = [data['design_matrix1'], data['design_matrix2']]
import pandas as pd
import numpy as np
design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]
#########################################################################
# GLM estimation
# ----------------------------------
# GLM specification. Note that the mask was provided in the dataset, so we use it.
from nistats.first_level_model import FirstLevelModel
fmri_glm = FirstLevelModel(mask_img=data['mask'], minimize_memory=True)
#########################################################################
# GLM fitting
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)
#########################################################################
# Compute fixed effects of the two runs and compute related images
# For this, we first define the contrasts as we would do for a single session
n_columns = design_matrices[0].shape[1]
def pad_vector(contrast_, n_columns):
"""A small routine to append zeros in contrast vectors"""
return np.hstack((contrast_, np.zeros(n_columns - len(contrast_))))
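#########################################################################
# For instance (illustrative only), with a 6-column design matrix the
# 4-entry contrast below is padded with zeros so that it spans all columns.
example_contrast = pad_vector([1, 0, 0, -1], 6)  # -> array([ 1., 0., 0., -1., 0., 0.])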
#########################################################################
# Contrast specification
contrasts = {'SStSSp_minus_DStDSp': pad_vector([1, 0, 0, -1], n_columns),
'DStDSp_minus_SStSSp': pad_vector([-1, 0, 0, 1], n_columns),
'DSt_minus_SSt': pad_vector([-1, -1, 1, 1], n_columns),
'DSp_minus_SSp': pad_vector([-1, 1, -1, 1], n_columns),
'DSt_minus_SSt_for_DSp': pad_vector([0, -1, 0, 1], n_columns),
'DSp_minus_SSp_for_DSt': pad_vector([0, 0, -1, 1], n_columns),
'Deactivation': pad_vector([-1, -1, -1, -1, 4], n_columns),
'Effects_of_interest': np.eye(n_columns)[:5]}
#########################################################################
# Compute and plot statistics
from nilearn import plotting
print('Computing contrasts...')
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
print(' Contrast % 2i out of %i: %s' % (
index + 1, len(contrasts), contrast_id))
    # estimate the contrasts
    # note that the model implicitly computes a fixed effect across the two sessions
z_map = fmri_glm.compute_contrast(
contrast_val, output_type='z_score')
# Write the resulting stat images to file
z_image_path = path.join(write_dir, '%s_z_map.nii.gz' % contrast_id)
z_map.to_filename(z_image_path)
#########################################################################
# Comparing session-specific and fixed effects.
# Here, we compare the activation maps produced from each session separately, and then the fixed-effects version.
contrast_id = 'Effects_of_interest'
#########################################################################
# Statistics for the first session
fmri_glm = fmri_glm.fit(fmri_img[0], design_matrices=design_matrices[0])
z_map = fmri_glm.compute_contrast(
contrasts[contrast_id], output_type='z_score')
plotting.plot_stat_map(
z_map, bg_img=mean_img_, threshold=3.0,
title='%s, first session' % contrast_id)
#########################################################################
# Statistics for the second session
fmri_glm = fmri_glm.fit(fmri_img[1], design_matrices=design_matrices[1])
z_map = fmri_glm.compute_contrast(
contrasts[contrast_id], output_type='z_score')
plotting.plot_stat_map(
z_map, bg_img=mean_img_, threshold=3.0,
title='%s, second session' % contrast_id)
#########################################################################
# Fixed effects statistics
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)
z_map = fmri_glm.compute_contrast(
contrasts[contrast_id], output_type='z_score')
plotting.plot_stat_map(
z_map, bg_img=mean_img_, threshold=3.0,
title='%s, fixed effects' % contrast_id)
#########################################################################
# Not unexpectedly, the fixed-effects version displays higher peaks than the input sessions. Computing fixed effects enhances the signal-to-noise ratio of the resulting brain maps
plotting.show()
| bthirion/nistats | examples/02_first_level_models/plot_fiac_analysis.py | Python | bsd-3-clause | 6,196 |
import re
import requests
# This is to allow monkey-patching in fbcode
from torch.hub import load_state_dict_from_url # noqa
from torchtext._internal.module_utils import is_module_available
from tqdm import tqdm
if is_module_available("torchdata"):
from torchdata.datapipes.iter import HttpReader # noqa F401
def _stream_response(r, chunk_size=16 * 1024):
total_size = int(r.headers.get("Content-length", 0))
with tqdm(total=total_size, unit="B", unit_scale=1) as t:
for chunk in r.iter_content(chunk_size):
if chunk:
t.update(len(chunk))
yield chunk
def _get_response_from_google_drive(url):
confirm_token = None
session = requests.Session()
response = session.get(url, stream=True)
for k, v in response.cookies.items():
if k.startswith("download_warning"):
confirm_token = v
if confirm_token is None:
if "Quota exceeded" in str(response.content):
raise RuntimeError(
"Google drive link {} is currently unavailable, because the quota was exceeded.".format(url)
)
else:
raise RuntimeError("Internal error: confirm_token was not found in Google drive link.")
url = url + "&confirm=" + confirm_token
response = session.get(url, stream=True)
if "content-disposition" not in response.headers:
raise RuntimeError("Internal error: headers don't contain content-disposition.")
filename = re.findall('filename="(.+)"', response.headers["content-disposition"])
if filename is None:
raise RuntimeError("Filename could not be autodetected")
filename = filename[0]
return response, filename
class DownloadManager:
def get_local_path(self, url, destination):
if "drive.google.com" not in url:
response = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, stream=True)
else:
response, filename = _get_response_from_google_drive(url)
with open(destination, "wb") as f:
for chunk in _stream_response(response):
f.write(chunk)
_DATASET_DOWNLOAD_MANAGER = DownloadManager()
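# Hypothetical usage sketch (illustrative only; the URL and destination are made
# up): the module-level manager streams a response to disk and transparently
# handles Google Drive confirmation tokens.
def _example_download():
    _DATASET_DOWNLOAD_MANAGER.get_local_path(
        "https://example.com/datasets/train.csv", destination="train.csv")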
| pytorch/text | torchtext/_download_hooks.py | Python | bsd-3-clause | 2,176 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-01-27 14:26:40
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-11-12 14:14:41
from __future__ import print_function, division, absolute_import
from flask import request, current_app as app
from flask import Blueprint, jsonify, render_template, g
from marvin.utils.db import get_traceback
from marvin.web.extensions import sentry
errors = Blueprint('error_handlers', __name__)
def make_error_json(error, name, code, data=None):
''' creates the error json dictionary for API errors '''
shortname = name.lower().replace(' ', '_')
messages = {'error': shortname,
'message': error.description if hasattr(error, 'description') else None,
'status_code': code,
'traceback': get_traceback(asstring=True)}
if data:
return jsonify({'validation_errors': data}), code
else:
return jsonify({'api_error': messages}), code
def make_error_page(app, name, code, sentry=None, data=None, exception=None):
''' creates the error page dictionary for web errors '''
shortname = name.lower().replace(' ', '_')
error = {}
error['title'] = 'Marvin | {0}'.format(name)
error['page'] = request.url
error['event_id'] = g.get('sentry_event_id', None)
error['data'] = data
error['name'] = name
error['code'] = code
error['message'] = exception.description if exception and hasattr(exception, 'description') else None
if app.config['USE_SENTRY'] and sentry:
error['public_dsn'] = sentry.client.get_public_dsn('https')
app.logger.error('{0} Exception {1}'.format(name, error))
return render_template('errors/{0}.html'.format(shortname), **error), code
# ----------------
# Error Handling
# ----------------
def _is_api(request):
''' Checks if the error comes from the api '''
return request.blueprint == 'api' or 'api' in request.url
@errors.app_errorhandler(404)
def page_not_found(error):
name = 'Page Not Found'
if _is_api(request):
return make_error_json(error, name, 404)
else:
return make_error_page(app, name, 404, sentry=sentry, exception=error)
@errors.app_errorhandler(500)
def internal_server_error(error):
name = 'Internal Server Error'
if _is_api(request):
return make_error_json(error, name, 500)
else:
return make_error_page(app, name, 500, sentry=sentry, exception=error)
@errors.app_errorhandler(400)
def bad_request(error):
name = 'Bad Request'
if _is_api(request):
return make_error_json(error, name, 400)
else:
return make_error_page(app, name, 400, sentry=sentry, exception=error)
@errors.app_errorhandler(405)
def method_not_allowed(error):
name = 'Method Not Allowed'
if _is_api(request):
return make_error_json(error, name, 405)
else:
return make_error_page(app, name, 405, sentry=sentry, exception=error)
@errors.app_errorhandler(422)
def handle_unprocessable_entity(error):
name = 'Unprocessable Entity'
    data = getattr(error, 'data', None)
if data:
# Get validations from the ValidationError object
messages = data['messages']
else:
messages = ['Invalid request']
if _is_api(request):
return make_error_json(error, name, 422, data=messages)
else:
return make_error_page(app, name, 422, sentry=sentry, data=messages, exception=error)
@errors.app_errorhandler(429)
def rate_limit_exceeded(error):
name = 'Rate Limit Exceeded'
if _is_api(request):
return make_error_json(error, name, 429)
else:
return make_error_page(app, name, 429, sentry=sentry, exception=error)
@errors.app_errorhandler(504)
def gateway_timeout(error):
name = 'Gateway Timeout'
if _is_api(request):
return make_error_json(error, name, 504)
else:
return make_error_page(app, name, 504, sentry=sentry, exception=error)
@errors.app_errorhandler(413)
def entity_too_large(error):
name = 'Request Entity Too Large'
if _is_api(request):
return make_error_json(error, name, 413)
else:
return make_error_page(app, name, 413, sentry=sentry, exception=error)
@errors.app_errorhandler(409)
def conflict(error):
name = 'Conflict'
if _is_api(request):
return make_error_json(error, name, 409)
else:
return make_error_page(app, name, 409, sentry=sentry, exception=error)
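# For context, a minimal sketch (hypothetical factory; the real Marvin app
# factory may differ) of how this blueprint is wired in: registering it
# attaches the app_errorhandler hooks above application-wide.
def _example_register_handlers():
    from flask import Flask
    demo_app = Flask(__name__)
    demo_app.config['USE_SENTRY'] = False     # skip the Sentry DSN branch above
    demo_app.register_blueprint(errors)       # activates the error handlers
    return demo_app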
| sdss/marvin | python/marvin/web/error_handlers.py | Python | bsd-3-clause | 4,532 |
from corehq.apps.programs.models import Program
from corehq.apps.reports.filters.base import BaseSingleOptionFilter, CheckboxFilter
from django.utils.translation import ugettext_lazy, ugettext_noop
class SelectReportingType(BaseSingleOptionFilter):
slug = "report_type"
label = ugettext_noop("Reporting data type")
default_text = ugettext_noop("Show aggregate data")
@property
def options(self):
return [
("facilities", "Show facility level data"),
]
class AdvancedColumns(CheckboxFilter):
label = ugettext_lazy("Show advanced columns")
slug = "advanced_columns"
class ProgramFilter(BaseSingleOptionFilter):
slug = "program"
label = ugettext_noop("Program")
default_text = ugettext_lazy("All")
@property
def options(self):
programs = Program.by_domain(self.domain)
return [(program.get_id, program.name) for program in programs]
| qedsoftware/commcare-hq | corehq/apps/reports/filters/commtrack.py | Python | bsd-3-clause | 929 |
#!/usr/bin/env python
# Copyright (c) 2009-2010, Anton Korenyushkin
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import with_statement
from fnmatch import fnmatch
from random import randrange
import cookielib
import errno
import hashlib
import httplib
import os
import os.path
import re
import shutil
import sys
import urllib
import urllib2
################################################################################
# Constants
################################################################################
__version__ = '0.2.0'
SERVER = 'www.akshell.com'
CONFIG_DIR = (os.path.join(os.environ['APPDATA'], 'Akshell')
if sys.platform == 'win32' else
os.path.join(os.path.expanduser('~'), '.akshell'))
COOKIE_PATH = os.path.join(CONFIG_DIR, 'cookie')
LOAD_COOKIE = False
NAME_PATH = os.path.join(CONFIG_DIR, 'name')
LOAD_NAME = False
IGNORES = ('*~', '*.bak', '.*', '#*', '*.orig')
################################################################################
# Errors
################################################################################
class Error(Exception): pass
class DoesNotExistError(Error): pass
class LoginRequiredError(Error):
def __init__(self):
Error.__init__(self, 'Login required')
class RequestError(Error):
def __init__(self, message, code):
Error.__init__(self, message)
self.code = code
################################################################################
# Internals
################################################################################
def _request(url, data=None, code=httplib.OK, headers=None, cookie=LOAD_COOKIE):
headers = dict(headers) if headers else {}
headers['Accept'] = 'text/plain'
headers['User-Agent'] = 'akshell ' + __version__
if cookie is LOAD_COOKIE:
cookie = cookielib.MozillaCookieJar(COOKIE_PATH)
try:
cookie.load()
except IOError, error:
if error.errno != errno.ENOENT: raise
cookie = None
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.HTTPHandler())
if cookie is not None:
opener.add_handler(urllib2.HTTPCookieProcessor(cookie))
request = urllib2.Request(url, data, headers=headers)
response = opener.open(request)
if response.code != code:
raise RequestError(response.read(), response.code)
return response
class Diff(object):
def __init__(self):
self.delete = []
self.create = []
self.save = []
class Entry(object):
def diff(self, dst, clean):
diff = Diff()
if dst:
self._do_diff(dst, clean, diff, [])
else:
self._create(diff, [])
return diff
class Dir(Entry):
def __init__(self, children=None):
self._children = children or {}
def add(self, name, entry):
self._children[name] = entry
def _create(self, diff, route):
diff.create.append(route)
for name, entry in self._children.items():
entry._create(diff, route + [name])
def _do_diff(self, dst, clean, diff, route):
if isinstance(dst, Dir):
for name, src_entry in self._children.items():
child_route = route + [name]
try:
dst_entry = dst._children[name]
except KeyError:
src_entry._create(diff, child_route)
else:
src_entry._do_diff(dst_entry, clean, diff, child_route)
if clean:
for name in dst._children:
if name not in self._children:
diff.delete.append(route + [name])
else:
if isinstance(dst, File):
diff.delete.append(route)
self._create(diff, route)
class File(Entry):
def __init__(self, etag=None):
self._etag = etag
def _create(self, diff, route):
diff.save.append(route)
def _do_diff(self, dst, clean, diff, route):
if isinstance(dst, File):
if self._etag == dst._etag:
return
elif isinstance(dst, Dir):
diff.delete.append(route)
diff.save.append(route)
class Local(object):
def __init__(self, path, ignores=IGNORES):
self._path = path
self._ignores = ignores
def _do_traverse(self):
if os.path.isdir(self._path):
return Dir(
dict((name, Local(os.path.join(self._path, name),
self._ignores)._do_traverse())
for name in os.listdir(self._path)
if all(not fnmatch(name, ignore)
for ignore in self._ignores)))
else:
with open(self._path, 'rb') as f:
return File(hashlib.md5(f.read()).hexdigest())
def traverse(self):
if not os.path.exists(self._path):
raise DoesNotExistError('Local entry "%s" does not exist'
% self._path)
return self._do_traverse()
def _get_path(self, route):
return os.path.join(self._path, *route)
def read_files(self, routes):
contents = []
for route in routes:
with open(self._get_path(route), 'rb') as f:
contents.append(f.read())
return contents
def deploy(self, diff, contents):
for route in diff.delete:
path = self._get_path(route)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
for route in diff.create:
os.mkdir(self._get_path(route))
assert len(diff.save) == len(contents)
for route, content in zip(diff.save, contents):
with open(self._get_path(route), 'wb') as f:
f.write(content)
class Buffer(object):
def __init__(self, data=None):
self.data = data
def _do_traverse(self):
if isinstance(self.data, dict):
return Dir(dict((name, Buffer(self.data[name])._do_traverse())
for name in self.data))
else:
return File(hashlib.md5(self.data).hexdigest())
def traverse(self):
if self.data is None:
raise DoesNotExistError('Buffer entry does not exist')
return self._do_traverse()
def _get(self, route):
result = self.data
for name in route:
result = result[name]
return result
def read_files(self, routes):
return [self._get(route) for route in routes]
def deploy(self, diff, contents):
for route in diff.delete:
del self._get(route[:-1])[route[-1]]
for route in diff.create:
if route:
self._get(route[:-1])[route[-1]] = {}
else:
self.data = {}
assert len(diff.save) == len(contents)
for route, content in zip(diff.save, contents):
self._get(route[:-1])[route[-1]] = content
def _load_name():
try:
with open(NAME_PATH) as f:
return f.read().strip()
except IOError, error:
raise (LoginRequiredError()
if error.errno == errno.ENOENT else
error)
def _encode_multipart(fields, files):
boundary = hex(randrange(2 ** 64))[2:]
parts = []
for name, value in fields:
parts.append(
'--%s\r\nContent-Disposition: form-data; name=%s\r\n'
% (boundary, name))
parts.append(value)
for name, path, value in files:
parts.append(
'--%s\r\nContent-Disposition: form-data; name=%s; filename=%s\r\n'
% (boundary, name, path))
parts.append(value)
parts.append('--%s--\n' % boundary)
return 'multipart/form-data; boundary=' + boundary, '\r\n'.join(parts)
class Remote(object):
def __init__(self, app_name, owner_name=LOAD_NAME, spot_name=None, path='',
cookie=LOAD_COOKIE):
assert owner_name is not None if spot_name else not owner_name
if spot_name and owner_name is LOAD_NAME:
owner_name = _load_name()
self._url = (
'http://%s/apps/%s/' % (SERVER, app_name) +
('devs/%s/spots/%s' % (owner_name.replace(' ', '-'), spot_name)
if spot_name else
'code'))
self._path = re.sub('//+', '/', path.strip('/'))
if self._path:
self._url += '/' + urllib.quote(self._path)
self._cookie = cookie
def _request(self, *args):
return _request(*args, **{'cookie': self._cookie})
def _traverse_dir(self):
data = self._request(self._url + '/?etag&recursive').read()
lines = data.split('\r\n') if data else []
root = Dir()
dirs = [('', root)]
for line in lines:
while not line.startswith(dirs[-1][0]):
dirs.pop()
parent_path, parent_dir = dirs[-1]
if line.endswith('/'):
name = line[len(parent_path):-1]
assert '/' not in name
dir = Dir()
parent_dir.add(name, dir)
dirs.append((line, dir))
else:
idx = line.rfind(' ')
name = line[len(parent_path):idx]
assert '/' not in name
parent_dir.add(name, File(line[idx + 1:]))
return root
def traverse(self):
try:
return self._traverse_dir()
except RequestError, error:
if error.code == httplib.MOVED_PERMANENTLY:
return File()
if (error.code == httplib.NOT_FOUND and
str(error).startswith('Entry ')):
raise DoesNotExistError('Remote entry "%s" does not exist'
% self._path)
raise
def read_files(self, routes):
if not routes:
return []
if routes == [[]]:
return [self._request(self._url).read()]
response = self._request(
self._url + '/?files=' +
urllib.quote('\n'.join('/'.join(route) for route in routes)))
boundary = response.headers['Content-Type'].rpartition('=')[2]
return [part[part.find('\r\n\r\n') + 4:-4]
for part in response.read().split(boundary)[1:-1]]
def deploy(self, diff, contents):
fields = ([('op', 'deploy')] +
[(name, '\n'.join('/'.join(route) for route in routes))
for name, routes in (('delete', diff.delete),
('create', diff.create))
if routes])
assert len(diff.save) == len(contents)
files = [('save', '/'.join(route), content)
for route, content in zip(diff.save, contents)]
content_type, body = _encode_multipart(fields, files)
self._request(self._url + '/', body, httplib.FOUND,
{'Content-Type': content_type})
################################################################################
# API
################################################################################
def login(name, password):
'''Login to the server.
Store username and authentication cookie in a config directory.
'''
cookie = cookielib.MozillaCookieJar(COOKIE_PATH)
_request('http://%s/login/' % SERVER,
urllib.urlencode({'name': name,
'password': password,
}),
httplib.FOUND,
cookie=cookie)
try:
os.mkdir(CONFIG_DIR)
except OSError, error:
if error.errno != errno.EEXIST: raise
cookie.save()
with open(NAME_PATH, 'w') as f:
f.write(name)
def logout():
'''Logout by removing config directory'''
try:
shutil.rmtree(CONFIG_DIR)
except OSError, error:
if error.errno != errno.ENOENT: raise
def evaluate(app_name, spot_name, expr, cookie=LOAD_COOKIE):
'''Evaluate expression in release or spot context'''
response = _request('http://%s/apps/%s/eval/' % (SERVER, app_name),
urllib.urlencode({'spot': spot_name or '',
'expr': expr,
}),
cookie=cookie)
status, data = response.read().split('\n', 1)
return (status == 'OK'), data
def transfer(src, dst, clean=False):
src_entry = src.traverse()
try:
dst_entry = dst.traverse()
except DoesNotExistError:
dst_entry = None
diff = src_entry.diff(dst_entry, clean)
dst.deploy(diff, src.read_files(diff.save))
return diff
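# Illustrative workflow sketch (hypothetical names, not part of the original
# script): authenticate, then push a local working copy into a remote spot.
def _example_sync():
    login('alice', 'secret')                           # caches cookie and user name
    diff = transfer(Local('myapp'),                    # local source tree
                    Remote('myapp', spot_name='dev'),  # remote spot, owner read from config
                    clean=True)                        # delete remote-only entries
    return diff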
| akshell/tool | akshell.py | Python | bsd-3-clause | 14,366 |
# -*- coding: utf-8 -*-
""" Test suite for pipes module.
"""
import pytest
import numpy as np
import pygfunction as gt
# =============================================================================
# Test functions
# =============================================================================
# Test convective_heat_transfer_coefficient_circular_pipe
@pytest.mark.parametrize("m_flow, expected", [
(0.05, 90.07260000000001), # Laminar flow
(0.10, 572.256944167273), # Transition flow
(0.50, 4036.196217814895) # Turbulent flow
])
def test_convective_heat_transfer_coefficient_circular_pipe(m_flow, expected):
r_in = 0.01 # Inner radius [m]
epsilon = 1.5e-6 # Pipe surface roughness [m]
visc = 0.00203008 # Fluid viscosity [kg/m.s]
den = 1014.78 # Fluid density [kg/m3]
cp = 3977. # Fluid specific heat capacity [J/kg.K]
k = 0.4922 # Fluid thermal conductivity [W/m.K]
# Convective heat transfer coefficient [W/m2.K]
h = gt.pipes.convective_heat_transfer_coefficient_circular_pipe(
m_flow, r_in, visc, den, k, cp, epsilon)
assert np.isclose(h, expected)
# Test convective_heat_transfer_coefficient_concentric_annulus
@pytest.mark.parametrize("m_flow, expected", [
(0.05, (141.4907984705223, 110.95487746200112)), # Laminar flow
(0.40, (904.4869811625874, 904.4869811625874)), # Transition flow
(0.60, (1411.2063074288633, 1411.2063074288633)) # Turbulent flow
])
def test_convective_heat_transfer_coefficient_concentric_annulus(
m_flow, expected):
r_in = 0.01 # Inner radius [m]
r_out = 0.02 # Outer radius [m]
epsilon = 1.5e-6 # Pipe surface roughness [m]
visc = 0.00203008 # Fluid viscosity [kg/m.s]
den = 1014.78 # Fluid density [kg/m3]
cp = 3977. # Fluid specific heat capacity [J/kg.K]
k = 0.4922 # Fluid thermal conductivity [W/m.K]
# Convective heat transfer coefficients [W/m2.K]
h = gt.pipes.convective_heat_transfer_coefficient_concentric_annulus(
m_flow, r_in, r_out, visc, den, k, cp, epsilon)
assert np.allclose(h, expected)
# Test conduction_thermal_resistance_circular_pipe
def test_conduction_thermal_resistance_circular_pipe():
r_in = 0.01 # Inner radius [m]
r_out = 0.02 # Outer radius [m]
k = 0.6 # Fluid thermal conductivity [W/m.K]
# Conduction thermal resistance [m.K/W]
expected = 0.18386300012720966
R = gt.pipes.conduction_thermal_resistance_circular_pipe(r_in, r_out, k)
assert np.isclose(R, expected)
# Test fluid_friction_factor_circular_pipe
@pytest.mark.parametrize("m_flow, expected", [
(0.05, 0.04081718025087723), # Laminar flow
(0.50, 0.027641340780182006) # Turbulent flow
])
def test_fluid_friction_factor_circular_pipe(m_flow, expected):
r_in = 0.01 # Inner radius [m]
epsilon = 1.5e-6 # Pipe surface roughness [m]
visc = 0.00203008 # Fluid viscosity [kg/m.s]
den = 1014.78 # Fluid density [kg/m3]
# Fluid Darcy friction factor [-]
f = gt.pipes.fluid_friction_factor_circular_pipe(
m_flow, r_in, visc, den, epsilon)
assert np.isclose(f, expected)
# Test thermal_resistances
@pytest.mark.parametrize("J, R_expected, Rd_expected", [
(0, # Zero-th order multipoles
np.array([[0.25486306, 0.01538038],
[0.01538038, 0.25206829]]),
np.array([[0.27042505, 4.16155713],
[4.16155713, 0.26726918]])),
(1, # First order multipoles
np.array([[0.25569372, 0.01562313],
[0.01562313, 0.25288076]]),
np.array([[0.27150208, 4.12311447],
[4.12311447, 0.26832082]])),
(2, # Second order multipoles
np.array([[0.25590404, 0.01560503],
[0.01560503, 0.25308681]]),
np.array([[0.27169419, 4.13472001],
[4.13472001, 0.26850888]])),
(3, # Third order multipoles
np.array([[0.25592405, 0.01560826],
[0.01560826, 0.25310667]]),
np.array([[0.27171747, 4.1345064 ],
[4.1345064, 0.26853194]])),
])
def test_thermal_resistances(J, R_expected, Rd_expected):
# Pipe positions [m]
pos_2pipes = [(0.03, 0.00), (-0.03, 0.02)]
r_out = 0.02 # Pipe outer radius [m]
r_b = 0.07 # Borehole radius [m]
k_s = 2.5 # Ground thermal conductivity [W/m.K]
k_g = 1.5 # Grout thermal conductivity [W/m.K]
# Fluid to outer pipe wall thermal resistance [m.K/W]
beta = 1.2
Rfp = beta / (2 * np.pi * k_g)
    # Thermal resistances [m.K/W]
R, Rd = gt.pipes.thermal_resistances(
pos_2pipes, r_out, r_b, k_s, k_g, Rfp, J=J)
assert np.allclose(R, R_expected) and np.allclose(Rd, Rd_expected)
# Test multipole
@pytest.mark.parametrize("J, expected", [
(0, np.array([2.67263436, 2.47271955, 2.15219567, 1.95228086])),
(1, np.array([2.71748588, 2.51729508, 2.19369127, 1.99350047])),
(2, np.array([2.71914947, 2.51894712, 2.19312216, 1.99291981])),
(3, np.array([2.71942944, 2.51913631, 2.19328373, 1.9929906])),
])
def test_multipole(J, expected):
# Pipe positions [m]
pos_4pipes = [
(0.03, 0.03), (-0.03, 0.03), (-0.03, -0.03), (0.03, -0.03)]
r_out = 0.02 # Pipe outer radius [m]
r_b = 0.07 # Borehole radius [m]
k_s = 2.5 # Ground thermal conductivity [W/m.K]
k_g = 1.5 # Grout thermal conductivity [W/m.K]
T_b = 0.0 # Borehole wall temperature [degC]
# Pipe heat transfer rates [W/m]
q_p = np.array([10., 9., 8., 7.])
# Fluid to outer pipe wall thermal resistance [m.K/W]
beta = 1.2
R_fp = beta / (2 * np.pi * k_g)
# Fluid temperatures [degC]
T_f = gt.pipes.multipole(
pos_4pipes, r_out, r_b, k_s, k_g, R_fp, T_b, q_p, J)[0]
assert np.allclose(T_f, expected)
# =============================================================================
# Test pipe classes
# =============================================================================
# Test get_temperature
@pytest.mark.parametrize("pipe_fixture, segment_ratios, T_b, z, expected", [
# Single U-tube
('single_Utube', None, 1., 65., np.array([4.34676755, 3.07354134])),
('single_Utube', None, np.array([1., 2., 3., 1.]), 65., np.array([4.41754093, 3.49949295])),
('single_Utube', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 65., np.array([4.47310416, 3.66490249])),
('single_Utube', None, 1., np.array([65., 75.]), np.array([[4.34676755, 3.07354134], [4.25566624, 3.13435325]])),
('single_Utube', None, np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.41754093, 3.49949295], [4.35173147, 3.54346564]])),
('single_Utube', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.47310416, 3.66490249], [4.42647994, 3.69214797]])),
# Double U-tube (Parallel)
('double_Utube_parallel', None, 1., 65., np.array([3.87525104, 3.87525104, 2.20313908, 2.20313908])),
('double_Utube_parallel', None, np.array([1., 2., 3., 1.]), 65., np.array([4.00464852, 4.00464852, 2.84788608, 2.84788608])),
('double_Utube_parallel', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 65., np.array([4.09862044, 4.09862044, 3.07439258, 3.07439258])),
('double_Utube_parallel', None, 1., np.array([65., 75.]), np.array([[3.87525104, 3.87525104, 2.20313908, 2.20313908], [3.73265141, 3.73265141, 2.26719823, 2.26719823]])),
('double_Utube_parallel', None, np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.00464852, 4.00464852, 2.84788608, 2.84788608], [3.90522192, 3.90522192, 2.89301847, 2.89301847]])),
('double_Utube_parallel', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.09862044, 4.09862044, 3.07439258, 3.07439258], [4.03210009, 4.03210009, 3.09222735, 3.09222735]])),
# Double U-tube (Series)
('double_Utube_series', None, 1., 65., np.array([4.36908096, 2.53231146, 3.13441957, 2.03763963])),
('double_Utube_series', None, np.array([1., 2., 3., 1.]), 65., np.array([4.44022419, 2.94528677, 3.54323578, 2.65057213])),
('double_Utube_series', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 65., np.array([4.49394782, 3.15796672, 3.7037625, 2.90601555])),
('double_Utube_series', None, 1., np.array([65., 75.]), np.array([[4.36908096, 2.53231146, 3.13441957, 2.03763963], [4.28094228, 2.49706752, 3.19348974, 2.0612353]])),
('double_Utube_series', None, np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.44022419, 2.94528677, 3.54323578, 2.65057213], [4.37608674, 2.92420008, 3.58625745, 2.66472128]])),
('double_Utube_series', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.49394782, 3.15796672, 3.7037625, 2.90601555], [4.44772426, 3.15021634, 3.73124558, 2.90769612]])),
# Coaxial (Annular pipe is inlet pipe)
('coaxial_annular_in', None, 1., 65., np.array([3.15203088, 2.18408362])),
('coaxial_annular_in', None, np.array([1., 2., 3., 1.]), 65., np.array([3.4176666 , 2.73205968])),
('coaxial_annular_in', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 65., np.array([3.57428289, 3.01759194])),
('coaxial_annular_in', None, 1., np.array([65., 75.]), np.array([[3.15203088, 2.18408362], [2.96401382, 2.15051705]])),
('coaxial_annular_in', None, np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[3.4176666, 2.73205968], [3.2920645, 2.7081367]])),
('coaxial_annular_in', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[3.57428289, 3.01759194], [3.4962546, 2.99796987]])),
# Coaxial (Annular pipe is outlet pipe)
('coaxial_annular_out', None, 1., 65., np.array([4.50649998, 2.92933532])),
('coaxial_annular_out', None, np.array([1., 2., 3., 1.]), 65., np.array([4.62416027, 3.50307539])),
('coaxial_annular_out', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 65., np.array([4.64884211, 3.58227143])),
('coaxial_annular_out', None, 1., np.array([65., 75.]), np.array([[4.50649998, 2.92933532], [4.44976224, 3.02086677]])),
('coaxial_annular_out', None, np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.62416027, 3.50307539], [4.58402116, 3.57860389]])),
('coaxial_annular_out', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.64884211, 3.58227143], [4.61006377, 3.60745651]])),
])
def test_temperature(
pipe_fixture, segment_ratios, T_b, z, expected, request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
m_flow_borehole = 0.2 # Fluid mass flow rate [kg/s]
T_f_in = 5.0 # Inlet fluid temperature [degC]
# Fluid temperatures [degC]
T_f = pipe.get_temperature(
z, T_f_in, T_b, m_flow_borehole, fluid.cp,
segment_ratios=segment_ratios)
assert np.allclose(T_f, expected)
# Test get_outlet_temperature
@pytest.mark.parametrize("pipe_fixture, segment_ratios, T_b, expected", [
# Single U-tube
('single_Utube', None, 1., 2.712371852688313),
('single_Utube', None, np.array([1., 2., 3., 1.]), 3.1377635748663573),
('single_Utube', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 3.335501080169065),
# Double U-tube (Parallel)
('double_Utube_parallel', None, 1., 1.8553031331306218),
('double_Utube_parallel', None, np.array([1., 2., 3., 1.]), 2.4278457017624655),
('double_Utube_parallel', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 2.691532668379643),
# Double U-tube (Series)
('double_Utube_series', None, 1., 1.8983711735742064),
('double_Utube_series', None, np.array([1., 2., 3., 1.]), 2.4755999700741573),
('double_Utube_series', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 2.744018786582172),
# Coaxial (Annular pipe is inlet pipe)
('coaxial_annular_in', None, 1., 2.581130521333567),
('coaxial_annular_in', None, np.array([1., 2., 3., 1.]), 3.0276625795763357),
('coaxial_annular_in', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 3.2733787998105672),
# Coaxial (Annular pipe is outlet pipe)
('coaxial_annular_out', None, 1., 2.5811305213335674),
('coaxial_annular_out', None, np.array([1., 2., 3., 1.]), 2.981638747649938),
('coaxial_annular_out', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 3.148206616090016),
])
def test_outlet_temperature(
pipe_fixture, segment_ratios, T_b, expected, request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
m_flow_borehole = 0.2 # Fluid mass flow rate [kg/s]
T_f_in = 5.0 # Inlet fluid temperature [degC]
# Outlet fluid temperature [degC]
T_f_out = pipe.get_outlet_temperature(
T_f_in, T_b, m_flow_borehole, fluid.cp, segment_ratios=segment_ratios)
assert np.isclose(T_f_out, expected)
# Test get_inlet_temperature
@pytest.mark.parametrize("pipe_fixture, segment_ratios, T_b, expected", [
# Single U-tube
('single_Utube', None, 1., 7.595314034714041),
('single_Utube', None, np.array([1., 2., 3., 1.]), 8.33912674339739),
('single_Utube', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 8.68487787525871),
# Double U-tube (Parallel)
('double_Utube_parallel', None, 1., 5.7977998086638305),
('double_Utube_parallel', None, np.array([1., 2., 3., 1.]), 6.526064048901171),
('double_Utube_parallel', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 6.861469307697622),
# Double U-tube (Series)
('double_Utube_series', None, 1., 5.8644202354664365),
('double_Utube_series', None, np.array([1., 2., 3., 1.]), 6.60884044665609),
('double_Utube_series', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 6.955005421937585),
# Coaxial (Annular pipe is inlet pipe)
('coaxial_annular_in', None, 1., 7.237470090568812),
('coaxial_annular_in', None, np.array([1., 2., 3., 1.]), 7.97588456424095),
('coaxial_annular_in', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 8.382216898252953),
# Coaxial (Annular pipe is outlet pipe)
('coaxial_annular_out', None, 1., 7.237470090568813),
('coaxial_annular_out', None, np.array([1., 2., 3., 1.]), 7.899776560345228),
('coaxial_annular_out', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 8.175224028526785),
])
def test_inlet_temperature(pipe_fixture, segment_ratios, T_b, expected, request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
m_flow_borehole = 0.2 # Fluid mass flow rate [kg/s]
Q_f = -3000.0 # Total heat transfer rate [W]
# Inlet fluid temperature [degC]
T_f_in = pipe.get_inlet_temperature(
Q_f, T_b, m_flow_borehole, fluid.cp, segment_ratios=segment_ratios)
assert np.isclose(T_f_in, expected)
# Test get_borehole_heat_extraction_rate
@pytest.mark.parametrize("pipe_fixture, segment_ratios, T_b, expected", [
# Single U-tube
('single_Utube', None, 1., -1819.4736348927008),
('single_Utube', None, np.array([1., 2., 3., 1.]), np.array([-507.98022943, -330.29924271, -155.92399643, -486.93326314])),
('single_Utube', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([-211.44166366, -492.72964222, -279.36944922, -303.45149861])),
# Double U-tube (Parallel)
('double_Utube_parallel', None, 1., -2501.14645849),
('double_Utube_parallel', None, np.array([1., 2., 3., 1.]), np.array([-796.48662356, -444.22614316, -108.02227066, -697.03753979])),
('double_Utube_parallel', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([-342.52693756, -711.1206948, -251.40681559, -447.57582464])),
# Double U-tube (Series)
('double_Utube_series', None, 1., -2466.89213085),
('double_Utube_series', None, np.array([1., 2., 3., 1.]), np.array([-745.16518357, -428.05472293, -114.8035859, -719.7675482])),
('double_Utube_series', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([-319.92216781, -677.35565178, -267.23882139, -463.25689612])),
# Coaxial (Annular pipe is inlet pipe)
('coaxial_annular_in', None, 1., -1923.85692048),
('coaxial_annular_in', None, np.array([1., 2., 3., 1.]), np.array([-757.51176437, -346.76503548, -48.92829119, -415.50088061])),
('coaxial_annular_in', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([-335.11374414, -618.64814833, -139.77685268, -279.11910948])),
# Coaxial (Annular pipe is outlet pipe)
('coaxial_annular_out', None, 1., -1923.85692048),
('coaxial_annular_out', None, np.array([1., 2., 3., 1.]), np.array([-480.81667849, -324.83211948, -133.10520419, -666.55719699])),
('coaxial_annular_out', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([-199.09314899, -449.21544246, -227.76223639, -389.6723144])),
])
def test_borehole_heat_extraction_rate(
pipe_fixture, segment_ratios, T_b, expected, request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
m_flow_borehole = 0.2 # Fluid mass flow rate [kg/s]
T_f_in = 5.0 # Inlet fluid temperature [degC]
# Borehole heat extraction rates [W]
Q_b = pipe.get_borehole_heat_extraction_rate(
T_f_in, T_b, m_flow_borehole, fluid.cp, segment_ratios=segment_ratios)
assert np.allclose(Q_b, expected)
# Test get_fluid_heat_extraction_rate
@pytest.mark.parametrize("pipe_fixture, segment_ratios, T_b, expected", [
# Single U-tube
('single_Utube', None, 1., -1819.4736348927008),
('single_Utube', None, np.array([1., 2., 3., 1.]), -1481.1367317058312),
('single_Utube', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -1323.8654645418355),
# Double U-tube (Parallel)
('double_Utube_parallel', None, 1., -2501.14645849),
('double_Utube_parallel', None, np.array([1., 2., 3., 1.]), -2045.7725771641726),
('double_Utube_parallel', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -1836.0481583644691),
# Double U-tube (Series)
('double_Utube_series', None, 1., -2466.89213085),
('double_Utube_series', None, np.array([1., 2., 3., 1.]), -2007.7910405893485),
('double_Utube_series', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -1794.303127215247),
# Coaxial (Annular pipe is inlet pipe)
('coaxial_annular_in', None, 1., -1923.85692048),
('coaxial_annular_in', None, np.array([1., 2., 3., 1.]), -1568.705971637178),
('coaxial_annular_in', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -1373.2746534366859),
# Coaxial (Annular pipe is outlet pipe)
('coaxial_annular_out', None, 1., -1923.85692048),
('coaxial_annular_out', None, np.array([1., 2., 3., 1.]), -1605.3111991367698),
('coaxial_annular_out', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -1472.830819664631),
])
def test_fluid_heat_extraction_rate(
pipe_fixture, segment_ratios, T_b, expected, request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
m_flow_borehole = 0.2 # Fluid mass flow rate [kg/s]
T_f_in = 5.0 # Inlet fluid temperature [degC]
# Fluid heat extraction rate [W]
Q_f = pipe.get_fluid_heat_extraction_rate(
T_f_in, T_b, m_flow_borehole, fluid.cp, segment_ratios=segment_ratios)
assert np.isclose(Q_f, expected)
# Test get_total_heat_extraction_rate
@pytest.mark.parametrize("pipe_fixture, segment_ratios, T_b, expected", [
# Single U-tube
('single_Utube', None, 1., -1819.4736348927008),
('single_Utube', None, np.array([1., 2., 3., 1.]), -1481.1367317058312),
('single_Utube', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -1323.8654645418355),
# Double U-tube (Parallel)
('double_Utube_parallel', None, 1., -2501.14645849),
('double_Utube_parallel', None, np.array([1., 2., 3., 1.]), -2045.7725771641726),
('double_Utube_parallel', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -1836.0481583644691),
# Double U-tube (Series)
('double_Utube_series', None, 1., -2466.89213085),
('double_Utube_series', None, np.array([1., 2., 3., 1.]), -2007.7910405893485),
('double_Utube_series', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -1794.303127215247),
# Coaxial (Annular pipe is inlet pipe)
('coaxial_annular_in', None, 1., -1923.85692048),
('coaxial_annular_in', None, np.array([1., 2., 3., 1.]), -1568.705971637178),
('coaxial_annular_in', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -1373.2746534366859),
# Coaxial (Annular pipe is outlet pipe)
('coaxial_annular_out', None, 1., -1923.85692048),
('coaxial_annular_out', None, np.array([1., 2., 3., 1.]), -1605.3111991367698),
('coaxial_annular_out', np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -1472.830819664631),
])
def test_total_heat_extraction_rate(
pipe_fixture, segment_ratios, T_b, expected, request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
m_flow_borehole = 0.2 # Fluid mass flow rate [kg/s]
T_f_in = 5.0 # Inlet fluid temperature [degC]
# Total heat extraction rate [W]
Q_t = pipe.get_total_heat_extraction_rate(
T_f_in, T_b, m_flow_borehole, fluid.cp, segment_ratios=segment_ratios)
assert np.isclose(Q_t, expected)
# =============================================================================
# Test IndependentMultipleUTube class
# =============================================================================
# Test get_temperature
@pytest.mark.parametrize(
"pipe_fixture, m_flow, T_f_in, segment_ratios, T_b, z, expected", [
# Double U-tube
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, 1., 65., np.array([4.33561246, -0.53401739, 3.03985865, 0.28974217])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, np.array([1., 2., 3., 1.]), 65., np.array([4.40351925, -0.44632268, 3.43990994, 0.77984857])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 65., np.array([4.45571533, -0.3797212, 3.59641415, 0.96525637])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, 1., np.array([65., 75.]), np.array([[4.33561246, -0.53401739, 3.03985865, 0.28974217], [4.2430049, -0.47133142, 3.10196228, 0.25289182]])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.40351925, -0.44632268, 3.43990994, 0.77984857], [4.33450267, -0.35334337, 3.48624558, 0.72533714]])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.45571533, -0.3797212, 3.59641415, 0.96525637], [4.40441296, -0.26381686, 3.62735271, 0.89129191]])),
# Triple U-tube
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, 1., 65., np.array([4.42233734, -0.45836746, 5.35867251, 3.21820626, 0.54732768, 2.80410243])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, np.array([1., 2., 3., 1.]), 65., np.array([4.47430559, -0.3726503, 5.47798812, 3.56864252, 0.98391184, 3.36084568])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), 65., np.array([4.51735491, -0.31260448, 5.56070713, 3.7101146, 1.15077001, 3.55786056])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, 1., np.array([65., 75.]), np.array([[4.42233734, -0.45836746, 5.35867251, 3.21820626, 0.54732768, 2.80410243], [4.33960106, -0.38478969, 5.148416, 3.27807117, 0.49886893, 2.90891002]])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.47430559, -0.3726503, 5.47798812, 3.56864252, 0.98391184, 3.36084568], [4.41115886, -0.27214867, 5.30491836, 3.61292754, 0.92062819, 3.45008461]])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([65., 75.]), np.array([[4.51735491, -0.31260448, 5.56070713, 3.7101146, 1.15077001, 3.55786056], [4.4690976, -0.1924684, 5.41547435, 3.74095158, 1.07113754, 3.62431085]])),
])
def test_temperature_independent(
pipe_fixture, m_flow, T_f_in, segment_ratios, T_b, z, expected,
request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
# Fluid temperatures [degC]
T_f = pipe.get_temperature(
z, T_f_in, T_b, m_flow, fluid.cp, segment_ratios=segment_ratios)
assert np.allclose(T_f, expected)
# Test get_outlet_temperature
@pytest.mark.parametrize(
"pipe_fixture, m_flow, T_f_in, segment_ratios, T_b, expected", [
# Double U-tube
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, 1., np.array([2.66975268, 0.50433911])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, np.array([1., 2., 3., 1.]), np.array([3.07152772, 0.9760115])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([3.25838257, 1.19473512])),
# Triple U-tube
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, 1., np.array([2.85759687, 0.84655363, 2.20811155])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, np.array([1., 2., 3., 1.]), np.array([3.22566425, 1.26691386, 2.70350023])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([3.39731878, 1.46195771, 2.93172173])),
])
def test_outlet_temperature_independent(
pipe_fixture, m_flow, T_f_in, segment_ratios, T_b, expected, request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
# Outlet fluid temperatures [degC]
T_f_out = pipe.get_outlet_temperature(
T_f_in, T_b, m_flow, fluid.cp, segment_ratios=segment_ratios)
assert np.allclose(T_f_out, expected)
# Test get_inlet_temperature
@pytest.mark.parametrize(
"pipe_fixture, m_flow, Q_f, segment_ratios, T_b, expected", [
# Double U-tube
('double_Utube_independent', np.array([0.2, 0.15]), np.array([-3000., 2000.]), None, 1., np.array([7.40595748, -3.59946781])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([-3000., 2000.]), None, np.array([1., 2., 3., 1.]), np.array([8.15037424, -2.85931237])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([-3000., 2000.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([8.49653478, -2.51603365])),
# Triple U-tube
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([-3000., 2000., -2500.]), None, 1., np.array([7.87321014, -2.88443189, 8.87646527])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([-3000., 2000., -2500.]), None, np.array([1., 2., 3., 1.]), np.array([8.62102769, -2.14384786, 9.60745667])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([-3000., 2000., -2500.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([8.96946474, -1.80024805, 9.94472697])),
])
def test_inlet_temperature_independent(
pipe_fixture, m_flow, Q_f, segment_ratios, T_b, expected, request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
# Inlet fluid temperatures [degC]
T_f_in = pipe.get_inlet_temperature(
Q_f, T_b, m_flow, fluid.cp, segment_ratios=segment_ratios)
assert np.allclose(T_f_in, expected)
# Test get_borehole_heat_extraction_rate
@pytest.mark.parametrize(
"pipe_fixture, m_flow, T_f_in, segment_ratios, T_b, expected", [
# Double U-tube
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, 1., -956.00963184),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, np.array([1., 2., 3., 1.]), np.array([-311.65264343, -12.22888682, 289.43868666, -320.65369703])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([-136.63705522, -61.48218337, 405.20011405, -214.40320201])),
# Triple U-tube
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, 1., -2508.09408199),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, np.array([1., 2., 3., 1.]), np.array([-754.8446691, -351.75861521, 43.23971775, -704.23081162])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([-321.07744291, -563.32294806, -1.60256316, -449.20675261])),
])
def test_borehole_heat_extraction_rate_independent(
pipe_fixture, m_flow, T_f_in, segment_ratios, T_b, expected, request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
# Borehole heat extraction rates [W]
Q_b = pipe.get_borehole_heat_extraction_rate(
T_f_in, T_b, m_flow, fluid.cp, segment_ratios=segment_ratios)
assert np.allclose(Q_b, expected)
# Test get_fluid_heat_extraction_rate
@pytest.mark.parametrize(
"pipe_fixture, m_flow, T_f_in, segment_ratios, T_b, expected", [
# Double U-tube
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, 1., np.array([-1853.37094997, 897.36131814])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, np.array([1., 2., 3., 1.]), np.array([-1533.81766537, 1178.72112475])),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([-1385.20195806, 1309.19311228])),
# Triple U-tube
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, 1., np.array([-1703.96836927, 1101.4975216, -1905.62323433])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, np.array([1., 2., 3., 1.]), np.array([-1411.22460372, 1352.24883733, -1708.61861179])),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), np.array([-1274.69852553, 1468.59548314, -1617.86019738])),
])
def test_fluid_heat_extraction_rate_independent(
pipe_fixture, m_flow, T_f_in, segment_ratios, T_b, expected, request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
# Fluid heat extraction rates [W]
Q_f = pipe.get_fluid_heat_extraction_rate(
T_f_in, T_b, m_flow, fluid.cp, segment_ratios=segment_ratios)
assert np.allclose(Q_f, expected)
# Test get_total_heat_extraction_rate
@pytest.mark.parametrize(
"pipe_fixture, m_flow, T_f_in, segment_ratios, T_b, expected", [
# Double U-tube
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, 1., -956.0096318353349),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), None, np.array([1., 2., 3., 1.]), -355.0965406202829),
('double_Utube_independent', np.array([0.2, 0.15]), np.array([5., -1.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -76.00884577783381),
# Triple U-tube
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, 1., -2508.094081991294),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), None, np.array([1., 2., 3., 1.]), -1767.5943781810597),
('triple_Utube_independent', np.array([0.2, 0.15, 0.10]), np.array([5., -1., 7.]), np.array([0.1, 0.35, 0.40, 0.15]), np.array([1., 2., 3., 1.]), -1423.9632397638252),
])
def test_total_heat_extraction_rate_independent(
pipe_fixture, m_flow, T_f_in, segment_ratios, T_b, expected, request):
# Extract pipe from fixture
pipe = request.getfixturevalue(pipe_fixture)
# Fluid is propylene-glycol 20%
fluid = gt.media.Fluid('MPG', 20.)
# Total heat extraction rate [W]
Q_t = pipe.get_total_heat_extraction_rate(
T_f_in, T_b, m_flow, fluid.cp, segment_ratios=segment_ratios)
assert np.isclose(Q_t, expected)
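# Illustrative sketch (not part of the test suite) of the call pattern exercised
# above, assuming a pipe object constructed like the fixtures: the fluid supplies
# the specific heat capacity, and the pipe methods take inlet fluid temperatures,
# borehole wall temperatures and per-pipe mass flow rates directly.
def _example_pipe_usage(pipe):
    fluid = gt.media.Fluid('MPG', 20.)     # propylene-glycol 20%
    m_flow = np.array([0.2, 0.15])         # mass flow rate per U-tube [kg/s]
    T_f_in = np.array([5., -1.])           # inlet fluid temperatures [degC]
    T_b = 1.                               # uniform borehole wall temperature [degC]
    T_f_out = pipe.get_outlet_temperature(T_f_in, T_b, m_flow, fluid.cp)
    Q_t = pipe.get_total_heat_extraction_rate(T_f_in, T_b, m_flow, fluid.cp)
    return T_f_out, Q_t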
|
MassimoCimmino/pygfunction
|
tests/pipes_test.py
|
Python
|
bsd-3-clause
| 33,912
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A clone of iotop (http://guichaz.free.fr/iotop/) showing real time
disk I/O statistics.
It works on Linux only (FreeBSD and OSX are missing support for IO
counters).
It doesn't work on Windows as the curses module is required.
Author: Giampaolo Rodola' <g.rodola@gmail.com>
"""
import os
import sys
import psutil
if not hasattr(psutil.Process, 'io_counters') or os.name != 'posix':
sys.exit('platform not supported')
import time
import curses
import atexit
# --- curses stuff
def tear_down():
win.keypad(0)
curses.nocbreak()
curses.echo()
curses.endwin()
win = curses.initscr()
atexit.register(tear_down)
curses.endwin()
lineno = 0
def print_line(line, highlight=False):
"""A thin wrapper around curses's addstr()."""
global lineno
try:
if highlight:
line += " " * (win.getmaxyx()[1] - len(line))
win.addstr(lineno, 0, line, curses.A_REVERSE)
else:
win.addstr(lineno, 0, line, 0)
except curses.error:
lineno = 0
win.refresh()
raise
else:
lineno += 1
# --- /curses stuff
def bytes2human(n):
"""
>>> bytes2human(10000)
'9.8 K/s'
>>> bytes2human(100001221)
'95.4 M/s'
"""
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i + 1) * 10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.2f %s/s' % (value, s)
return '%.2f B/s' % (n)
def poll(interval):
"""Calculate IO usage by comparing IO statics before and
after the interval.
Return a tuple including all currently running processes
sorted by IO activity and total disks I/O activity.
"""
# first get a list of all processes and disk io counters
procs = [p for p in psutil.process_iter()]
for p in procs[:]:
try:
p._before = p.io_counters()
except psutil.Error:
procs.remove(p)
continue
disks_before = psutil.disk_io_counters()
# sleep some time
time.sleep(interval)
# then retrieve the same info again
for p in procs[:]:
try:
p._after = p.io_counters()
p._cmdline = ' '.join(p.cmdline())
if not p._cmdline:
p._cmdline = p.name()
p._username = p.username()
except psutil.NoSuchProcess:
procs.remove(p)
disks_after = psutil.disk_io_counters()
# finally calculate results by comparing data before and
# after the interval
for p in procs:
p._read_per_sec = p._after.read_bytes - p._before.read_bytes
p._write_per_sec = p._after.write_bytes - p._before.write_bytes
p._total = p._read_per_sec + p._write_per_sec
disks_read_per_sec = disks_after.read_bytes - disks_before.read_bytes
disks_write_per_sec = disks_after.write_bytes - disks_before.write_bytes
# sort processes by total disk IO so that the more intensive
# ones get listed first
processes = sorted(procs, key=lambda p: p._total, reverse=True)
return (processes, disks_read_per_sec, disks_write_per_sec)
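# Minimal standalone sketch of the same before/after sampling idea described in
# poll()'s docstring, limited to system-wide disk throughput (no curses and no
# per-process breakdown); the helper name and default interval are illustrative only.
def _example_total_disk_throughput(interval=1.0):
    before = psutil.disk_io_counters()
    time.sleep(interval)
    after = psutil.disk_io_counters()
    return (bytes2human(after.read_bytes - before.read_bytes),
            bytes2human(after.write_bytes - before.write_bytes))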
def refresh_window(procs, disks_read, disks_write):
"""Print results on screen by using curses."""
curses.endwin()
templ = "%-5s %-7s %11s %11s %s"
win.erase()
disks_tot = "Total DISK READ: %s | Total DISK WRITE: %s" \
% (bytes2human(disks_read), bytes2human(disks_write))
print_line(disks_tot)
header = templ % ("PID", "USER", "DISK READ", "DISK WRITE", "COMMAND")
print_line(header, highlight=True)
for p in procs:
line = templ % (
p.pid,
p._username[:7],
bytes2human(p._read_per_sec),
bytes2human(p._write_per_sec),
p._cmdline)
try:
print_line(line)
except curses.error:
break
win.refresh()
def main():
try:
interval = 0
while 1:
args = poll(interval)
refresh_window(*args)
interval = 1
except (KeyboardInterrupt, SystemExit):
pass
if __name__ == '__main__':
main()
|
ztop/psutil
|
examples/iotop.py
|
Python
|
bsd-3-clause
| 4,394
|
from django.contrib import admin
from .models import ZapierSubscription
class ZapierSubscriptionAdmin(admin.ModelAdmin):
list_display = ('domain', 'user_id', 'repeater_id', 'event_name', 'url')
list_filter = ('domain', 'event_name')
admin.site.register(ZapierSubscription, ZapierSubscriptionAdmin)
|
dimagi/commcare-hq
|
corehq/apps/zapier/admin.py
|
Python
|
bsd-3-clause
| 311
|
from decimal import Decimal
import json
import urllib
from mock import Mock, patch
from nose.tools import eq_
import test_utils
from lib.paypal import constants
from lib.paypal.ipn import IPN
from lib.paypal.tests import samples
from lib.sellers.models import Seller, SellerPaypal
from lib.sellers.tests.utils import make_seller_paypal
from lib.transactions import constants as transaction_constants
from lib.transactions.models import Transaction
from solitude.base import APITest
@patch('lib.paypal.client.requests.post')
class TestValid(test_utils.TestCase):
def test_empty(self, post):
eq_(IPN('').is_valid(), False)
def test_not_completed(self, post):
eq_(IPN('status=something').is_valid(), False)
def test_not_valid(self, post):
post.return_value.text = 'NOPE'
post.return_value.status_code = 200
eq_(IPN('status=completed').is_valid(), False)
def test_good(self, post):
post.return_value.text = 'VERIFIED'
post.return_value.status_code = 200
eq_(IPN('status=completed').is_valid(), True)
def test_calls(self, post):
post.return_value.text = 'VERIFIED'
post.return_value.status_code = 200
eq_(IPN('status=completed').is_valid(), True)
class TestParse(test_utils.TestCase):
def create(self, data):
ipn = IPN(data)
mock = Mock()
mock.return_value = True
ipn.is_valid = mock
return ipn
def test_parse(self):
ipn = self.create('status=foo')
eq_(ipn.parse(), ({'status': 'foo'}, {}))
def test_number(self):
ipn = self.create(urllib.urlencode({'transaction[0].amount':
'USD 1.00'}))
eq_(ipn.parse(), ({}, {'0': {'amount': {'currency': 'USD',
'amount': Decimal('1.00')}}}))
def test_sample_refund(self):
ipn = self.create(urllib.urlencode(samples.sample_refund))
trans, item = ipn.parse()
eq_(trans['status'], 'COMPLETED')
eq_(item['0']['status'], 'Refunded')
eq_(item['0']['amount'],
{'currency': 'USD', 'amount': Decimal('1.00')})
def test_chained_refund(self):
ipn = self.create(urllib.urlencode(samples.sample_chained_refund))
trans, res = ipn.parse()
eq_(trans['status'], 'COMPLETED')
eq_(res['0']['status'], 'Refunded')
eq_(res['0']['is_primary_receiver'], 'true')
eq_(res['0']['amount'],
{'currency': 'USD', 'amount': Decimal('0.99')})
eq_(res['1']['is_primary_receiver'], 'false')
eq_(res['1']['amount'],
{'currency': 'USD', 'amount': Decimal('0.30')})
@patch('lib.paypal.client.requests.post')
class TestProcess(test_utils.TestCase):
def test_invalid(self, post):
ipn = IPN('')
ipn.process()
eq_(ipn.status, constants.IPN_STATUS_IGNORED)
def test_still_ignored(self, post):
post.return_value.text = 'VERIFIED'
post.return_value.status_code = 200
ipn = IPN(urllib.urlencode(samples.sample_refund))
ipn.process()
eq_(ipn.status, constants.IPN_STATUS_IGNORED)
@patch('lib.paypal.ipn.utils.completed')
def test_purchase(self, completed, post):
post.return_value.text = 'VERIFIED'
post.return_value.status_code = 200
completed.return_value = True
ipn = IPN(urllib.urlencode(samples.sample_purchase))
ipn.process()
eq_(ipn.status, constants.IPN_STATUS_OK)
@patch('lib.paypal.ipn.utils.completed')
def test_purchase_not(self, completed, post):
post.return_value.text = 'VERIFIED'
post.return_value.status_code = 200
completed.return_value = False
ipn = IPN(urllib.urlencode(samples.sample_purchase))
ipn.process()
eq_(ipn.status, constants.IPN_STATUS_IGNORED)
@patch('lib.paypal.ipn.utils.refunded')
def test_refund(self, refunded, post):
post.return_value.text = 'VERIFIED'
post.return_value.status_code = 200
refunded.return_value = True
ipn = IPN(urllib.urlencode(samples.sample_refund))
ipn.process()
eq_(ipn.status, constants.IPN_STATUS_OK)
@patch('lib.paypal.ipn.utils.reversal')
def test_reversal(self, reversal, post):
post.return_value.text = 'VERIFIED'
post.return_value.status_code = 200
reversal.return_value = True
ipn = IPN(urllib.urlencode(samples.sample_reversal))
ipn.process()
eq_(ipn.status, constants.IPN_STATUS_OK)
@patch('lib.paypal.ipn.utils.refunded')
def test_chained_refund(self, refunded, post):
post.return_value.text = 'VERIFIED'
post.return_value.status_code = 200
refunded.return_value = True
ipn = IPN(urllib.urlencode(samples.sample_chained_refund))
ipn.process()
eq_(refunded.call_count, 1)
eq_(ipn.status, constants.IPN_STATUS_OK)
@patch('lib.paypal.client.requests.post')
class TestIPNResource(APITest):
def setUp(self):
self.api_name = 'paypal'
self.uuid = 'sample:uid'
self.list_url = self.get_list_url('ipn')
self.seller, self.paypal, self.product = make_seller_paypal(self.uuid)
self.transaction = Transaction.objects.create(uuid='5678',
provider=transaction_constants.SOURCE_PAYPAL,
seller_product=self.product, amount='10', uid_support='123')
def test_nope(self, post):
res = self.client.post(self.list_url, data={})
eq_(res.status_code, 400, res.content)
def test_something(self, post):
res = self.client.post(self.list_url, data={'data': 'foo'})
eq_(res.status_code, 201)
eq_(json.loads(res.content)['status'], 'IGNORED')
def test_purchase(self, post):
post.return_value.text = 'VERIFIED'
post.return_value.status_code = 200
res = self.client.post(self.list_url, data={'data':
urllib.urlencode(samples.sample_purchase)})
eq_(res.status_code, 201)
data = json.loads(res.content)
eq_(data['status'], 'OK')
eq_(data['action'], 'PAYMENT')
eq_(data['uuid'], '5678')
eq_(data['amount'], {'currency': 'USD', 'amount': '0.01'})
def test_refund(self, post):
post.return_value.text = 'VERIFIED'
post.return_value.status_code = 200
self.transaction.status = transaction_constants.STATUS_COMPLETED
self.transaction.save()
res = self.client.post(self.list_url, data={'data':
urllib.urlencode(samples.sample_refund)})
eq_(res.status_code, 201)
data = json.loads(res.content)
eq_(data['status'], 'OK')
eq_(data['action'], 'REFUND')
eq_(data['uuid'], '5678')
eq_(data['amount'], {'currency': 'USD', 'amount': '1.00'})
|
muffinresearch/solitude
|
lib/paypal/tests/test_ipn.py
|
Python
|
bsd-3-clause
| 6,889
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Key', fields ['org_name']
db.create_index('locksmith_hub_key', ['org_name'])
# Adding index on 'Key', fields ['name']
db.create_index('locksmith_hub_key', ['name'])
# Adding index on 'Key', fields ['org_url']
db.create_index('locksmith_hub_key', ['org_url'])
# Adding index on 'Key', fields ['key']
db.create_index('locksmith_hub_key', ['key'])
def backwards(self, orm):
# Removing index on 'Key', fields ['key']
db.delete_index('locksmith_hub_key', ['key'])
# Removing index on 'Key', fields ['org_url']
db.delete_index('locksmith_hub_key', ['org_url'])
# Removing index on 'Key', fields ['name']
db.delete_index('locksmith_hub_key', ['name'])
# Removing index on 'Key', fields ['org_name']
db.delete_index('locksmith_hub_key', ['org_name'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'hub.api': {
'Meta': {'object_name': 'Api', 'db_table': "'locksmith_hub_api'"},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'display_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'documentation_link': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mode': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'push_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'querybuilder_link': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'signing_key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tools_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'hub.key': {
'Meta': {'object_name': 'Key', 'db_table': "'locksmith_hub_key'"},
'alternate_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'org_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'org_url': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'promotable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}),
'usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'api_key'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"})
},
u'hub.keypublicationstatus': {
'Meta': {'object_name': 'KeyPublicationStatus', 'db_table': "'locksmith_hub_keypublicationstatus'"},
'api': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pub_statuses'", 'to': u"orm['hub.Api']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pub_statuses'", 'to': u"orm['hub.Key']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'hub.report': {
'Meta': {'object_name': 'Report', 'db_table': "'locksmith_hub_report'"},
'api': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': u"orm['hub.Api']"}),
'calls': ('django.db.models.fields.IntegerField', [], {}),
'date': ('django.db.models.fields.DateField', [], {}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': u"orm['hub.Key']"}),
'reported_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['hub']
|
sunlightlabs/django-locksmith
|
locksmith/hub/migrations/0009_auto.py
|
Python
|
bsd-3-clause
| 9,517
|
# proxy module
from __future__ import absolute_import
from chaco.tools.broadcaster import *
|
enthought/etsproxy
|
enthought/chaco/tools/broadcaster.py
|
Python
|
bsd-3-clause
| 92
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.utils.html import format_html
from .database_maintenance_task import DatabaseMaintenanceTaskAdmin
class DatabaseUpgradeAdmin(DatabaseMaintenanceTaskAdmin):
list_filter = [
"database__team", "source_plan", "target_plan", "source_plan__engine",
"status",
]
list_display = (
"database", "database_team", "source_plan_name", "target_plan_name",
"current_step", "friendly_status", "maintenance_action", "link_task",
"started_at", "finished_at"
)
readonly_fields = (
"current_step_class", "database",
"source_plan", "source_plan_name", "target_plan", "target_plan_name",
"link_task", "started_at", "finished_at",
"status", "maintenance_action", "task_schedule"
)
def maintenance_action(self, maintenance_task):
if (not maintenance_task.is_status_error or
not maintenance_task.can_do_retry):
return 'N/A'
url = maintenance_task.database.get_upgrade_retry_url()
html = ("<a title='Retry' class='btn btn-info' "
"href='{}'>Retry</a>").format(url)
return format_html(html)
|
globocom/database-as-a-service
|
dbaas/maintenance/admin/database_upgrade.py
|
Python
|
bsd-3-clause
| 1,239
|
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.core.validators import ValidationError
from django.urls import reverse
from django.db.models import Q
from django.forms.widgets import Select
from django.utils.encoding import force_str
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _, gettext
from django.utils import timezone
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Field, Div, Submit, HTML
from dal import autocomplete
import logging
import re
from danceschool.core.models import (
InvoiceItem, StaffMember, EventStaffCategory, Event, PublicEvent, Series
)
from danceschool.core.forms import EventAutocompleteForm
from .models import (
ExpenseItem, ExpenseCategory, RevenueItem, RepeatedExpenseRule, GenericRepeatedExpense,
LocationRentalInfo, RoomRentalInfo, StaffDefaultWage, StaffMemberWageInfo, TransactionParty
)
from .autocomplete_light_registry import get_method_list, get_approval_status_list
# Define logger for this file
logger = logging.getLogger(__name__)
PAYBY_CHOICES = (
(1, _('Hours of Work/Rental (paid at default rate)')),
(2, _('Flat Payment')),
)
class ExpenseCategoryWidget(Select):
'''
    Override render_option so that each category's default wage rate is attached
    as extra data for use by jQuery.
    This could be optimized to reduce database calls by overriding the render function.
'''
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_str(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
# Pass the default wage rate as an option
if option_value:
defaultRate = ExpenseCategory.objects.filter(id=int(option_value)).first().defaultRate
extra_value_data = ' data-defaultRate=' + str(defaultRate)
else:
extra_value_data = ''
return format_html('<option value="{}"{}{}>{}</option>',
option_value,
selected_html,
extra_value_data,
force_str(option_label))
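# Hypothetical sketch (not part of the original module): the docstring above notes
# that render_option() triggers one query per rendered choice; the default rates
# could instead be fetched once and looked up from a dict, e.g. in an overridden
# render() method.
def _example_prefetch_default_rates():
    # Single query mapping category id -> defaultRate.
    return dict(ExpenseCategory.objects.values_list('id', 'defaultRate'))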
class ExpenseReportingForm(EventAutocompleteForm, forms.ModelForm):
payTo = forms.ModelChoiceField(
queryset=TransactionParty.objects.all(),
label=_('Pay to'),
required=True,
widget=autocomplete.ModelSelect2(
url='transactionParty-list-autocomplete',
attrs={
# This will set the input placeholder attribute:
'data-placeholder': _('Enter a name or location'),
# This will set the yourlabs.Autocomplete.minimumCharacters
# options, the naming conversion is handled by jQuery
'data-minimum-input-length': 2,
'data-max-results': 8,
'class': 'modern-style',
}
)
)
payBy = forms.ChoiceField(
widget=forms.RadioSelect, choices=PAYBY_CHOICES,
label=_('Report this expense as:'), initial=2
)
paymentMethod = autocomplete.Select2ListCreateChoiceField(
choice_list=get_method_list,
required=False,
widget=autocomplete.ListSelect2(url='paymentMethod-list-autocomplete'),
label=_('Payment method'),
)
approved = autocomplete.Select2ListCreateChoiceField(
choice_list=get_approval_status_list,
required=False,
widget=autocomplete.ListSelect2(url='approved-list-autocomplete'),
label=_('Approved'),
)
def __init__(self, *args, **kwargs):
user = kwargs.pop('user', None)
user_id = getattr(user, 'id', None)
if user_id:
kwargs.update(initial={
'payTo': TransactionParty.objects.get_or_create(
user=user, defaults={'name': user.get_full_name()}
)[0].id,
})
super().__init__(*args, **kwargs)
self.helper = FormHelper()
# The hidden input of accrual date must be passed as a naive datetime.
# Django will take care of converting it to local time
accrualDate_field = Field(
'accrualDate', type="hidden",
value=(
timezone.make_naive(timezone.now()) if
timezone.is_aware(timezone.now()) else timezone.now()
)
)
if user and user.has_perm('financial.mark_expenses_paid'):
payment_section = Div(
Div(
HTML(
'<a data-toggle="collapse" href="#collapsepayment">' +
('%s</a> (%s)' % (
_('Mark as Approved/Paid'), _('click to expand')
))
),
css_class='card-header'
),
Div(
'approved',
'paid',
Div(
Field('paymentDate', css_class='datepicker'),
HTML(
'<div style="margin: auto 1em;">' +
'<button id="payment-event-start" ' +
'class="btn btn-outline-secondary">' +
('%s</button></div>' % _('Event Start Date'))
),
css_class='form-row',
),
'paymentMethod',
HTML(
'<p style="margin-top: 30px;"><strong>' +
(
'%s</strong> %s</p>' % (
_('Note:'),
_(
'For accounting purposes, please do not ' +
'mark expenses as paid unless they have ' +
'already been paid to the recipient.'
)
)
),
),
css_class='card-body collapse',
id='collapsepayment',
),
css_class='card my-4'
)
else:
payment_section = None
# Add category button should only appear for users who are allowed to add categories
if user.has_perm('financial.add_expensecategory'):
related_url = reverse('admin:financial_expensecategory_add') + '?_to_field=id&_popup=1'
added_html = [
('<a href="%s" class="btn btn-outline-secondary ' % related_url) +
'related-widget-wrapper-link add-related" id="add_id_category"> ',
'<img src="%sadmin/img/icon-addlink.svg" width="10" height="10" alt="%s"/></a>' % (
getattr(settings, 'STATIC_URL', '/static/'), _('Add Another')
)
]
category_field = Div(
Div('category', css_class='col-sm-11'),
Div(HTML('\n'.join(added_html)), css_class='col-sm-1', style='margin-top: 25px;'),
css_class='related-widget-wrapper row'
)
else:
category_field = Div('category')
self.fields['event'].label = _('Event (optional)')
self.fields['event'].required = False
self.helper.layout = Layout(
Field('submissionUser', type="hidden", value=user_id),
'payTo',
'payBy',
category_field,
'description',
'hours',
'total',
'event',
'reimbursement',
accrualDate_field,
payment_section,
'attachment',
Submit('submit', _('Submit')),
)
def clean(self):
# Custom cleaning ensures that user, hours, and total
# are not reported where not necessary.
super().clean()
payBy = self.cleaned_data.get('payBy')
hours = self.cleaned_data.get('hours')
total = self.cleaned_data.get('total')
paid = self.cleaned_data.get('paid')
paymentDate = self.cleaned_data.get('paymentDate')
event = self.cleaned_data.get('event')
# Automatically marks expenses that are paid
# upon submission as accruing at the date of payment.
if paid and paymentDate:
self.cleaned_data['accrualDate'] = paymentDate
else:
self.cleaned_data.pop('paymentDate', None)
# If an event has been specified, then that takes precedence for setting
# the accrual date of the expense.
if event and getattr(event, 'startTime', None):
self.cleaned_data['accrualDate'] = event.startTime
if payBy == '1' and total:
self.cleaned_data.pop('total', None)
if payBy == '2' and hours:
self.cleaned_data.pop('hours', None)
return self.cleaned_data
class Meta:
model = ExpenseItem
fields = [
'submissionUser', 'payTo', 'category', 'description', 'hours',
'total', 'reimbursement', 'attachment', 'approved', 'paid',
'paymentDate', 'paymentMethod', 'accrualDate', 'event'
]
widgets = {
'category': ExpenseCategoryWidget,
}
def media(self):
''' Remove jQuery from widget media to avoid duplicate initialization by django-filer '''
media = super().media
regex = re.compile(r"^admin/.*jquery(\.min)?\.js$")
new_js = [x for x in media._js if not regex.search(x)]
new_css = media._css
media = forms.Media(js=new_js, css=new_css)
add_media = forms.Media(
js = (
'admin/js/admin/RelatedObjectLookups.js',
'bootstrap-datepicker/js/bootstrap-datepicker.min.js',
'js/expense_reporting.js',
),
css = {
'all': (
'bootstrap-datepicker/css/bootstrap-datepicker.standalone.min.css',
),
}
)
media += add_media
return media
class InvoiceItemChoiceField(forms.ModelChoiceField):
'''
This exists so that the validators for InvoiceItems (EventRegistrations) are not
thrown off by the fact that the initial query is blank.
'''
def to_python(self, value):
try:
value = super().to_python(value)
except (ValueError, ValidationError):
key = self.to_field_name or 'pk'
value = InvoiceItem.objects.filter(**{key: value})
if not value.exists():
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
else:
value = value.first()
return value
class RevenueReportingForm(EventAutocompleteForm, forms.ModelForm):
'''
This form is used in the revenue reporting view for quick generation of RevenueItems.
'''
receivedFrom = forms.ModelChoiceField(
queryset=TransactionParty.objects.all(),
label=_('Received from'),
required=False,
widget=autocomplete.ModelSelect2(
url='transactionParty-list-autocomplete',
attrs={
# This will set the input placeholder attribute:
'data-placeholder': _('Enter a name or location'),
# This will set the yourlabs.Autocomplete.minimumCharacters
# options, the naming conversion is handled by jQuery
'data-minimum-input-length': 2,
'data-max-results': 8,
'class': 'modern-style',
}
)
)
currentlyHeldBy = forms.ModelChoiceField(
queryset=User.objects.filter(Q(is_staff=True) | Q(staffmember__isnull=False)),
label=_('Cash currently in possession of'),
required=False,
widget=autocomplete.ModelSelect2(
url='autocompleteUser',
attrs={
# This will set the input placeholder attribute:
'data-placeholder': _('Enter a user name'),
# This will set the yourlabs.Autocomplete.minimumCharacters
# options, the naming conversion is handled by jQuery
'data-minimum-input-length': 2,
'data-max-results': 4,
'class': 'modern-style',
}
)
)
paymentMethod = autocomplete.Select2ListCreateChoiceField(
choice_list=get_method_list,
required=False,
widget=autocomplete.ListSelect2(url='paymentMethod-list-autocomplete'),
label=_('Payment method'),
)
def __init__(self, *args, **kwargs):
user = kwargs.pop('user', None)
if hasattr(user, 'id'):
kwargs.update(initial={
'submissionUser': user.id
})
super().__init__(*args, **kwargs)
self.helper = FormHelper()
detail_section = Div(
Div(
HTML(
'<a data-toggle="collapse" href="#collapsedetails">%s</a> (%s)' % (
_('Adjustments/Fees'), _('click to expand')
)
),
css_class='card-header'
),
Div(
'total',
'adjustments',
'fees',
css_class='card-body collapse',
id='collapsedetails',
),
css_class='card my-4')
event_section = Div(
Div(
HTML(
'<a data-toggle="collapse" href="#collapseevent">%s</a> (%s)' % (
_('Event/Invoice item (optional)'), _('click to expand')
)
),
css_class='card-header'
),
Div(
'event',
'invoiceItem',
css_class='card-body collapse',
id='collapseevent',
),
css_class='card my-4')
if user and user.has_perm('financial.mark_revenues_received'):
receipt_section = Div(
Div(
HTML(
'<a data-toggle="collapse" href="#collapsereceipt">%s</a> (%s)' % (
_('Mark as Received'), _('click to expand')
)
),
css_class='card-header'
),
Div(
'received',
Div(
Field('receivedDate', css_class='datepicker'),
HTML(
'<div style="margin: auto 1em;">' +
'<button id="received-event-start" ' +
'class="btn btn-outline-secondary">' +
('%s</button></div>' % _('Event Start Date'))
),
css_class='form-row',
),
'currentlyHeldBy',
# The hidden input of accrual date must be passed as a naive datetime.
# Django will take care of converting it to local time
HTML('<p style="margin-top: 30px;"><strong>%s</strong> %s</p>' % (
_('Note:'),
_(
'For accounting purposes, please do not mark revenues ' +
'as received until the money is in our possession.'
)
)),
css_class='card-body collapse',
id='collapsereceipt',
),
css_class='card my-4')
else:
receipt_section = None
self.fields["invoiceItem"] = InvoiceItemChoiceField(queryset=InvoiceItem.objects.none(), required=False)
self.fields['event'].required = False
# Handled by the model's save() method
self.fields['total'].required = False
self.fields['fees'].required = False
self.fields['adjustments'].required = False
self.helper.layout = Layout(
Field('submissionUser', type="hidden", value=getattr(user, 'id', None)),
Field('invoiceNumber', type="hidden"),
'category',
'description',
'receivedFrom',
'paymentMethod',
'grossTotal',
detail_section,
event_section,
receipt_section,
'attachment',
Submit('submit', _('Submit')),
)
def clean_description(self):
''' Avoid empty descriptions '''
return self.cleaned_data['description'] or _('Form Submitted Revenue')
def clean_invoiceNumber(self):
''' Create a unique invoice number '''
return 'SUBMITTED_%s_%s' % (
getattr(self.cleaned_data['submissionUser'], 'id', 'None'), timezone.now().strftime('%Y%m%d%H%M%S')
)
class Meta:
model = RevenueItem
fields = [
'submissionUser', 'invoiceNumber', 'category', 'description', 'event',
'invoiceItem', 'receivedFrom', 'paymentMethod', 'currentlyHeldBy',
'grossTotal', 'total', 'adjustments', 'fees', 'attachment', 'received',
'receivedDate'
]
def media(self):
''' Remove jQuery from widget media to avoid duplicate initialization by django-filer '''
media = super().media
regex = re.compile(r"^admin/.*jquery(\.min)?\.js$")
new_js = [x for x in media._js if not regex.search(x)]
new_css = media._css
media = forms.Media(js=new_js, css=new_css)
add_media = forms.Media(
js = (
'js/revenue_reporting.js',
'bootstrap-datepicker/js/bootstrap-datepicker.min.js',
),
css = {
'all': (
'bootstrap-datepicker/css/bootstrap-datepicker.standalone.min.css',
),
}
)
media += add_media
return media
class CompensationRuleUpdateForm(forms.ModelForm):
''' Used for bulk update of StaffMember compensation rules. '''
def save(self, commit=True):
''' Handle the update logic for this in the view, not the form '''
pass
class Meta:
model = StaffMemberWageInfo
fields = ['category', 'rentalRate', 'applyRateRule', 'dayStarts', 'weekStarts', 'monthStarts']
class CompensationRuleResetForm(forms.Form):
''' Used for bulk reset of StaffMember compensation rules to category defaults '''
def save(self, commit=True):
''' Handle the update logic for this in the view, not the form '''
pass
def __init__(self, *args, **kwargs):
staffmembers = kwargs.pop('staffmembers', StaffMember.objects.none())
# Initialize a default (empty) form to fill
super().__init__(*args, **kwargs)
for cat in EventStaffCategory.objects.order_by('name'):
this_label = cat.name
this_help_text = ''
if not getattr(cat, 'defaultwage', None):
this_help_text += gettext('No default compensation specified. ')
if staffmembers:
this_help_text += gettext('{count} selected members with rules specified.').format(
count=staffmembers.filter(expenserules__category=cat).count(),
)
self.fields['category_%s' % cat.id] = forms.BooleanField(
required=False, label=this_label, help_text=this_help_text
)
self.fields['resetHow'] = forms.ChoiceField(
label=_('For each selected category:'),
choices=(
('COPY', _('Copy default rules to each staff member')),
('DELETE', _('Delete existing custom rules'))
)
)
class ExpenseRuleGenerationForm(forms.Form):
''' Generate a form with all expense rules '''
staff = forms.BooleanField(
required=False, initial=True, label=_('Generate expense items for event staff')
)
venues = forms.BooleanField(
required=False, initial=True, label=_('Generate expense items for venue rental')
)
generic = forms.BooleanField(
required=False, initial=True, label=_('Generate expense items for generic rules')
)
registrations = forms.BooleanField(
required=False, initial=True,
label=_('Generate revenue items for registrations')
)
def __init__(self, *args, **kwargs):
# Initialize a default form to fill by rule
super().__init__(*args, **kwargs)
for rule in RepeatedExpenseRule.objects.filter(disabled=False).order_by('id'):
prefix = 'genericrule'
if isinstance(rule, LocationRentalInfo):
prefix = 'locationrule'
elif isinstance(rule, RoomRentalInfo):
prefix = 'roomrule'
elif isinstance(rule, StaffDefaultWage):
prefix = 'staffdefaultrule'
elif isinstance(rule, StaffMemberWageInfo):
prefix = 'staffmemberule'
self.fields['%s_%s' % (prefix, rule.id)] = forms.BooleanField(
required=False, initial=True, label=rule.ruleName
)
|
django-danceschool/django-danceschool
|
danceschool/financial/forms.py
|
Python
|
bsd-3-clause
| 21,754
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Consumer.xauth_allowed'
db.add_column('oauth_provider_consumer', 'xauth_allowed', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'Consumer.xauth_allowed'
db.delete_column('oauth_provider_consumer', 'xauth_allowed')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oauth_provider.consumer': {
'Meta': {'object_name': 'Consumer'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'xauth_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'oauth_provider.nonce': {
'Meta': {'object_name': 'Nonce'},
'consumer_key': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'token_key': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'oauth_provider.resource': {
'Meta': {'object_name': 'Resource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_readonly': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.TextField', [], {'max_length': '2083'})
},
'oauth_provider.token': {
'Meta': {'object_name': 'Token'},
'callback': ('django.db.models.fields.CharField', [], {'max_length': '2083', 'null': 'True', 'blank': 'True'}),
'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oauth_provider.Consumer']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oauth_provider.Resource']"}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1327884847L'}),
'token_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}),
'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'})
}
}
complete_apps = ['oauth_provider']
|
amrox/django-oauth-plus
|
oauth_provider/migrations/0002_auto__add_field_consumer_xauth_allowed.py
|
Python
|
bsd-3-clause
| 6,853
|
"""
---
Lifts CLI
~~~~~~~~~
Main interface used to modify logs. Can also directly edit flat file
database.
"""
import sys
import database
DATAFILE = 'data/lifts.db'
def usage():
print 'Usage:'
print ' add: python lifts-cli.py add date liftname weightxrep1 weightxrep2 \n'+\
' weightxrep3.. "notes notes notes"'
print ' search: python lifts-cli.py search liftname'
print ''
    print '    Alternatively, the data file can be edited directly'
print ''
print ' date defaults to today if not included'
print ' adding kg after liftname sets units for sets to kg. Defaults to lbs'
print ' Units: --weight=kg --weight=lbs'
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
sys.exit()
with database.dbm(DATAFILE) as dbm:
if sys.argv[1] == 'add':
dbm.add_entry(' '.join(sys.argv[1:]))
elif sys.argv[1] == 'search':
print dbm.search(sys.argv[2])
else:
            print 'Unrecognized command: %s' % (sys.argv[1])
print ''
usage()
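# Example invocations (illustrative only; the date, lift name, weights and notes
# below are made up):
#   python lifts-cli.py add 2014-01-15 squat 135x5 155x5 175x5 "felt strong today"
#   python lifts-cli.py search squat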
|
seenaburns/lifts
|
lifts-cli.py
|
Python
|
bsd-3-clause
| 1,087
|
#!/usr/local/bin/python
# coding: UTF-8
# released under bsd licence
# see LICENCE file or http://www.opensource.org/licenses/bsd-license.php for details
# Institute of Applied Simulation (ZHAW)
# Author Timo Jeranko
"""implementation of a self organizing map.
the implementation is based on the book
"Neural Networks - A Comprehensive Foundation" (chapter 9 - Self-Organizing Maps) by Simon Haykin
"""
|
IAS-ZHAW/machine_learning_scripts
|
mlscripts/ml/som/__init__.py
|
Python
|
bsd-3-clause
| 416
|
from pytest_bdd import scenarios, when, then, given, parsers
import xml.etree.ElementTree as ET
scenarios('features/timing/ebuttd_resolved_timings_on_elements.feature')
@then('p resulted begin time is <p_resulted_begin_time>')
def then_it_has_p_resulted_begin_time(test_context, p_resulted_begin_time):
document = test_context['ebuttd_document']
tree = ET.fromstring(document.get_xml())
p_element = tree.findall('{http://www.w3.org/ns/ttml}body/{http://www.w3.org/ns/ttml}div/{http://www.w3.org/ns/ttml}p')[0]
document_generated_p_begin_time = p_element.get('begin')
if (p_resulted_begin_time == 'None'):
assert document_generated_p_begin_time is None
else:
assert p_resulted_begin_time == document_generated_p_begin_time
@then('p resulted end time is <p_resulted_end_time>')
def then_it_has_p_resulted_end_time(test_context, p_resulted_end_time):
document = test_context['ebuttd_document']
tree = ET.fromstring(document.get_xml())
p_element = tree.findall('{http://www.w3.org/ns/ttml}body/{http://www.w3.org/ns/ttml}div/{http://www.w3.org/ns/ttml}p')[0]
document_generated_p_end_time = p_element.get('end')
if (p_resulted_end_time == 'None'):
assert document_generated_p_end_time is None
else:
assert p_resulted_end_time == document_generated_p_end_time
@then('nestedSpan resulted begin time is <nestedSpan_resulted_begin_time>')
def then_it_has_nestedSpan_resulted_begin_time(test_context, nestedSpan_resulted_begin_time):
document = test_context['ebuttd_document']
tree = ET.fromstring(document.get_xml())
elements = tree.findall('{http://www.w3.org/ns/ttml}body/{http://www.w3.org/ns/ttml}div/{http://www.w3.org/ns/ttml}p/{http://www.w3.org/ns/ttml}span')
document_generated_span_begin_time = elements[1].get('begin')
assert nestedSpan_resulted_begin_time == document_generated_span_begin_time
@then('nestedSpan resulted end time is <nestedSpan_resulted_end_time>')
def then_it_has_nestedSpan_resulted_end_time(test_context, nestedSpan_resulted_end_time):
document = test_context['ebuttd_document']
tree = ET.fromstring(document.get_xml())
elements = tree.findall('{http://www.w3.org/ns/ttml}body/{http://www.w3.org/ns/ttml}div/{http://www.w3.org/ns/ttml}p/{http://www.w3.org/ns/ttml}span')
document_generated_span_end_time = elements[1].get('end')
assert nestedSpan_resulted_end_time == document_generated_span_end_time
@then('span1 resulted begin time is <span1_resulted_begin_time>')
def then_it_has_span1_resulted_begin_time(test_context, span1_resulted_begin_time):
document = test_context['ebuttd_document']
tree = ET.fromstring(document.get_xml())
elements = tree.findall('{http://www.w3.org/ns/ttml}body/{http://www.w3.org/ns/ttml}div/{http://www.w3.org/ns/ttml}p/{http://www.w3.org/ns/ttml}span')
document_generated_span_begin_time = elements[0].get('begin')
if span1_resulted_begin_time == 'None':
assert document_generated_span_begin_time is None
else:
assert span1_resulted_begin_time == document_generated_span_begin_time
@then('span2 resulted begin time is <span2_resulted_begin_time>')
def then_it_has_span2_resulted_begin_time(test_context, span2_resulted_begin_time):
document = test_context['ebuttd_document']
tree = ET.fromstring(document.get_xml())
elements = tree.findall('{http://www.w3.org/ns/ttml}body/{http://www.w3.org/ns/ttml}div/{http://www.w3.org/ns/ttml}p/{http://www.w3.org/ns/ttml}span')
document_generated_span_begin_time = elements[1].get('begin')
assert span2_resulted_begin_time == document_generated_span_begin_time
@then('span1 resulted end time is <span1_resulted_end_time>')
def then_it_has_span1_resulted_end_time(test_context, span1_resulted_end_time):
document = test_context['ebuttd_document']
tree = ET.fromstring(document.get_xml())
elements = tree.findall('{http://www.w3.org/ns/ttml}body/{http://www.w3.org/ns/ttml}div/{http://www.w3.org/ns/ttml}p/{http://www.w3.org/ns/ttml}span')
document_generated_span_end_time = elements[0].get('end')
    if span1_resulted_end_time == 'None':
        assert document_generated_span_end_time is None
    else:
        assert span1_resulted_end_time == document_generated_span_end_time
@then('span2 resulted end time is <span2_resulted_end_time>')
def then_it_has_span2_resulted_end_time(test_context, span2_resulted_end_time):
    document = test_context['ebuttd_document']
    tree = ET.fromstring(document.get_xml())
    elements = tree.findall('{http://www.w3.org/ns/ttml}body/{http://www.w3.org/ns/ttml}div/{http://www.w3.org/ns/ttml}p/{http://www.w3.org/ns/ttml}span')
    document_generated_span_end_time = elements[1].get('end')
    assert span2_resulted_end_time == document_generated_span_end_time
@then('no timings present on p')
def then_no_timings_present_on_p(test_context):
document = test_context['ebuttd_document']
tree = ET.fromstring(document.get_xml())
p_elements = tree.findall('{http://www.w3.org/ns/ttml}body/{http://www.w3.org/ns/ttml}div/{http://www.w3.org/ns/ttml}p')
for p_element in p_elements:
assert 'begin' not in p_element.keys()
assert 'end' not in p_element.keys()
@then('timings present on p')
def then_timings_present_on_p(test_context):
document = test_context['ebuttd_document']
tree = ET.fromstring(document.get_xml())
p_elements = tree.findall('{http://www.w3.org/ns/ttml}body/{http://www.w3.org/ns/ttml}div/{http://www.w3.org/ns/ttml}p')
for p_element in p_elements:
assert 'begin' in p_element.keys()
assert 'end' in p_element.keys()
@then('no timings present on div')
def then_no_timings_present_on_div(test_context):
document = test_context['ebuttd_document']
tree = ET.fromstring(document.get_xml())
divs = tree.findall('{http://www.w3.org/ns/ttml}body/{http://www.w3.org/ns/ttml}div')
for div in divs:
assert 'begin' not in div.keys()
assert 'end' not in div.keys()
@then('no timings present on body')
def then_no_timings_present_on_body(test_context):
document = test_context['ebuttd_document']
tree = ET.fromstring(document.get_xml())
body = tree.findall('{http://www.w3.org/ns/ttml}body')[0]
assert 'begin' not in body.keys()
assert 'end' not in body.keys()
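# Hypothetical helper (not part of the original suite): every step above repeats the
# same namespaced lookup, so the XPath could be kept in one place like this.
TTML_NS = '{http://www.w3.org/ns/ttml}'
def _example_find_p_spans(document):
    tree = ET.fromstring(document.get_xml())
    return tree.findall(
        TTML_NS + 'body/' + TTML_NS + 'div/' + TTML_NS + 'p/' + TTML_NS + 'span')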
|
bbc/ebu-tt-live-toolkit
|
testing/bdd/test_ebuttd_resolved_timings_on_elements.py
|
Python
|
bsd-3-clause
| 6,347
|
from django.core.management.base import BaseCommand
from chamber.importers import BulkCSVImporter, CSVImporter
import pyprind
class ProgressBarStream(object):
"""
OutputStream wrapper to remove default linebreak at line endings.
"""
def __init__(self, stream):
"""
Wrap the given stream.
"""
self.stream = stream
def write(self, *args, **kwargs):
"""
Call the stream's write method without linebreaks at line endings.
"""
return self.stream.write(ending="", *args, **kwargs)
def flush(self):
"""
Call the stream's flush method without any extra arguments.
"""
return self.stream.flush()
class ImportCSVCommandMixin(object):
def handle(self, *args, **kwargs):
self.import_csv()
class BulkImportCSVCommand(ImportCSVCommandMixin, BulkCSVImporter, BaseCommand):
def __init__(self, *args, **kwargs):
super(BulkImportCSVCommand, self).__init__(*args, **kwargs)
self.bar = None
def _pre_import_rows(self, row_count):
self.bar = pyprind.ProgBar(row_count, stream=ProgressBarStream(self.stdout))
def _post_batch_create(self, created_count, row_count):
self.bar.update(iterations=created_count)
def _post_import_rows(self, created_count, updated_count=0):
self.stdout.write('\nCreated {created} {model_name}.'.format(
created=created_count,
model_name=self.model_class._meta.verbose_name_plural # pylint: disable=W0212
))
class ImportCSVCommand(ImportCSVCommandMixin, CSVImporter, BaseCommand):
def _post_import_rows(self, created_count, updated_count=0):
self.stdout.write('Created {created} {model_name} and {updated} updated.'.format(
created=created_count,
model_name=self.model_class._meta.verbose_name_plural, # pylint: disable=W0212
updated=updated_count)
)
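# Illustrative sketch (not part of the module): ProgressBarStream is meant to be
# handed to pyprind so the bar redraws in place on a management command's stdout.
def _example_progress_bar(stdout, row_count):
    bar = pyprind.ProgBar(row_count, stream=ProgressBarStream(stdout))
    for _ in range(row_count):
        bar.update()
    return bar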
|
matllubos/django-chamber
|
chamber/commands/__init__.py
|
Python
|
bsd-3-clause
| 1,952
|
from .middleware import OpenTracingMiddleware
from .tracer import DjangoTracer
|
kcamenzind/django_opentracing
|
django_opentracing/__init__.py
|
Python
|
bsd-3-clause
| 78
|
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
def main():
description = ('Generates RST documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='rst2rst',
description=description)
|
benoitbryon/rst2rst
|
rst2rst/scripts/rst2rst.py
|
Python
|
bsd-3-clause
| 380
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import itertools
import os
import pytest
from cryptography.exceptions import (
AlreadyFinalized, AlreadyUpdated, InvalidSignature, InvalidTag,
NotYetFinalized
)
from cryptography.hazmat.primitives import hashes, hmac
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand
from cryptography.hazmat.primitives.kdf.kbkdf import (
CounterLocation, KBKDFHMAC, Mode
)
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from ...utils import load_vectors_from_file
def _load_all_params(path, file_names, param_loader):
all_params = []
for file_name in file_names:
all_params.extend(
load_vectors_from_file(os.path.join(path, file_name), param_loader)
)
return all_params
def generate_encrypt_test(param_loader, path, file_names, cipher_factory,
mode_factory):
all_params = _load_all_params(path, file_names, param_loader)
@pytest.mark.parametrize("params", all_params)
def test_encryption(self, backend, params):
encrypt_test(backend, cipher_factory, mode_factory, params)
return test_encryption
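# Illustrative usage of the closure pattern above (the loader, file and algorithm
# names are only examples): a test class binds the generated function as a class
# attribute so pytest collects one parametrized case per vector entry, e.g.
#   test_cbc = generate_encrypt_test(
#       load_nist_vectors, os.path.join("ciphers", "AES", "CBC"),
#       ["CBCGFSbox128.rsp"],
#       lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
#       lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
#   )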
def encrypt_test(backend, cipher_factory, mode_factory, params):
plaintext = params["plaintext"]
ciphertext = params["ciphertext"]
cipher = Cipher(
cipher_factory(**params),
mode_factory(**params),
backend=backend
)
encryptor = cipher.encryptor()
actual_ciphertext = encryptor.update(binascii.unhexlify(plaintext))
actual_ciphertext += encryptor.finalize()
assert actual_ciphertext == binascii.unhexlify(ciphertext)
decryptor = cipher.decryptor()
actual_plaintext = decryptor.update(binascii.unhexlify(ciphertext))
actual_plaintext += decryptor.finalize()
assert actual_plaintext == binascii.unhexlify(plaintext)
def generate_aead_test(param_loader, path, file_names, cipher_factory,
mode_factory):
all_params = _load_all_params(path, file_names, param_loader)
@pytest.mark.parametrize("params", all_params)
def test_aead(self, backend, params):
aead_test(backend, cipher_factory, mode_factory, params)
return test_aead
def aead_test(backend, cipher_factory, mode_factory, params):
if params.get("pt") is not None:
plaintext = params["pt"]
ciphertext = params["ct"]
aad = params["aad"]
if params.get("fail") is True:
cipher = Cipher(
cipher_factory(binascii.unhexlify(params["key"])),
mode_factory(binascii.unhexlify(params["iv"]),
binascii.unhexlify(params["tag"]),
len(binascii.unhexlify(params["tag"]))),
backend
)
decryptor = cipher.decryptor()
decryptor.authenticate_additional_data(binascii.unhexlify(aad))
actual_plaintext = decryptor.update(binascii.unhexlify(ciphertext))
with pytest.raises(InvalidTag):
decryptor.finalize()
else:
cipher = Cipher(
cipher_factory(binascii.unhexlify(params["key"])),
mode_factory(binascii.unhexlify(params["iv"]), None),
backend
)
encryptor = cipher.encryptor()
encryptor.authenticate_additional_data(binascii.unhexlify(aad))
actual_ciphertext = encryptor.update(binascii.unhexlify(plaintext))
actual_ciphertext += encryptor.finalize()
tag_len = len(binascii.unhexlify(params["tag"]))
assert binascii.hexlify(encryptor.tag[:tag_len]) == params["tag"]
cipher = Cipher(
cipher_factory(binascii.unhexlify(params["key"])),
mode_factory(binascii.unhexlify(params["iv"]),
binascii.unhexlify(params["tag"]),
min_tag_length=tag_len),
backend
)
decryptor = cipher.decryptor()
decryptor.authenticate_additional_data(binascii.unhexlify(aad))
actual_plaintext = decryptor.update(binascii.unhexlify(ciphertext))
actual_plaintext += decryptor.finalize()
assert actual_plaintext == binascii.unhexlify(plaintext)
def generate_stream_encryption_test(param_loader, path, file_names,
cipher_factory):
all_params = _load_all_params(path, file_names, param_loader)
@pytest.mark.parametrize("params", all_params)
def test_stream_encryption(self, backend, params):
stream_encryption_test(backend, cipher_factory, params)
return test_stream_encryption
def stream_encryption_test(backend, cipher_factory, params):
plaintext = params["plaintext"]
ciphertext = params["ciphertext"]
offset = params["offset"]
cipher = Cipher(cipher_factory(**params), None, backend=backend)
encryptor = cipher.encryptor()
# throw away offset bytes
encryptor.update(b"\x00" * int(offset))
actual_ciphertext = encryptor.update(binascii.unhexlify(plaintext))
actual_ciphertext += encryptor.finalize()
assert actual_ciphertext == binascii.unhexlify(ciphertext)
decryptor = cipher.decryptor()
decryptor.update(b"\x00" * int(offset))
actual_plaintext = decryptor.update(binascii.unhexlify(ciphertext))
actual_plaintext += decryptor.finalize()
assert actual_plaintext == binascii.unhexlify(plaintext)
def generate_hash_test(param_loader, path, file_names, hash_cls):
all_params = _load_all_params(path, file_names, param_loader)
@pytest.mark.parametrize("params", all_params)
def test_hash(self, backend, params):
hash_test(backend, hash_cls, params)
return test_hash
def hash_test(backend, algorithm, params):
msg, md = params
m = hashes.Hash(algorithm, backend=backend)
m.update(binascii.unhexlify(msg))
expected_md = md.replace(" ", "").lower().encode("ascii")
assert m.finalize() == binascii.unhexlify(expected_md)
def generate_base_hash_test(algorithm, digest_size, block_size):
def test_base_hash(self, backend):
base_hash_test(backend, algorithm, digest_size, block_size)
return test_base_hash
def base_hash_test(backend, algorithm, digest_size, block_size):
m = hashes.Hash(algorithm, backend=backend)
assert m.algorithm.digest_size == digest_size
assert m.algorithm.block_size == block_size
m_copy = m.copy()
assert m != m_copy
assert m._ctx != m_copy._ctx
m.update(b"abc")
copy = m.copy()
copy.update(b"123")
m.update(b"123")
assert copy.finalize() == m.finalize()
def generate_long_string_hash_test(hash_factory, md):
def test_long_string_hash(self, backend):
long_string_hash_test(backend, hash_factory, md)
return test_long_string_hash
def long_string_hash_test(backend, algorithm, md):
m = hashes.Hash(algorithm, backend=backend)
m.update(b"a" * 1000000)
assert m.finalize() == binascii.unhexlify(md.lower().encode("ascii"))
def generate_base_hmac_test(hash_cls):
def test_base_hmac(self, backend):
base_hmac_test(backend, hash_cls)
return test_base_hmac
def base_hmac_test(backend, algorithm):
key = b"ab"
h = hmac.HMAC(binascii.unhexlify(key), algorithm, backend=backend)
h_copy = h.copy()
assert h != h_copy
assert h._ctx != h_copy._ctx
def generate_hmac_test(param_loader, path, file_names, algorithm):
all_params = _load_all_params(path, file_names, param_loader)
@pytest.mark.parametrize("params", all_params)
def test_hmac(self, backend, params):
hmac_test(backend, algorithm, params)
return test_hmac
def hmac_test(backend, algorithm, params):
msg, md, key = params
h = hmac.HMAC(binascii.unhexlify(key), algorithm, backend=backend)
h.update(binascii.unhexlify(msg))
assert h.finalize() == binascii.unhexlify(md.encode("ascii"))
def generate_pbkdf2_test(param_loader, path, file_names, algorithm):
all_params = _load_all_params(path, file_names, param_loader)
@pytest.mark.parametrize("params", all_params)
def test_pbkdf2(self, backend, params):
pbkdf2_test(backend, algorithm, params)
return test_pbkdf2
def pbkdf2_test(backend, algorithm, params):
# Password and salt can contain \0, which should be loaded as a null char.
# The NIST loader loads them as literal strings so we replace with the
# proper value.
kdf = PBKDF2HMAC(
algorithm,
int(params["length"]),
params["salt"],
int(params["iterations"]),
backend
)
derived_key = kdf.derive(params["password"])
assert binascii.hexlify(derived_key) == params["derived_key"]
def generate_aead_exception_test(cipher_factory, mode_factory):
def test_aead_exception(self, backend):
aead_exception_test(backend, cipher_factory, mode_factory)
return test_aead_exception
def aead_exception_test(backend, cipher_factory, mode_factory):
cipher = Cipher(
cipher_factory(binascii.unhexlify(b"0" * 32)),
mode_factory(binascii.unhexlify(b"0" * 24)),
backend
)
encryptor = cipher.encryptor()
encryptor.update(b"a" * 16)
with pytest.raises(NotYetFinalized):
encryptor.tag
with pytest.raises(AlreadyUpdated):
encryptor.authenticate_additional_data(b"b" * 16)
encryptor.finalize()
with pytest.raises(AlreadyFinalized):
encryptor.authenticate_additional_data(b"b" * 16)
with pytest.raises(AlreadyFinalized):
encryptor.update(b"b" * 16)
with pytest.raises(AlreadyFinalized):
encryptor.finalize()
cipher = Cipher(
cipher_factory(binascii.unhexlify(b"0" * 32)),
mode_factory(binascii.unhexlify(b"0" * 24), b"0" * 16),
backend
)
decryptor = cipher.decryptor()
decryptor.update(b"a" * 16)
with pytest.raises(AttributeError):
decryptor.tag
def generate_aead_tag_exception_test(cipher_factory, mode_factory):
def test_aead_tag_exception(self, backend):
aead_tag_exception_test(backend, cipher_factory, mode_factory)
return test_aead_tag_exception
def aead_tag_exception_test(backend, cipher_factory, mode_factory):
cipher = Cipher(
cipher_factory(binascii.unhexlify(b"0" * 32)),
mode_factory(binascii.unhexlify(b"0" * 24)),
backend
)
with pytest.raises(ValueError):
cipher.decryptor()
with pytest.raises(ValueError):
mode_factory(binascii.unhexlify(b"0" * 24), b"000")
with pytest.raises(ValueError):
mode_factory(binascii.unhexlify(b"0" * 24), b"000000", 2)
cipher = Cipher(
cipher_factory(binascii.unhexlify(b"0" * 32)),
mode_factory(binascii.unhexlify(b"0" * 24), b"0" * 16),
backend
)
with pytest.raises(ValueError):
cipher.encryptor()
def hkdf_derive_test(backend, algorithm, params):
hkdf = HKDF(
algorithm,
int(params["l"]),
salt=binascii.unhexlify(params["salt"]) or None,
info=binascii.unhexlify(params["info"]) or None,
backend=backend
)
okm = hkdf.derive(binascii.unhexlify(params["ikm"]))
assert okm == binascii.unhexlify(params["okm"])
def hkdf_extract_test(backend, algorithm, params):
hkdf = HKDF(
algorithm,
int(params["l"]),
salt=binascii.unhexlify(params["salt"]) or None,
info=binascii.unhexlify(params["info"]) or None,
backend=backend
)
prk = hkdf._extract(binascii.unhexlify(params["ikm"]))
assert prk == binascii.unhexlify(params["prk"])
def hkdf_expand_test(backend, algorithm, params):
hkdf = HKDFExpand(
algorithm,
int(params["l"]),
info=binascii.unhexlify(params["info"]) or None,
backend=backend
)
okm = hkdf.derive(binascii.unhexlify(params["prk"]))
assert okm == binascii.unhexlify(params["okm"])
def generate_hkdf_test(param_loader, path, file_names, algorithm):
all_params = _load_all_params(path, file_names, param_loader)
all_tests = [hkdf_extract_test, hkdf_expand_test, hkdf_derive_test]
@pytest.mark.parametrize(
("params", "hkdf_test"),
itertools.product(all_params, all_tests)
)
def test_hkdf(self, backend, params, hkdf_test):
hkdf_test(backend, algorithm, params)
return test_hkdf
def generate_kbkdf_counter_mode_test(param_loader, path, file_names):
all_params = _load_all_params(path, file_names, param_loader)
@pytest.mark.parametrize("params", all_params)
def test_kbkdf(self, backend, params):
kbkdf_counter_mode_test(backend, params)
return test_kbkdf
def kbkdf_counter_mode_test(backend, params):
supported_algorithms = {
'hmac_sha1': hashes.SHA1,
'hmac_sha224': hashes.SHA224,
'hmac_sha256': hashes.SHA256,
'hmac_sha384': hashes.SHA384,
'hmac_sha512': hashes.SHA512,
}
supported_counter_locations = {
"before_fixed": CounterLocation.BeforeFixed,
"after_fixed": CounterLocation.AfterFixed,
}
algorithm = supported_algorithms.get(params.get('prf'))
if algorithm is None or not backend.hmac_supported(algorithm()):
pytest.skip("KBKDF does not support algorithm: {0}".format(
params.get('prf')
))
ctr_loc = supported_counter_locations.get(params.get("ctrlocation"))
if ctr_loc is None or not isinstance(ctr_loc, CounterLocation):
pytest.skip("Does not support counter location: {0}".format(
params.get('ctrlocation')
))
ctrkdf = KBKDFHMAC(
algorithm(),
Mode.CounterMode,
params['l'] // 8,
params['rlen'] // 8,
None,
ctr_loc,
None,
None,
binascii.unhexlify(params['fixedinputdata']),
backend=backend)
ko = ctrkdf.derive(binascii.unhexlify(params['ki']))
assert binascii.hexlify(ko) == params["ko"]
def generate_rsa_verification_test(param_loader, path, file_names, hash_alg,
pad_factory):
all_params = _load_all_params(path, file_names, param_loader)
all_params = [i for i in all_params
if i["algorithm"] == hash_alg.name.upper()]
@pytest.mark.parametrize("params", all_params)
def test_rsa_verification(self, backend, params):
rsa_verification_test(backend, params, hash_alg, pad_factory)
return test_rsa_verification
def rsa_verification_test(backend, params, hash_alg, pad_factory):
public_numbers = rsa.RSAPublicNumbers(
e=params["public_exponent"],
n=params["modulus"]
)
public_key = public_numbers.public_key(backend)
pad = pad_factory(params, hash_alg)
verifier = public_key.verifier(
binascii.unhexlify(params["s"]),
pad,
hash_alg
)
verifier.update(binascii.unhexlify(params["msg"]))
if params["fail"]:
with pytest.raises(InvalidSignature):
verifier.verify()
else:
verifier.verify()
def _check_rsa_private_numbers(skey):
assert skey
pkey = skey.public_numbers
assert pkey
assert pkey.e
assert pkey.n
assert skey.d
assert skey.p * skey.q == pkey.n
assert skey.dmp1 == rsa.rsa_crt_dmp1(skey.d, skey.p)
assert skey.dmq1 == rsa.rsa_crt_dmq1(skey.d, skey.q)
assert skey.iqmp == rsa.rsa_crt_iqmp(skey.p, skey.q)
def _check_dsa_private_numbers(skey):
assert skey
pkey = skey.public_numbers
params = pkey.parameter_numbers
assert pow(params.g, skey.x, params.p) == pkey.y
|
Ayrx/cryptography
|
tests/hazmat/primitives/utils.py
|
Python
|
bsd-3-clause
| 15,812
|
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.common import ByteRun, ByteRuns, Hash
import cybox.test
from cybox.test import EntityTestCase
class TestByteRun(EntityTestCase, unittest.TestCase):
klass = ByteRun
_full_dict = {
'offset': 1000,
'file_system_offset': 1024,
'image_offset': 512,
'length': 10,
'hashes': [{'type': Hash.TYPE_MD5,
'simple_hash_value':
'0123456789abcdef0123456789abcdef'}],
'byte_run_data': "helloworld",
}
class TestByteRuns(unittest.TestCase):
def test_round_trip(self):
byteruns_list = [
{'byte_run_data': "a",
'length': 1},
{'byte_run_data': "blahblah",
'length': 8},
{'byte_run_data': "aeiou",
'length': 5},
]
byteruns_list2 = cybox.test.round_trip_list(ByteRuns, byteruns_list)
self.assertEqual(byteruns_list, byteruns_list2)
if __name__ == "__main__":
unittest.main()
|
CybOXProject/python-cybox
|
cybox/test/common/byterun_test.py
|
Python
|
bsd-3-clause
| 1,198
|
import sys, os
_filename=os.path.join(os.path.dirname(__file__), '..')
sys.path.append(_filename)
#from test_DoubleSplitMix import DSMTest
#DSMTest({}).run().analyse()
#from test_DoublePinchHex import DoublePinchHexTest
#DoublePinchHexTest({}).run().analyse().plot()
#from test_Flashsep import FlashSepTest
#FlashSepTest({}).run().analyse()
# Turbine
#from test_Turbine import TurbineTest
#TurbineTest({}).run().analyse()
# Heatex
#from test_Heatex import HeatexTest
#hx = HeatexTest({}).run().analyse().plot()
# Storage
#from test_Storage import StorageTest
#hx = StorageTest({}).run().analyse().plot()
from dna.test.test_Discharge import DischargeTest
hx = DischargeTest({}).run().analyse().plot()
#from test_ThreeStepDischarge import ThreeStepDischargeTest
#hx = ThreeStepDischargeTest({}).run().analyse().plot()
#from test_Condenser import CondenserTest
#hx = CondenserTest({}).run().analyse().plot()
# Receiver
#from test_Receiver import ReceiverTest
#hx = ReceiverTest({}).run().analyse()
|
mwoc/pydna
|
test.py
|
Python
|
bsd-3-clause
| 1,005
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yin, Haichao<haichaox.yin@intel.com>
import os
import sys
import commands
import shutil
import comm
def generate_unittest():
try:
flag = ''
num = 0
reportPath = os.path.join(comm.SCRIPT_DIR_NAME, '../report')
comm.setUp()
positive_datas = ['org.xwalk.tests', 'org.xwalk.t1234', 'org.example.xwal_', 'org.example.te_st', 'or_g.example.foo', 'org000.example.foo', 'org.example123.foo']
negative_datas = ['org.xwalk', 'test', 'org.example.1234test', 'org.example.1234', '123org.example.foo', 'org.123example.foo', 'org.example._xwalk', 'org.xwalk.Tests', '_org.example.foo', 'org.xwalk.node']
if os.path.exists(reportPath):
shutil.rmtree(reportPath)
os.mkdir(reportPath)
pkgNameTestPath = os.path.join(comm.SCRIPT_DIR_NAME, "pkgName.py")
if os.path.exists(pkgNameTestPath):
os.remove(pkgNameTestPath)
testfile = open(pkgNameTestPath,'a+')
testTitle = '''#!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<hongjuanx.wang@intel.com>'''
testfile.write(testTitle + "\n")
testfile.write(
"\nimport random,os,sys,unittest,allpairs \nreload(sys) \nsys.setdefaultencoding( \"utf-8\" ) \nclass TestCaseUnit(unittest.TestCase): \n "
)
for positive_data in positive_datas:
num += 1
flag = 'positive' + str(num)
cmd = positive_data
casenum = "\n def test_pkgName_" + flag + "(self):\n self.assertEqual(\"PASS\", allpairs.tryRunApp(\"" + flag +"\", \"" + cmd+ "\"))"+ "\n"
testfile.write(casenum)
for negative_data in negative_datas:
num += 1
flag = 'negative' + str(num)
cmd = negative_data
casenum = "\n def test_pkgName_" + flag + "(self):\n self.assertEqual(\"PASS\", allpairs.tryRunApp(\"" + flag +"\", \"" + cmd+ "\"))"+ "\n"
testfile.write(casenum)
testfile.write("\nif __name__ == '__main__':\n unittest.main()")
testfile.close()
os.system("chmod +x " + pkgNameTestPath)
except Exception,e:
print Exception,"Generate pkgName.py error:",e
sys.exit(1)
def tryRunApp(item, projectName):
try:
comm.setUp()
flag = item[:10].strip()
os.system("chmod 777 " + comm.TEMP_DATA_PATH)
os.chdir(comm.TEMP_DATA_PATH)
cmd = "crosswalk-app create " + projectName
packstatus = commands.getstatusoutput(cmd)
if 'negative' in flag:
if 'ERROR' in packstatus[1]:
result = 'PASS'
print "%21s\tFAIL\tFAIL" % projectName
if projectName in os.listdir(comm.TEMP_DATA_PATH):
result = 'FAIL'
print "%21s\tFAIL\tPASS" % projectName
else:
result = 'FAIL'
print "%21s\tFAIL\tPASS" % projectName
elif 'positive' in flag:
if packstatus[0] == 0:
result = 'PASS'
print "%21s\tPASS\tPASS" % projectName
else:
result = 'FAIL'
print "%21s\tPASS\tFAIL" % projectName
comm.cleanTempData(projectName)
os.chdir(comm.SCRIPT_DIR_NAME)
os.system("rm -R " + comm.TEMP_DATA_PATH)
return result
except Exception,e:
print Exception,"Run pkgName.py error:",e
sys.exit(1)
if __name__ == '__main__':
generate_unittest()
|
pk-sam/crosswalk-test-suite
|
apptools/apptools-linux-tests/apptools/allpairs.py
|
Python
|
bsd-3-clause
| 6,514
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command to install/update gcloud components."""
import argparse
from googlecloudsdk.calliope import base
from googlecloudsdk.core.console import console_io
# This command is silent as it does not produce any resource output.
# In fact, it should not run any display code, as the installation has changed
# and the current run state is invalid in relation to the new installation.
class Update(base.SilentCommand):
"""Update all of your installed components to the latest version.
Ensure that the latest version of all installed components is installed on the
local workstation.
"""
detailed_help = {
'DESCRIPTION': """\
{description}
The command lists all components it is about to update, and asks for
confirmation before proceeding.
By default, this command will update all components to their latest
version. This can be configured by using the --version flag to choose
a specific version to update to. This version may also be a version
older than the one that is currently installed.
You can see your current Cloud SDK version by running:
$ {top_command} version
""",
'EXAMPLES': """\
To update all installed components to the latest version:
$ {command}
To update all installed components to version 1.2.3:
$ {command} --version 1.2.3
""",
}
@staticmethod
def Args(parser):
parser.add_argument(
'--version',
help='An optional Cloud SDK version to update your components to. By '
'default, components are updated to the latest available version.')
parser.add_argument(
'component_ids',
metavar='COMPONENT-IDS',
nargs='*',
help=argparse.SUPPRESS)
parser.add_argument(
'--allow-no-backup',
required=False,
action='store_true',
help=argparse.SUPPRESS)
def Run(self, args):
"""Runs the list command."""
if args.component_ids and not args.version:
install = console_io.PromptContinue(
message='You have specified individual components to update. If you '
'are trying to install new components, use:\n $ gcloud '
'components install {components}'.format(
components=' '.join(args.component_ids)),
prompt_string='Do you want to run install instead',
default=False,
throw_if_unattended=False,
cancel_on_no=False)
if install:
self.group.update_manager.Install(
args.component_ids, allow_no_backup=args.allow_no_backup)
return
self.group.update_manager.Update(
args.component_ids, allow_no_backup=args.allow_no_backup,
version=args.version)
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/surface/components/update.py
|
Python
|
bsd-3-clause
| 3,377
|
# BSD 3-Clause License
#
# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
import logging
import logging.handlers
import math
import os
import re
import socket
import threading
from elasticapm.conf.constants import BASE_SANITIZE_FIELD_NAMES
from elasticapm.utils import compat, starmatch_to_regex
from elasticapm.utils.logging import get_logger
from elasticapm.utils.threading import IntervalTimer, ThreadManager
__all__ = ("setup_logging", "Config")
logger = get_logger("elasticapm.conf")
log_levels_map = {
"trace": 5,
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"warn": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
"off": 1000,
}
logfile_set_up = False
class ConfigurationError(ValueError):
def __init__(self, msg, field_name):
self.field_name = field_name
super(ValueError, self).__init__(msg)
class _ConfigValue(object):
"""
Base class for configuration values
dict_key
String representing the key used for this config value in dict configs.
env_key
String representing the key used in environment variables for this
config value. If not specified, will be set to `"ELASTIC_APM_" + dict_key`.
type
Type of value stored in this config value.
validators
List of validator classes. Must be callables, which will be called with
a value and the dict_key for the config value. The validator either
returns the validated value or raises a ConfigurationError if validation
fails.
callbacks
List of functions which will be called when the config value is updated.
The callbacks must match this signature:
callback(dict_key, old_value, new_value, config_instance)
Note that callbacks wait until the end of any given `update()` operation
and are called at this point. This, coupled with the fact that callbacks
receive the config instance, means that callbacks can utilize multiple
configuration values (such as is the case for logging). This is
complicated if more than one of the involved config values are
dynamic, as both would need callbacks and the callback would need to
be idempotent.
callbacks_on_default
Whether the callback should be called on config initialization if the
default value is used. Default: True
default
The default for this config value if not user-configured.
required
Whether this config value is required. If a default is specified,
this is a redundant option (except to ensure that this config value
is specified if a default were ever to be removed).
Note that _ConfigValues and any inheriting classes must implement __set__
and __get__. The calling instance will always be a _ConfigBase descendant
and the __set__ and __get__ calls will access `instance._values[self.dict_key]`
to get and set values.
"""
def __init__(
self,
dict_key,
env_key=None,
type=compat.text_type,
validators=None,
callbacks=None,
callbacks_on_default=True,
default=None,
required=False,
):
self.type = type
self.dict_key = dict_key
self.validators = validators
self.callbacks = callbacks
self.default = default
self.required = required
if env_key is None:
env_key = "ELASTIC_APM_" + dict_key
self.env_key = env_key
self.callbacks_on_default = callbacks_on_default
def __get__(self, instance, owner):
if instance:
return instance._values.get(self.dict_key, self.default)
else:
return self.default
def __set__(self, config_instance, value):
value = self._validate(config_instance, value)
self._callback_if_changed(config_instance, value)
config_instance._values[self.dict_key] = value
def _validate(self, instance, value):
if value is None and self.required:
raise ConfigurationError(
"Configuration error: value for {} is required.".format(self.dict_key), self.dict_key
)
if self.validators and value is not None:
for validator in self.validators:
value = validator(value, self.dict_key)
if self.type and value is not None:
try:
value = self.type(value)
except ValueError as e:
raise ConfigurationError("{}: {}".format(self.dict_key, compat.text_type(e)), self.dict_key)
instance._errors.pop(self.dict_key, None)
return value
def _callback_if_changed(self, instance, new_value):
"""
If the value changed (checked against instance._values[self.dict_key]),
then run the callback function (if defined)
"""
old_value = instance._values.get(self.dict_key, self.default)
if old_value != new_value:
instance.callbacks_queue.append((self.dict_key, old_value, new_value))
def call_callbacks(self, old_value, new_value, config_instance):
if not self.callbacks:
return
for callback in self.callbacks:
try:
callback(self.dict_key, old_value, new_value, config_instance)
except Exception as e:
raise ConfigurationError(
"Callback {} raised an exception when setting {} to {}: {}".format(
callback, self.dict_key, new_value, e
),
self.dict_key,
)
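# Illustrative note: a value declared on a _ConfigBase subclass as, for example,
#     debug = _BoolConfigValue("DEBUG", default=False)
# is resolved by update() from the environment variable ELASTIC_APM_DEBUG first,
# then from the inline kwargs ("debug"), then from the config dict ("DEBUG"),
# falling back to the default when none of those sources provides a value.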
class _ListConfigValue(_ConfigValue):
def __init__(self, dict_key, list_separator=",", **kwargs):
self.list_separator = list_separator
super(_ListConfigValue, self).__init__(dict_key, **kwargs)
def __set__(self, instance, value):
if isinstance(value, compat.string_types):
value = value.split(self.list_separator)
elif value is not None:
value = list(value)
if value:
value = [self.type(item) for item in value]
self._callback_if_changed(instance, value)
instance._values[self.dict_key] = value
class _DictConfigValue(_ConfigValue):
def __init__(self, dict_key, item_separator=",", keyval_separator="=", **kwargs):
self.item_separator = item_separator
self.keyval_separator = keyval_separator
super(_DictConfigValue, self).__init__(dict_key, **kwargs)
def __set__(self, instance, value):
if isinstance(value, compat.string_types):
items = (item.split(self.keyval_separator) for item in value.split(self.item_separator))
value = {key.strip(): self.type(val.strip()) for key, val in items}
elif not isinstance(value, dict):
# TODO: better error handling
value = None
self._callback_if_changed(instance, value)
instance._values[self.dict_key] = value
class _BoolConfigValue(_ConfigValue):
def __init__(self, dict_key, true_string="true", false_string="false", **kwargs):
self.true_string = true_string
self.false_string = false_string
super(_BoolConfigValue, self).__init__(dict_key, **kwargs)
def __set__(self, instance, value):
if isinstance(value, compat.string_types):
if value.lower() == self.true_string:
value = True
elif value.lower() == self.false_string:
value = False
self._callback_if_changed(instance, value)
instance._values[self.dict_key] = bool(value)
class RegexValidator(object):
def __init__(self, regex, verbose_pattern=None):
self.regex = regex
self.verbose_pattern = verbose_pattern or regex
def __call__(self, value, field_name):
value = compat.text_type(value)
match = re.match(self.regex, value)
if match:
return value
raise ConfigurationError("{} does not match pattern {}".format(value, self.verbose_pattern), field_name)
class UnitValidator(object):
def __init__(self, regex, verbose_pattern, unit_multipliers):
self.regex = regex
self.verbose_pattern = verbose_pattern
self.unit_multipliers = unit_multipliers
def __call__(self, value, field_name):
value = compat.text_type(value)
match = re.match(self.regex, value, re.IGNORECASE)
if not match:
raise ConfigurationError("{} does not match pattern {}".format(value, self.verbose_pattern), field_name)
val, unit = match.groups()
try:
val = int(val) * self.unit_multipliers[unit]
except KeyError:
raise ConfigurationError("{} is not a supported unit".format(unit), field_name)
return val
class PrecisionValidator(object):
"""
Forces a float value to `precision` digits of precision.
Rounds half away from zero.
If `minimum` is provided, and the value rounds to 0 (but was not zero to
begin with), use the minimum instead.
"""
def __init__(self, precision=0, minimum=None):
self.precision = precision
self.minimum = minimum
def __call__(self, value, field_name):
try:
value = float(value)
except ValueError:
raise ConfigurationError("{} is not a float".format(value), field_name)
multiplier = 10 ** self.precision
rounded = math.floor(value * multiplier + 0.5) / multiplier
if rounded == 0 and self.minimum and value != 0:
rounded = self.minimum
return rounded
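# Worked examples (illustrative): PrecisionValidator(2)(0.005, "X") rounds half
# away from zero to 0.01, while PrecisionValidator(4, 0.0001)(0.00001, "X")
# rounds to 0 and is therefore bumped up to the configured minimum, 0.0001.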
duration_validator = UnitValidator(
r"^((?:-)?\d+)(us|ms|s|m)$", r"\d+(us|ms|s|m)", {"us": 0.001, "ms": 1, "s": 1000, "m": 60000}
)
size_validator = UnitValidator(
r"^(\d+)(b|kb|mb|gb)$", r"\d+(b|KB|MB|GB)", {"b": 1, "kb": 1024, "mb": 1024 * 1024, "gb": 1024 * 1024 * 1024}
)
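# Worked examples (illustrative): duration_validator("500ms", "X") yields 500 and
# duration_validator("5s", "X") yields 5000 (both in milliseconds), while
# size_validator("10kb", "X") yields 10240 (bytes).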
class ExcludeRangeValidator(object):
def __init__(self, range_start, range_end, range_desc):
self.range_start = range_start
self.range_end = range_end
self.range_desc = range_desc
def __call__(self, value, field_name):
if self.range_start <= value <= self.range_end:
raise ConfigurationError(
"{} cannot be in range: {}".format(
value, self.range_desc.format(**{"range_start": self.range_start, "range_end": self.range_end})
),
field_name,
)
return value
class FileIsReadableValidator(object):
def __call__(self, value, field_name):
value = os.path.normpath(value)
if not os.path.exists(value):
raise ConfigurationError("{} does not exist".format(value), field_name)
elif not os.path.isfile(value):
raise ConfigurationError("{} is not a file".format(value), field_name)
elif not os.access(value, os.R_OK):
raise ConfigurationError("{} is not readable".format(value), field_name)
return value
class EnumerationValidator(object):
"""
Validator which ensures that a given config value is chosen from a list
of valid string options.
"""
def __init__(self, valid_values, case_sensitive=False):
"""
valid_values
List of valid string values for the config value
case_sensitive
Whether to compare case when comparing a value to the valid list.
Defaults to False (case-insensitive)
"""
self.case_sensitive = case_sensitive
if case_sensitive:
self.valid_values = {s: s for s in valid_values}
else:
self.valid_values = {s.lower(): s for s in valid_values}
def __call__(self, value, field_name):
if self.case_sensitive:
ret = self.valid_values.get(value)
else:
ret = self.valid_values.get(value.lower())
if ret is None:
raise ConfigurationError(
"{} is not in the list of valid values: {}".format(value, list(self.valid_values.values())), field_name
)
return ret
def _log_level_callback(dict_key, old_value, new_value, config_instance):
elasticapm_logger = logging.getLogger("elasticapm")
elasticapm_logger.setLevel(log_levels_map.get(new_value, 100))
global logfile_set_up
if not logfile_set_up and config_instance.log_file:
logfile_set_up = True
filehandler = logging.handlers.RotatingFileHandler(
config_instance.log_file, maxBytes=config_instance.log_file_size, backupCount=1
)
try:
import ecs_logging
filehandler.setFormatter(ecs_logging.StdlibFormatter())
except ImportError:
pass
elasticapm_logger.addHandler(filehandler)
def _log_ecs_reformatting_callback(dict_key, old_value, new_value, config_instance):
"""
If ecs_logging is installed and log_ecs_reformatting is set to "override", we should
    set the ecs_logging.StdlibFormatter as the formatter for every handler in
the root logger, and set the default processor for structlog to the
ecs_logging.StructlogFormatter.
"""
if new_value.lower() == "override":
try:
import ecs_logging
except ImportError:
return
# Stdlib
root_logger = logging.getLogger()
formatter = ecs_logging.StdlibFormatter()
for handler in root_logger.handlers:
handler.setFormatter(formatter)
# Structlog
try:
import structlog
structlog.configure(processors=[ecs_logging.StructlogFormatter()])
except ImportError:
pass
class _ConfigBase(object):
_NO_VALUE = object() # sentinel object
def __init__(self, config_dict=None, env_dict=None, inline_dict=None, copy=False):
"""
config_dict
Configuration dict as is common for frameworks such as flask and django.
Keys match the _ConfigValue.dict_key (usually all caps)
env_dict
Environment variables dict. Keys match the _ConfigValue.env_key
(usually "ELASTIC_APM_" + dict_key)
inline_dict
Any config passed in as kwargs to the Client object. Typically
the keys match the names of the _ConfigValue variables in the Config
object.
copy
Whether this object is being created to copy an existing Config
object. If True, don't run the initial `update` (which would call
callbacks if present)
"""
self._values = {}
self._errors = {}
self._dict_key_lookup = {}
self.callbacks_queue = []
for config_value in self.__class__.__dict__.values():
if not isinstance(config_value, _ConfigValue):
continue
self._dict_key_lookup[config_value.dict_key] = config_value
if not copy:
self.update(config_dict, env_dict, inline_dict, initial=True)
def update(self, config_dict=None, env_dict=None, inline_dict=None, initial=False):
if config_dict is None:
config_dict = {}
if env_dict is None:
env_dict = os.environ
if inline_dict is None:
inline_dict = {}
for field, config_value in compat.iteritems(self.__class__.__dict__):
if not isinstance(config_value, _ConfigValue):
continue
new_value = self._NO_VALUE
# first check environment
if config_value.env_key and config_value.env_key in env_dict:
new_value = env_dict[config_value.env_key]
# check the inline config
elif field in inline_dict:
new_value = inline_dict[field]
# finally, check config dictionary
elif config_value.dict_key in config_dict:
new_value = config_dict[config_value.dict_key]
            # only set if a new value was provided; otherwise fall back to the field default.
if new_value is not self._NO_VALUE:
try:
setattr(self, field, new_value)
except ConfigurationError as e:
self._errors[e.field_name] = str(e)
# handle initial callbacks
if (
initial
and config_value.callbacks_on_default
and getattr(self, field) is not None
and getattr(self, field) == config_value.default
):
self.callbacks_queue.append((config_value.dict_key, self._NO_VALUE, config_value.default))
# if a field has not been provided by any config source, we have to check separately if it is required
if config_value.required and getattr(self, field) is None:
self._errors[config_value.dict_key] = "Configuration error: value for {} is required.".format(
config_value.dict_key
)
self.call_pending_callbacks()
def call_pending_callbacks(self):
"""
Call callbacks for config options matching list of tuples:
(dict_key, old_value, new_value)
"""
for dict_key, old_value, new_value in self.callbacks_queue:
self._dict_key_lookup[dict_key].call_callbacks(old_value, new_value, self)
self.callbacks_queue = []
@property
def values(self):
return self._values
@values.setter
def values(self, values):
self._values = values
@property
def errors(self):
return self._errors
def copy(self):
c = self.__class__(copy=True)
c._errors = {}
c.values = self.values.copy()
return c
class Config(_ConfigBase):
service_name = _ConfigValue(
"SERVICE_NAME", validators=[RegexValidator("^[a-zA-Z0-9 _-]+$")], default="python_service", required=True
)
service_node_name = _ConfigValue("SERVICE_NODE_NAME")
environment = _ConfigValue("ENVIRONMENT")
secret_token = _ConfigValue("SECRET_TOKEN")
api_key = _ConfigValue("API_KEY")
debug = _BoolConfigValue("DEBUG", default=False)
server_url = _ConfigValue("SERVER_URL", default="http://localhost:8200", required=True)
server_cert = _ConfigValue("SERVER_CERT", validators=[FileIsReadableValidator()])
verify_server_cert = _BoolConfigValue("VERIFY_SERVER_CERT", default=True)
use_certifi = _BoolConfigValue("USE_CERTIFI", default=True)
include_paths = _ListConfigValue("INCLUDE_PATHS")
exclude_paths = _ListConfigValue("EXCLUDE_PATHS", default=compat.get_default_library_patters())
filter_exception_types = _ListConfigValue("FILTER_EXCEPTION_TYPES")
server_timeout = _ConfigValue(
"SERVER_TIMEOUT",
type=float,
validators=[
UnitValidator(r"^((?:-)?\d+)(ms|s|m)?$", r"\d+(ms|s|m)", {"ms": 0.001, "s": 1, "m": 60, None: 1000})
],
default=5,
)
hostname = _ConfigValue("HOSTNAME", default=socket.gethostname())
auto_log_stacks = _BoolConfigValue("AUTO_LOG_STACKS", default=True)
transport_class = _ConfigValue("TRANSPORT_CLASS", default="elasticapm.transport.http.Transport", required=True)
processors = _ListConfigValue(
"PROCESSORS",
default=[
"elasticapm.processors.sanitize_stacktrace_locals",
"elasticapm.processors.sanitize_http_request_cookies",
"elasticapm.processors.sanitize_http_response_cookies",
"elasticapm.processors.sanitize_http_headers",
"elasticapm.processors.sanitize_http_wsgi_env",
"elasticapm.processors.sanitize_http_request_body",
],
)
sanitize_field_names = _ListConfigValue(
"SANITIZE_FIELD_NAMES", type=starmatch_to_regex, default=BASE_SANITIZE_FIELD_NAMES
)
metrics_sets = _ListConfigValue(
"METRICS_SETS",
default=[
"elasticapm.metrics.sets.cpu.CPUMetricSet",
],
)
metrics_interval = _ConfigValue(
"METRICS_INTERVAL",
type=int,
validators=[duration_validator, ExcludeRangeValidator(1, 999, "{range_start} - {range_end} ms")],
default=30000,
)
breakdown_metrics = _BoolConfigValue("BREAKDOWN_METRICS", default=True)
prometheus_metrics = _BoolConfigValue("PROMETHEUS_METRICS", default=False)
prometheus_metrics_prefix = _ConfigValue("PROMETHEUS_METRICS_PREFIX", default="prometheus.metrics.")
disable_metrics = _ListConfigValue("DISABLE_METRICS", type=starmatch_to_regex, default=[])
central_config = _BoolConfigValue("CENTRAL_CONFIG", default=True)
api_request_size = _ConfigValue("API_REQUEST_SIZE", type=int, validators=[size_validator], default=768 * 1024)
api_request_time = _ConfigValue("API_REQUEST_TIME", type=int, validators=[duration_validator], default=10 * 1000)
transaction_sample_rate = _ConfigValue(
"TRANSACTION_SAMPLE_RATE", type=float, validators=[PrecisionValidator(4, 0.0001)], default=1.0
)
transaction_max_spans = _ConfigValue("TRANSACTION_MAX_SPANS", type=int, default=500)
stack_trace_limit = _ConfigValue("STACK_TRACE_LIMIT", type=int, default=500)
span_frames_min_duration = _ConfigValue(
"SPAN_FRAMES_MIN_DURATION",
default=5,
validators=[
UnitValidator(r"^((?:-)?\d+)(ms|s|m)?$", r"\d+(ms|s|m)", {"ms": 1, "s": 1000, "m": 60000, None: 1})
],
type=int,
)
span_compression_enabled = _BoolConfigValue("SPAN_COMPRESSION_ENABLED", default=False)
span_compression_exact_match_max_duration = _ConfigValue(
"SPAN_COMPRESSION_EXACT_MATCH_MAX_DURATION",
default=50,
validators=[duration_validator],
type=int,
)
span_compression_same_kind_max_duration = _ConfigValue(
"SPAN_COMPRESSION_SAME_KIND_MAX_DURATION",
default=5,
validators=[duration_validator],
type=int,
)
exit_span_min_duration = _ConfigValue(
"exit_span_min_duration",
default=1,
validators=[duration_validator],
type=float,
)
collect_local_variables = _ConfigValue("COLLECT_LOCAL_VARIABLES", default="errors")
source_lines_error_app_frames = _ConfigValue("SOURCE_LINES_ERROR_APP_FRAMES", type=int, default=5)
source_lines_error_library_frames = _ConfigValue("SOURCE_LINES_ERROR_LIBRARY_FRAMES", type=int, default=5)
source_lines_span_app_frames = _ConfigValue("SOURCE_LINES_SPAN_APP_FRAMES", type=int, default=0)
source_lines_span_library_frames = _ConfigValue("SOURCE_LINES_SPAN_LIBRARY_FRAMES", type=int, default=0)
local_var_max_length = _ConfigValue("LOCAL_VAR_MAX_LENGTH", type=int, default=200)
local_var_list_max_length = _ConfigValue("LOCAL_VAR_LIST_MAX_LENGTH", type=int, default=10)
local_var_dict_max_length = _ConfigValue("LOCAL_VAR_DICT_MAX_LENGTH", type=int, default=10)
capture_body = _ConfigValue(
"CAPTURE_BODY",
default="off",
validators=[lambda val, _: {"errors": "error", "transactions": "transaction"}.get(val, val)],
)
async_mode = _BoolConfigValue("ASYNC_MODE", default=True)
instrument_django_middleware = _BoolConfigValue("INSTRUMENT_DJANGO_MIDDLEWARE", default=True)
autoinsert_django_middleware = _BoolConfigValue("AUTOINSERT_DJANGO_MIDDLEWARE", default=True)
transactions_ignore_patterns = _ListConfigValue("TRANSACTIONS_IGNORE_PATTERNS", default=[])
transaction_ignore_urls = _ListConfigValue("TRANSACTION_IGNORE_URLS", type=starmatch_to_regex, default=[])
service_version = _ConfigValue("SERVICE_VERSION")
framework_name = _ConfigValue("FRAMEWORK_NAME")
framework_version = _ConfigValue("FRAMEWORK_VERSION")
global_labels = _DictConfigValue("GLOBAL_LABELS")
disable_send = _BoolConfigValue("DISABLE_SEND", default=False)
enabled = _BoolConfigValue("ENABLED", default=True)
recording = _BoolConfigValue("RECORDING", default=True)
instrument = _BoolConfigValue("INSTRUMENT", default=True)
enable_distributed_tracing = _BoolConfigValue("ENABLE_DISTRIBUTED_TRACING", default=True)
capture_headers = _BoolConfigValue("CAPTURE_HEADERS", default=True)
django_transaction_name_from_route = _BoolConfigValue("DJANGO_TRANSACTION_NAME_FROM_ROUTE", default=False)
disable_log_record_factory = _BoolConfigValue("DISABLE_LOG_RECORD_FACTORY", default=False)
use_elastic_traceparent_header = _BoolConfigValue("USE_ELASTIC_TRACEPARENT_HEADER", default=True)
use_elastic_excepthook = _BoolConfigValue("USE_ELASTIC_EXCEPTHOOK", default=False)
cloud_provider = _ConfigValue("CLOUD_PROVIDER", default=True)
log_level = _ConfigValue(
"LOG_LEVEL",
validators=[EnumerationValidator(["trace", "debug", "info", "warning", "warn", "error", "critical", "off"])],
callbacks=[_log_level_callback],
)
log_file = _ConfigValue("LOG_FILE", default="")
log_file_size = _ConfigValue("LOG_FILE_SIZE", validators=[size_validator], type=int, default=50 * 1024 * 1024)
log_ecs_reformatting = _ConfigValue(
"LOG_ECS_REFORMATTING",
validators=[EnumerationValidator(["off", "override"])],
callbacks=[_log_ecs_reformatting_callback],
default="off",
)
@property
def is_recording(self):
if not self.enabled:
return False
else:
return self.recording
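# Illustrative sketch (hypothetical values): a Config can be fed from a
# framework-style dict (keys match each dict_key), from environment variables,
# and from inline kwargs (keys match the attribute names), with the environment
# taking precedence, e.g.
#     config = Config(
#         config_dict={"SERVER_URL": "http://apm.example.com:8200"},
#         inline_dict={"service_name": "my-service"},
#     )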
class VersionedConfig(ThreadManager):
"""
A thin layer around Config that provides versioning
"""
__slots__ = (
"_config",
"_version",
"_first_config",
"_first_version",
"_lock",
"transport",
"_update_thread",
"pid",
"start_stop_order",
)
def __init__(self, config_object, version, transport=None):
"""
Create a new VersionedConfig with an initial Config object
:param config_object: the initial Config object
:param version: a version identifier for the configuration
"""
self._config = self._first_config = config_object
self._version = self._first_version = version
self.transport = transport
self._lock = threading.Lock()
self._update_thread = None
super(VersionedConfig, self).__init__()
def update(self, version, **config):
"""
Update the configuration version
:param version: version identifier for the new configuration
:param config: a key/value map of new configuration
:return: configuration errors, if any
"""
new_config = self._config.copy()
# pass an empty env dict to ensure the environment doesn't get precedence
new_config.update(inline_dict=config, env_dict={})
if not new_config.errors:
with self._lock:
self._version = version
self._config = new_config
else:
return new_config.errors
def reset(self):
"""
Reset state to the original configuration
Note that because ConfigurationValues can have callbacks, we need to
note any differences between the original configuration and the most
recent configuration and run any callbacks that might exist for those
values.
"""
callbacks = []
for key in compat.iterkeys(self._config.values):
if key in self._first_config.values and self._config.values[key] != self._first_config.values[key]:
callbacks.append((key, self._config.values[key], self._first_config.values[key]))
with self._lock:
self._version = self._first_version
self._config = self._first_config
self._config.callbacks_queue.extend(callbacks)
self._config.call_pending_callbacks()
@property
def changed(self):
return self._config != self._first_config
def __getattr__(self, item):
return getattr(self._config, item)
def __setattr__(self, name, value):
if name not in self.__slots__:
setattr(self._config, name, value)
else:
super(VersionedConfig, self).__setattr__(name, value)
@property
def config_version(self):
return self._version
def update_config(self):
if not self.transport:
logger.warning("No transport set for config updates, skipping")
return
logger.debug("Checking for new config...")
keys = {"service": {"name": self.service_name}}
if self.environment:
keys["service"]["environment"] = self.environment
new_version, new_config, next_run = self.transport.get_config(self.config_version, keys)
if new_version and new_config:
errors = self.update(new_version, **new_config)
if errors:
logger.error("Error applying new configuration: %s", repr(errors))
else:
logger.info(
"Applied new remote configuration: %s",
"; ".join(
"%s=%s" % (compat.text_type(k), compat.text_type(v)) for k, v in compat.iteritems(new_config)
),
)
elif new_version == self.config_version:
logger.debug("Remote config unchanged")
elif not new_config and self.changed:
logger.debug("Remote config disappeared, resetting to original")
self.reset()
return next_run
def start_thread(self, pid=None):
self._update_thread = IntervalTimer(
self.update_config, 1, "eapm conf updater", daemon=True, evaluate_function_interval=True
)
self._update_thread.start()
super(VersionedConfig, self).start_thread(pid=pid)
def stop_thread(self):
if self._update_thread:
self._update_thread.cancel()
self._update_thread = None
def setup_logging(handler):
"""
Configures logging to pipe to Elastic APM.
For a typical Python install:
>>> from elasticapm.handlers.logging import LoggingHandler
>>> client = ElasticAPM(...)
>>> setup_logging(LoggingHandler(client))
Within Django:
>>> from elasticapm.contrib.django.handlers import LoggingHandler
>>> setup_logging(LoggingHandler())
Returns a boolean based on if logging was configured or not.
"""
# TODO We should probably revisit this. Does it make more sense as
# a method within the Client class? The Client object could easily
# pass itself into LoggingHandler and we could eliminate args altogether.
logger = logging.getLogger()
if handler.__class__ in map(type, logger.handlers):
return False
logger.addHandler(handler)
return True
|
beniwohli/apm-agent-python
|
elasticapm/conf/__init__.py
|
Python
|
bsd-3-clause
| 32,319
|
from logmon import app
from flask import render_template
LOG_FILE = app.config['LOG_FILE']
MAX_LEN = -100
@app.route('/')
def index():
with open(LOG_FILE, 'r') as f:
log_buffer = f.readlines()
return render_template('index.html', log_buffer=log_buffer[MAX_LEN:])
if __name__ == '__main__':
app.run()
|
maxcountryman/logmon
|
logmon/views.py
|
Python
|
bsd-3-clause
| 325
|
# This test checks the field list parameter specified in the query.
# Usage: python ./test_fieldList_inQuery/test_fieldList.py $SRCH2_ENGINE ./test_fieldList_inQuery/queriesAndResults.txt
# We check it by specifying the field list in the query and comparing the returned response with the expected result.
# The expected result contains only those fields which we mention in the query.
import sys, urllib2, json, time, subprocess, os, commands, signal
sys.path.insert(0, 'srch2lib')
import test_lib
port = '8087'
# Function for checking the results
def checkResult(query, responseJson,resultValue):
# for key, value in responseJson:
# print key, value
isPass=1
if len(responseJson) == len(resultValue):
for i in range(0, len(resultValue)):
data = json.dumps(responseJson[i]['record'])
#print response_json['results'][i]['record']['id']
resultValue[i] = resultValue[i].rstrip('\n')
if (data != resultValue[i]):
isPass=0
print query+' test failed'
print 'query results||given results'
print 'number of results:'+str(len(responseJson))+'||'+str(len(resultValue))
for i in range(0, len(responseJson)):
print str(responseJson[i]['record']) +'||'+str(resultValue[i])
break
else:
isPass=0
print query+' test failed'
print 'query results||given results'
print 'number of results:'+str(len(responseJson))+'||'+str(len(resultValue))
maxLen = max(len(responseJson),len(resultValue))
for i in range(0, maxLen):
if i >= len(resultValue):
                print str(responseJson[i]['record'])+'||'
elif i >= len(responseJson):
print ' '+'||'+resultValue[i]
else:
                print str(responseJson[i]['record'])+'||'+resultValue[i]
if isPass == 1:
print query+' test pass'
return 0
return 1
#prepare the query based on the valid syntax
def prepareQuery(queryKeywords):
query = ''
################# prepare main query part
query = query + 'q='
# local parameters
query = query + '%7BdefaultPrefixComplete=COMPLETE%7D'
# keywords section
for i in range(0, len(queryKeywords)):
if i == (len(queryKeywords)-1):
query=query+queryKeywords[i]+'*' # last keyword prefix
else:
query=query+queryKeywords[i]+'%20AND%20'
################# fuzzy parameter
query = query + '&fuzzy=false&fl=name,category'
# print 'Query : ' + query
##################################
return query
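# Illustrative example: prepareQuery(['trust']) returns
# 'q=%7BdefaultPrefixComplete=COMPLETE%7Dtrust*&fuzzy=false&fl=name,category',
# i.e. a prefix query on "trust" whose response is restricted to the name and
# category fields.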
def testFieldList(queriesAndResultsPath, binary_path, configFile):
#Start the engine server
args = [ binary_path, '--config-file=' + configFile ]
if test_lib.confirmPortAvailable(port) == False:
print 'Port ' + str(port) + ' already in use - aborting'
return -1
print 'starting engine: ' + args[0] + ' ' + args[1]
serverHandle = test_lib.startServer(args)
test_lib.pingServer(port)
#construct the query
failCount = 0
f_in = open(queriesAndResultsPath, 'r')
for line in f_in:
#get the query keyword and results
print line
value=line.split('||')
queryValue=value[0]
resultValue=(value[1])
#construct the query
query='http://localhost:' + port + '/search?'
query = query + prepareQuery([queryValue])
#print query
#do the query
response = urllib2.urlopen(query).read()
response_json = json.loads(response)
#check the result
failCount += checkResult(query, response_json['results'], [resultValue] )
test_lib.killServer(serverHandle)
print '=============================='
return failCount
if __name__ == '__main__':
#Path of the query file
#each line like "trust||01c90b4effb2353742080000" ---- query||record_ids(results)
binary_path = sys.argv[1]
queriesAndResultsPath = sys.argv[2]
exitCode = testFieldList(queriesAndResultsPath, binary_path, "./test_fieldList_inQuery/conf.xml")
time.sleep(1)
exitCode = testFieldList(queriesAndResultsPath, binary_path, "./test_fieldList_inQuery/conf1.xml")
time.sleep(1)
exitCode = testFieldList(queriesAndResultsPath, binary_path, "./test_fieldList_inQuery/conf2.xml")
time.sleep(1)
os._exit(exitCode)
|
SRCH2/srch2-ngn
|
test/wrapper/system_tests/test_fieldList_inQuery/test_fieldList.py
|
Python
|
bsd-3-clause
| 4,446
|
#!/usr/bin/env python
"""
Installation script:
To release a new version to PyPi:
- Ensure the version is correctly set in oscar.__init__.py
- Run: python setup.py sdist upload
"""
from setuptools import setup, find_packages
setup(
name = "django-url-tracker",
version = '0.1.4',
url = "https://github.com/tangentlabs/django-url-tracker",
author = "Sebastian Vetter",
author_email = "sebastian.vetter@tangentone.com.au",
description = ("A little app that trackes URL changes in a database table "
"to provide HTTP 301 & 410 on request."),
long_description = open('README.rst').read(),
license = "BSD",
packages = find_packages(exclude=["docs*", "tests*"]),
include_package_data = True,
install_requires=[
'django>=1.3.1,<1.6',
'South>=0.7.3',
],
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"Programming Language :: Python",
"License :: OSI Approved :: Apache Software License",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Software Development"
],
keywords = "seo, django, framework",
)
|
elbaschid/django-url-tracker
|
setup.py
|
Python
|
bsd-3-clause
| 1,297
|
"""This module contains our extensions to subprocess
The name captured proc originally was because a big purpose was to
capture the output and log it. Now it does a whole bunch more than
just log the output.
Be warned, if you see your pipelines hanging read
http://old.nabble.com/subprocess.Popen-pipeline-bug--td16026600.html
and set ``close_fds=True`` (now the default if you haven't set it)
"""
from __future__ import absolute_import
import errno
import logging
import os.path
import subprocess
import threading
import time
from contextlib import contextmanager
from subprocess import PIPE
def check(cmd, **kwargs):
"""Check if a subprocess call exits cleanly.
Turns off all logging and suppresses all output by default.
"""
kwargs.setdefault('stdout', False)
kwargs.setdefault('stderr', False)
kwargs.setdefault('logger', False)
return 0 == capturedCall(cmd, **kwargs)
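# Illustrative usage (assumes a POSIX `true` binary on PATH):
#     if check(['true']):
#         print('subprocess exited cleanly')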
class CallProcError(Exception):
returncode = None
cmd = None
def calledProcessError(rc, cmdname):
"""Build a CalledProcessError that can be unpickled
    and looks like :py:class:`~subprocess.CalledProcessError`
"""
cpe = CallProcError('{0} returned {1}'.format(cmdname, rc))
cpe.returncode = rc
cpe.cmd = cmdname
return cpe
def capturedCall(cmd, check=False, **kwargs):
"""Call capturedPopen and wait for to exit.
See :py:func:`capturedPopen` for more documentation.
Additional Args:
* check (default False): If true then it will check the return
code of the process and raise a
:py:class:`~subprocess.CalledProcessError` if it is non-zero.
"""
p = capturedPopen(cmd, **kwargs)
def wait(monitor):
monitor = getattr(p, 'std%s_monitor_thread' % monitor, None)
if monitor:
monitor.join()
wait('out')
wait('err')
rc = p.wait()
if check and rc != 0:
raise calledProcessError(rc, p.cmdname)
return rc
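# A hypothetical usage sketch for capturedCall(); the command and logger name
# are illustrative assumptions, not part of this module:
#     log = logging.getLogger('build')
#     rc = capturedCall(['make', 'all'], logger=log, check=True)
# With check=True a non-zero exit raises the CallProcError built by
# calledProcessError() above.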
def selfCaptured(klass):
def mungekw(self, kwargs):
kwargs.setdefault('logger', self.logger)
return kwargs
def add(func):
def newfunc(self, cmd, **kwargs):
return func(cmd, **mungekw(self, kwargs))
setattr(klass, func.__name__, newfunc)
add(capturedCall)
add(capturedPopen)
add(guardPopen)
return klass
class CapProc(object):
"""A superclass to provide some of the captured functions as methods.
    Default some parameters (`logger`) based on the class's own properties.
"""
def mungekw(self, kwargs):
log = (getattr(self, 'logger', None) or
getattr(self, 'log', None) or
logging.root)
kwargs.setdefault('logger', log)
return kwargs
def capturedCall(self, cmd, **kwargs):
return capturedCall(cmd, **self.mungekw(kwargs))
def capturedPopen(self, cmd, **kwargs):
return capturedPopen(cmd, **self.mungekw(kwargs))
def guardPopen(self, cmd, **kwargs):
return guardPopen(cmd, **self.mungekw(kwargs))
def guarded_stdout_lines(self, cmd, **kwargs):
return guarded_stdout_lines(cmd, **self.mungekw(kwargs))
def capturedPopen(cmd, stdin=None, stdout=None, stderr=None,
logger=logging.root, cd=None,
stdout_level=logging.INFO,
stderr_level=logging.WARNING,
filter=None,
log_command=True,
**kwargs):
"""A wrapper around subprocess.Popen that offers the following extensions:
* stdin, stdout, stderr can be specified as False which will then
pass an open fd to /dev/null (using os module)
* if `logger` is provided (default=root logger) then log the
output of stdout and stderr (only if those streams aren't being
piped). These will be logged to two new loggers that are
children of the passed in logger
* Adds attribute `cmdname` to the returned popen object which is,
as best as we can divine, the name of the binary being run.
"""
# We use None as sigil values for stdin,stdout,stderr above so we
# can distinguish from the caller passing in Pipe.
if os.name == 'posix' and 'close_fds' not in kwargs:
# http://old.nabble.com/subprocess.Popen-pipeline-bug--td16026600.html
kwargs['close_fds'] = True
if cd:
# subprocess does this already with the 'cwd' arg, convert cd
# over so as not to break anyone's code.
kwargs['cwd'] = cd
if not isinstance(cmd, basestring):
cmd = [str(e) for e in cmd]
if logger and log_command:
# if we are logging, record the command we're running,
if 'cwd' in kwargs:
cwd = " in " + kwargs.get('cwd')
else:
cwd = ''
logger.debug("Running cmd: `%s`%s",
(cmd if isinstance(cmd, basestring)
else subprocess.list2cmdline(cmd)),
cwd)
# A list of FDs that were opened in the parent, to be passed to
# child, that need to be closed once that process has been spawned
close_in_parent = []
if stdin is False:
stdin = os.open(os.devnull, os.O_RDONLY)
close_in_parent.append(stdin)
def out(arg):
# figure out what to pass to the stdout stream
if arg == 'log':
if not logger:
raise ValueError("Requested logging but no logger")
return PIPE
        elif arg is False or arg == 'devnull':
fd = os.open(os.devnull, os.O_WRONLY)
close_in_parent.append(fd)
return fd
elif arg is None and logger:
return PIPE
else:
return arg
p = subprocess.Popen(cmd, stdin=stdin,
stdout=out(stdout),
stderr=out(stderr),
**kwargs)
for fd in close_in_parent:
os.close(fd)
# try to get a simple name for the command.
if kwargs.get('shell'):
p.cmdname = 'sh'
else:
p.cmdname = os.path.basename(cmd[0])
if logger:
cmdname = p.cmdname[:-4] if p.cmdname.endswith('.exe') else p.cmdname
def monitor(level, src, name):
lname = "%s.%s" % (cmdname, name)
sublog = logger.getChild(lname)
def tfn():
l = src.readline()
while l != "": # The EOF sigil
sublog.log(level, l.rstrip())
l = src.readline()
th = threading.Thread(target=tfn, name=lname)
p.__setattr__("std%s_monitor_thread" % name, th)
th.start()
logvals = (None, 'log')
if stdout in logvals: monitor(stdout_level, p.stdout, "out")
if stderr in logvals: monitor(stderr_level, p.stderr, "err")
return p
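# A hypothetical usage sketch for capturedPopen(); the command and logger name
# are illustrative assumptions:
#     p = capturedPopen(['ls', '-l'], logger=logging.getLogger('listing'))
#     p.wait()
# Because stdout/stderr default to None and a logger is present, both streams
# are piped and echoed line by line to child loggers named 'listing.ls.out'
# and 'listing.ls.err' by the monitor threads above.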
@contextmanager
def guardPopen(cmd, **kwargs):
"""The with-block combination of capturedPopen and guardPopened.
Accepts in kwargs:
* keys as defined by :py:func:`capturedPopen`
* keys as defined by :py:func:`ensure_popen_exits`
"""
guardargs = {'logger': kwargs.get('logger')}
def popkey(key):
if key in kwargs:
guardargs[key] = kwargs.pop(key)
popkey('check')
popkey('timeout')
popkey('timeout_count')
with guardPopened(capturedPopen(cmd, **kwargs), **guardargs) as p:
yield p
@contextmanager
def guardPopened(popen, logger=None, **kwargs):
"""Supervise the given popen process ensuring exist/termination.
This is a context manager function that will:
* terminate the process if the exit is due to an exception
(leaving the exception active).
    * Call :py:func:`ensure_popen_exits` (see it for the exit/termination details)
"""
try:
yield popen
except Exception:
terminate_process(popen, logger)
popen = None # skip the finally block's cleanup.
raise
finally:
if popen:
ensure_popen_exits(popen, logger=logger, **kwargs)
def ensure_popen_exits(popen, check=True, timeout=0.4, timeout_count=2,
logger=None):
"""Ensure a popen closes one way or another.
* Wait `timeout` seconds `timeout_count` times for the process to exit
* terminate the process if it hasn't
* if check: (default true) raise an exception if the process has
a non-zero returncode.
"""
if popen:
# try waiting a little for the process to finish
while popen.poll() is None and timeout > 0 and timeout_count > 0:
if logger:
cmdname = getattr(popen, 'cmdname', '<unknown>')
logger.debug(
"%s hasn't exited, waiting for %ss (%s tries left)",
cmdname, timeout, timeout_count)
time.sleep(timeout)
# wait longer each iteration
timeout = timeout * 2
timeout_count -= 1
terminate_process(popen, logger)
# Did it exit abnormally?
if check and popen.returncode != 0:
cmdname = getattr(popen, 'cmdname', '<unknown>')
raise calledProcessError(popen.returncode, cmdname)
def terminate_process(popen, logger=None, msglevel=logging.FATAL):
"""Do our best to terminate a process checking for windows shenanigans.
If the process has already exited then do nothing
"""
# If it hasn't exited yet, assume it's hung.
if popen and popen.poll() is None:
if logger and msglevel:
logger.log(msglevel, "Terminating %s",
getattr(popen, 'cmdname', '<unknown>'))
try:
popen.terminate()
except OSError, ose:
if ose.errno != errno.ESRCH: # no such process
raise
# handle race condition; if the process has exited since the
# last `poll()`, right now we only see this on
# windows. Determined error specifics by testing via ipython
# on windows, trying to `terminate` a completed process.
except Exception, we:
if hasattr(we, 'winerror'):
# 5 is 'Access Denied', could be either our
# process is dead or the PID is a different
# process now.
if we.winerror != 5:
raise
elif logger:
logger.fatal(
"Windows errno 5 implies proc already terminated: %s",
we)
else:
# not a windows error, handle normally
raise
def guarded_stdout_lines(cmd, **kwargs):
"returns an iterator for stdout lines"
kwargs['stdout'] = PIPE
with guardPopen(cmd, **kwargs) as proc:
try:
for l in iter(proc.stdout.readline, b''):
yield l.rstrip()
proc.wait()
except GeneratorExit:
# raised by user of this generator calling .close() (or
# the generator getting GCed)
# http://docs.python.org/2/reference/expressions.html#generator-iterator-methods
# ensure the process has exited.
terminate_process(
proc, logger=kwargs.get('logger'), msglevel=False)
            # make proc appear to have exited normally to bypass any
# other cleanup.
proc.returncode = 0
raise
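# A hypothetical usage sketch for guarded_stdout_lines(); the command and the
# handle() callback are illustrative assumptions:
#     for line in guarded_stdout_lines(['ls', '/tmp']):
#         handle(line)
# Breaking out of the loop early closes the generator, which triggers the
# GeneratorExit branch above and terminates the child process.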
# TODO: I'm not sure if this is the right abstraction; a class you
# could either call with all the cmds or add to one at a time would be
# nice.
def pipeline(*cmds, **kwargs):
"""Pipe a series of subprocess's output together.
:param stdin: if given will be the stdin of the first process
:param stdout: if given will be the stdout of the last process
Every cmd will be called with capturedPopen, any remaining kwargs
will be given to every call.
Return value is the list of popen objects
"""
if len(cmds) == 0:
return None
elif len(cmds) == 1:
return [capturedPopen(cmds[0], **kwargs)]
first_stdin = kwargs.pop('stdin', None)
final_stdout = kwargs.pop('stdout', None)
popens = []
for cmd in cmds:
if cmd is cmds[0]:
stdin = first_stdin
stdout = PIPE
elif cmd is cmds[-1]:
stdin = popens[-1].stdout
stdout = final_stdout
else:
stdin = popens[-1].stdout
stdout = PIPE
popens.append(capturedPopen(cmd, stdin=stdin, stdout=stdout, **kwargs))
return popens
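# A hypothetical usage sketch for pipeline(); the commands are illustrative
# assumptions:
#     procs = pipeline(['ps', 'aux'], ['grep', 'python'], stdout=PIPE)
#     output = procs[-1].stdout.read()
#     for p in procs:
#         p.wait()
# Each intermediate stdout is connected to the next command's stdin, and only
# the final process's stdout honours the caller-supplied `stdout`.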
@contextmanager
def environment(**kwargs):
"Add extra environment variables as a context manager"
old_env = {}
for key, val in kwargs.items():
old_env[key] = os.environ.pop(key, None)
if val:
os.environ[key] = val
try:
yield
finally:
for key, val in old_env.items():
if val is None:
os.environ.pop(key, None)
else:
os.environ[key] = val
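# A hypothetical usage sketch for environment(); the variable names and command
# are illustrative assumptions:
#     with environment(LC_ALL='C', DISPLAY=None):
#         capturedCall(['sort', 'data.txt'])
# Passing None removes the variable for the duration of the block; previous
# values are restored on exit either way.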
# Copyright (c) 2011,2014 Accelerated Data Works
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
NProfileAnalysisComputationalTool/npact
|
pynpact/pynpact/capproc.py
|
Python
|
bsd-3-clause
| 14,302
|
# test_tree.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from io import BytesIO
from unittest import skipIf
from git.objects import (
Tree,
Blob
)
from test.lib import TestBase
from git.util import HIDE_WINDOWS_KNOWN_ERRORS
import os.path as osp
class TestTree(TestBase):
@skipIf(HIDE_WINDOWS_KNOWN_ERRORS, """
File "C:\\projects\\gitpython\\git\\cmd.py", line 559, in execute
raise GitCommandNotFound(command, err)
git.exc.GitCommandNotFound: Cmd('git') not found due to: OSError('[WinError 6] The handle is invalid')
cmdline: git cat-file --batch-check""")
def test_serializable(self):
# tree at the given commit contains a submodule as well
roottree = self.rorepo.tree('6c1faef799095f3990e9970bc2cb10aa0221cf9c')
for item in roottree.traverse(ignore_self=False):
if item.type != Tree.type:
continue
# END skip non-trees
tree = item
# trees have no dict
self.assertRaises(AttributeError, setattr, tree, 'someattr', 1)
orig_data = tree.data_stream.read()
orig_cache = tree._cache
stream = BytesIO()
tree._serialize(stream)
assert stream.getvalue() == orig_data
stream.seek(0)
testtree = Tree(self.rorepo, Tree.NULL_BIN_SHA, 0, '')
testtree._deserialize(stream)
assert testtree._cache == orig_cache
            # deserializing replaces the cache; make sure of it by removing the cache first
del(testtree._cache)
testtree._deserialize(stream)
# END for each item in tree
@skipIf(HIDE_WINDOWS_KNOWN_ERRORS, """
File "C:\\projects\\gitpython\\git\\cmd.py", line 559, in execute
raise GitCommandNotFound(command, err)
git.exc.GitCommandNotFound: Cmd('git') not found due to: OSError('[WinError 6] The handle is invalid')
cmdline: git cat-file --batch-check""")
def test_traverse(self):
root = self.rorepo.tree('0.1.6')
num_recursive = 0
all_items = []
for obj in root.traverse():
if "/" in obj.path:
num_recursive += 1
assert isinstance(obj, (Blob, Tree))
all_items.append(obj)
# END for each object
assert all_items == root.list_traverse()
        # limit recursion depth to 1 - should be the same as default iteration
assert all_items
assert 'CHANGES' in root
assert len(list(root)) == len(list(root.traverse(depth=1)))
# only choose trees
trees_only = lambda i, d: i.type == "tree"
trees = list(root.traverse(predicate=trees_only))
assert len(trees) == len([i for i in root.traverse() if trees_only(i, 0)])
# test prune
lib_folder = lambda t, d: t.path == "lib"
pruned_trees = list(root.traverse(predicate=trees_only, prune=lib_folder))
assert len(pruned_trees) < len(trees)
# trees and blobs
assert len(set(trees) | set(root.trees)) == len(trees)
assert len({b for b in root if isinstance(b, Blob)} | set(root.blobs)) == len(root.blobs)
subitem = trees[0][0]
assert "/" in subitem.path
assert subitem.name == osp.basename(subitem.path)
        # ensure that at some point the traversed paths have a slash in them
found_slash = False
for item in root.traverse():
assert osp.isabs(item.abspath)
if '/' in item.path:
found_slash = True
# END check for slash
# slashes in paths are supported as well
# NOTE: on py3, / doesn't work with strings anymore ...
assert root[item.path] == item == root / item.path
# END for each item
assert found_slash
|
gitpython-developers/GitPython
|
test/test_tree.py
|
Python
|
bsd-3-clause
| 3,965
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script will check out llvm and clang, and then package the results up
to a tgz file."""
import argparse
import fnmatch
import itertools
import os
import shutil
import subprocess
import sys
import tarfile
# Path constants.
THIS_DIR = os.path.dirname(__file__)
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
THIRD_PARTY_DIR = os.path.join(THIS_DIR, '..', '..', '..', 'third_party')
LLVM_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm')
LLVM_BOOTSTRAP_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-bootstrap')
LLVM_BOOTSTRAP_INSTALL_DIR = os.path.join(THIRD_PARTY_DIR,
'llvm-bootstrap-install')
LLVM_BUILD_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-build')
LLVM_RELEASE_DIR = os.path.join(LLVM_BUILD_DIR, 'Release+Asserts')
LLVM_LTO_GOLD_PLUGIN_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-lto-gold-plugin')
STAMP_FILE = os.path.join(LLVM_BUILD_DIR, 'cr_build_revision')
def Tee(output, logfile):
logfile.write(output)
print output,
def TeeCmd(cmd, logfile, fail_hard=True):
"""Runs cmd and writes the output to both stdout and logfile."""
# Reading from PIPE can deadlock if one buffer is full but we wait on a
# different one. To work around this, pipe the subprocess's stderr to
# its stdout buffer and don't give it a stdin.
# shell=True is required in cmd.exe since depot_tools has an svn.bat, and
# bat files only work with shell=True set.
proc = subprocess.Popen(cmd, bufsize=1, shell=sys.platform == 'win32',
stdin=open(os.devnull), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in iter(proc.stdout.readline,''):
Tee(line, logfile)
if proc.poll() is not None:
break
exit_code = proc.wait()
if exit_code != 0 and fail_hard:
print 'Failed:', cmd
sys.exit(1)
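# A hypothetical usage sketch for TeeCmd() (not part of the packaging flow);
# the command is an illustrative assumption:
#     with open('example.log', 'w') as log:
#         TeeCmd(['echo', 'hello'], log, fail_hard=False)
# Output is streamed to stdout and written to the log file as it arrives.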
def PrintTarProgress(tarinfo):
print 'Adding', tarinfo.name
return tarinfo
def GetExpectedStamp():
rev_cmd = [sys.executable, os.path.join(THIS_DIR, 'update.py'),
'--print-revision']
return subprocess.check_output(rev_cmd).rstrip()
def GetGsutilPath():
if not 'find_depot_tools' in sys.modules:
sys.path.insert(0, os.path.join(CHROMIUM_DIR, 'build'))
global find_depot_tools
import find_depot_tools
depot_path = find_depot_tools.add_depot_tools_to_path()
if depot_path is None:
print ('depot_tools are not found in PATH. '
'Follow the instructions in this document '
'http://dev.chromium.org/developers/how-tos/install-depot-tools'
' to install depot_tools and then try again.')
sys.exit(1)
gsutil_path = os.path.join(depot_path, 'gsutil.py')
return gsutil_path
def RunGsutil(args):
return subprocess.call([sys.executable, GetGsutilPath()] + args)
def GsutilArchiveExists(archive_name, platform):
gsutil_args = ['-q', 'stat',
'gs://chromium-browser-clang/%s/%s.tgz' %
(platform, archive_name)]
return RunGsutil(gsutil_args) == 0
def MaybeUpload(args, archive_name, platform):
  # We don't want to rewrite the file if it already exists on the server,
  # so the -n option to gsutil is used. It will warn if the upload was skipped.
gsutil_args = ['cp', '-n', '-a', 'public-read',
'%s.tgz' % archive_name,
'gs://chromium-browser-clang/%s/%s.tgz' %
(platform, archive_name)]
if args.upload:
print 'Uploading %s to Google Cloud Storage...' % archive_name
exit_code = RunGsutil(gsutil_args)
if exit_code != 0:
print "gsutil failed, exit_code: %s" % exit_code
      sys.exit(exit_code)
else:
print 'To upload, run:'
print ('gsutil %s' % ' '.join(gsutil_args))
def main():
parser = argparse.ArgumentParser(description='build and package clang')
parser.add_argument('--upload', action='store_true',
help='Upload the target archive to Google Cloud Storage.')
args = parser.parse_args()
# Check that the script is not going to upload a toolchain built from HEAD.
use_head_revision = 'LLVM_FORCE_HEAD_REVISION' in os.environ
if args.upload and use_head_revision:
print ("--upload and LLVM_FORCE_HEAD_REVISION could not be used "
"at the same time.")
return 1
expected_stamp = GetExpectedStamp()
pdir = 'clang-' + expected_stamp
golddir = 'llvmgold-' + expected_stamp
print pdir
if sys.platform == 'darwin':
platform = 'Mac'
elif sys.platform == 'win32':
platform = 'Win'
else:
platform = 'Linux_x64'
# Check if Google Cloud Storage already has the artifacts we want to build.
  if (args.upload and GsutilArchiveExists(pdir, platform) and
      (not sys.platform.startswith('linux') or
       GsutilArchiveExists(golddir, platform))):
print ('Desired toolchain revision %s is already available '
'in Google Cloud Storage:') % expected_stamp
print 'gs://chromium-browser-clang/%s/%s.tgz' % (platform, pdir)
if sys.platform.startswith('linux'):
print 'gs://chromium-browser-clang/%s/%s.tgz' % (platform, golddir)
return 0
with open('buildlog.txt', 'w') as log:
Tee('Diff in llvm:\n', log)
TeeCmd(['svn', 'stat', LLVM_DIR], log, fail_hard=False)
TeeCmd(['svn', 'diff', LLVM_DIR], log, fail_hard=False)
Tee('Diff in llvm/tools/clang:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'tools', 'clang')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'tools', 'clang')],
log, fail_hard=False)
# TODO(thakis): compiler-rt is in projects/compiler-rt on Windows but
# llvm/compiler-rt elsewhere. So this diff call is currently only right on
# Windows.
Tee('Diff in llvm/compiler-rt:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'compiler-rt')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'compiler-rt')],
log, fail_hard=False)
Tee('Diff in llvm/projects/libcxx:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'libcxx')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'libcxx')],
log, fail_hard=False)
Tee('Starting build\n', log)
# Do a clobber build.
shutil.rmtree(LLVM_BOOTSTRAP_DIR, ignore_errors=True)
shutil.rmtree(LLVM_BOOTSTRAP_INSTALL_DIR, ignore_errors=True)
shutil.rmtree(LLVM_BUILD_DIR, ignore_errors=True)
opt_flags = []
if sys.platform.startswith('linux'):
opt_flags += ['--lto-gold-plugin']
build_cmd = [sys.executable, os.path.join(THIS_DIR, 'update.py'),
'--bootstrap', '--force-local-build',
'--run-tests'] + opt_flags
TeeCmd(build_cmd, log)
stamp = open(STAMP_FILE).read().rstrip()
if stamp != expected_stamp:
print 'Actual stamp (%s) != expected stamp (%s).' % (stamp, expected_stamp)
return 1
shutil.rmtree(pdir, ignore_errors=True)
# Copy a whitelist of files to the directory we're going to tar up.
# This supports the same patterns that the fnmatch module understands.
exe_ext = '.exe' if sys.platform == 'win32' else ''
want = ['bin/llvm-symbolizer' + exe_ext,
'lib/clang/*/asan_blacklist.txt',
'lib/clang/*/cfi_blacklist.txt',
# Copy built-in headers (lib/clang/3.x.y/include).
'lib/clang/*/include/*',
]
if sys.platform == 'win32':
want.append('bin/clang-cl.exe')
want.append('bin/lld-link.exe')
else:
so_ext = 'dylib' if sys.platform == 'darwin' else 'so'
want.extend(['bin/clang',
'lib/libFindBadConstructs.' + so_ext,
'lib/libBlinkGCPlugin.' + so_ext,
])
if sys.platform == 'darwin':
want.extend([# Copy only the OSX (ASan and profile) and iossim (ASan)
# runtime libraries:
'lib/clang/*/lib/darwin/*asan_osx*',
'lib/clang/*/lib/darwin/*asan_iossim*',
'lib/clang/*/lib/darwin/*profile_osx*',
])
elif sys.platform.startswith('linux'):
# Copy the libstdc++.so.6 we linked Clang against so it can run.
want.append('lib/libstdc++.so.6')
# Copy only
# lib/clang/*/lib/linux/libclang_rt.{[atm]san,san,ubsan,profile}-*.a ,
# but not dfsan.
want.extend(['lib/clang/*/lib/linux/*[atm]san*',
'lib/clang/*/lib/linux/*ubsan*',
'lib/clang/*/lib/linux/*libclang_rt.san*',
'lib/clang/*/lib/linux/*profile*',
'lib/clang/*/msan_blacklist.txt',
])
elif sys.platform == 'win32':
want.extend(['lib/clang/*/lib/windows/clang_rt.asan*.dll',
'lib/clang/*/lib/windows/clang_rt.asan*.lib',
'lib/clang/*/include_sanitizer/*',
])
for root, dirs, files in os.walk(LLVM_RELEASE_DIR):
# root: third_party/llvm-build/Release+Asserts/lib/..., rel_root: lib/...
rel_root = root[len(LLVM_RELEASE_DIR)+1:]
rel_files = [os.path.join(rel_root, f) for f in files]
wanted_files = list(set(itertools.chain.from_iterable(
fnmatch.filter(rel_files, p) for p in want)))
if wanted_files:
# Guaranteed to not yet exist at this point:
os.makedirs(os.path.join(pdir, rel_root))
for f in wanted_files:
src = os.path.join(LLVM_RELEASE_DIR, f)
dest = os.path.join(pdir, f)
shutil.copy(src, dest)
# Strip libraries.
if sys.platform == 'darwin' and f.endswith('.dylib'):
subprocess.call(['strip', '-x', dest])
elif (sys.platform.startswith('linux') and
os.path.splitext(f)[1] in ['.so', '.a']):
subprocess.call(['strip', '-g', dest])
# Set up symlinks.
if sys.platform != 'win32':
os.symlink('clang', os.path.join(pdir, 'bin', 'clang++'))
os.symlink('clang', os.path.join(pdir, 'bin', 'clang-cl'))
# Copy libc++ headers.
if sys.platform == 'darwin':
shutil.copytree(os.path.join(LLVM_BOOTSTRAP_INSTALL_DIR, 'include', 'c++'),
os.path.join(pdir, 'include', 'c++'))
# Copy buildlog over.
shutil.copy('buildlog.txt', pdir)
# Create archive.
tar_entries = ['bin', 'lib', 'buildlog.txt']
if sys.platform == 'darwin':
tar_entries += ['include']
with tarfile.open(pdir + '.tgz', 'w:gz') as tar:
for entry in tar_entries:
tar.add(os.path.join(pdir, entry), arcname=entry, filter=PrintTarProgress)
MaybeUpload(args, pdir, platform)
# Zip up gold plugin on Linux.
if sys.platform.startswith('linux'):
shutil.rmtree(golddir, ignore_errors=True)
os.makedirs(os.path.join(golddir, 'lib'))
shutil.copy(os.path.join(LLVM_LTO_GOLD_PLUGIN_DIR, 'lib', 'LLVMgold.so'),
os.path.join(golddir, 'lib'))
with tarfile.open(golddir + '.tgz', 'w:gz') as tar:
tar.add(os.path.join(golddir, 'lib'), arcname='lib',
filter=PrintTarProgress)
MaybeUpload(args, golddir, platform)
# Zip up llvm-objdump for sanitizer coverage.
objdumpdir = 'llvmobjdump-' + stamp
shutil.rmtree(objdumpdir, ignore_errors=True)
os.makedirs(os.path.join(objdumpdir, 'bin'))
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'llvm-objdump' + exe_ext),
os.path.join(objdumpdir, 'bin'))
with tarfile.open(objdumpdir + '.tgz', 'w:gz') as tar:
tar.add(os.path.join(objdumpdir, 'bin'), arcname='bin',
filter=PrintTarProgress)
MaybeUpload(args, objdumpdir, platform)
# FIXME: Warn if the file already exists on the server.
if __name__ == '__main__':
sys.exit(main())
|
danakj/chromium
|
tools/clang/scripts/package.py
|
Python
|
bsd-3-clause
| 11,778
|
GENDERS = ['Male', 'Female']
|
rimbalinux/LMD3
|
people/settings.py
|
Python
|
bsd-3-clause
| 28
|
from auditor.auditor.settings import *
##########################################################################
#
# Server settings
#
##########################################################################
ALLOWED_HOSTS = ["localhost"]
WSGI_APPLICATION = 'auditor.auditor.wsgi_production.application'
##########################################################################
#
# Database settings
#
##########################################################################
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(VAR_DIR, 'db', 'production_db.sqlite3'),
}
}
|
siggame/auditor
|
auditor/auditor/production.py
|
Python
|
bsd-3-clause
| 642
|
accuracy = 1e-8
class Cell:
def __init__(self, vtkCell, bounds, q):
self.vtkCell = vtkCell
self.bounds = bounds
self.q = q
    def __eq__(self, other):
        global accuracy
        if abs(self.q - other.q) > accuracy:
            return False
        if len(self.bounds) != len(other.bounds):
            return False
        for i in xrange(len(self.bounds)):
            if abs(self.bounds[i] - other.bounds[i]) > accuracy:
                return False
        return True
    def __cmp__(self, other):
        global accuracy
        if self.q - other.q > accuracy:
            return 1
        elif other.q - self.q > accuracy:
            return -1
        if len(self.bounds) != len(other.bounds):
            # differing dimensionality: order by the number of bounds
            return cmp(len(self.bounds), len(other.bounds))
        for i in xrange(len(self.bounds)):
            if self.bounds[i] - other.bounds[i] > accuracy:
                return 1
            elif other.bounds[i] - self.bounds[i] > accuracy:
                return -1
        return 0
def __str__(self):
return "q: " + str(self.q) + " bounds: " + str(self.bounds)
def parseRange(argument):
if ':' in argument:
return range(*map(int, argument.split(':')))
return range(int(argument), int(argument)+1)
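# Illustrative examples of parseRange() (not part of the tool's CLI handling):
#     parseRange('3')    -> [3]
#     parseRange('0:4')  -> [0, 1, 2, 3]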
def readCellsFromFile(cells, path, iteration, rank):
import vtk
import os.path
filename = path.replace('__ITERATION__', str(iteration)).replace('__RANK__', str(rank))
if os.path.exists(filename):
reader = vtk.vtkDataSetReader()
reader.SetFileName(filename)
reader.SetReadAllScalars(True)
reader.Update()
grid = reader.GetOutput()
numberOfCells = grid.GetNumberOfCells()
cellData = grid.GetCellData()
qs = cellData.GetScalars("q0")
for cellId in xrange(numberOfCells):
vtkCell = grid.GetCell(cellId)
q = qs.GetTuple(cellId)[0]
cells.append(Cell(vtkCell, vtkCell.GetBounds()[:], q))
return numberOfCells
else:
return 0
def findClosestMatch(cell, cells):
bestIndex = -1
minDistance = 1000000
import math
for index in xrange(len(cells)):
c = cells[index]
distance = 0
for i in xrange(len(cell.bounds)):
distance += (cell.bounds[i] - c.bounds[i])**2
distance = math.sqrt((c.q - cell.q)**2 * 10 + distance)
if distance < minDistance:
minDistance = distance
bestIndex = index
return bestIndex
def findCellInList(cell, cells):
lower = 0
upper = len(cells)
while(upper > lower):
middle = (upper + lower) / 2
middleCell = cells[middle]
if middleCell < cell:
lower = middle + 1
elif middleCell > cell:
upper = middle
else:
return middle
return -1
def main():
from argparse import ArgumentParser
parser = ArgumentParser(description='Tool for comparing vtk output of parallel runs.')
parser.add_argument('path1', help='The path to the first set of vtk files. Use __ITERATION__ for iteration number and __RANK__ for rank number.')
parser.add_argument('path2', help='The path to the second set of vtk files. Use __ITERATION__ for iteration number and __RANK__ for rank number.')
parser.add_argument('iteration1', type=int, help='The iteration number of the first set of vtk files.')
parser.add_argument('ranks1', help='The range of ranks for the first set of vtk files. Define single number or min:max.')
parser.add_argument('iteration2', type=int, help='The iteration number of the second set of vtk files.')
parser.add_argument('ranks2', help='The range of ranks for the second set of vtk files. Define single number or min:max.')
    parser.add_argument('accuracy', help='The accuracy for numerical equality.', type=float, nargs='?', default=1e-5)
arguments = parser.parse_args()
global accuracy
accuracy = arguments.accuracy
if arguments.path2 == 'SameAsPath1':
path2 = arguments.path1
else:
path2 = arguments.path2
#Loop through ranks1
cells1 = [] #set()
ranks1 = parseRange(arguments.ranks1)
for rank in ranks1:
print "1: Parsing rank...", rank
numberOfCells = readCellsFromFile(cells1, arguments.path1, arguments.iteration1, rank)
print "Read", numberOfCells, "cells."
print "1: Total number of cells:", len(cells1)
#Loop through ranks2
cells2 = [] #set()
ranks2 = parseRange(arguments.ranks2)
print ranks2
for rank in ranks2:
print "2: Parsing rank", rank
numberOfCells = readCellsFromFile(cells2, path2, arguments.iteration2, rank)
print "Read", numberOfCells, "cells."
print "2: Total number of cells:", len(cells2)
#Compare lists
if len(cells1) != len(cells2):
raise Exception("Number of cells do not match!")
cells1.sort()
cells2.sort()
for cell in cells1:
index = findCellInList(cell, cells2)
if index == -1:
bestMatch = findClosestMatch(cell, cells2)
if bestMatch == -1:
bestMatchString = ""
else:
bestMatchString = "Best match is " + str(cells2[bestMatch])
raise Exception("No matching cell for " + str(cell) + ". " + bestMatchString)
else:
del cells2[index]
print "All cells match"
if __name__=="__main__":
main()
|
unterweg/peanoclaw
|
testscenarios/tools/compareResult.py
|
Python
|
bsd-3-clause
| 5,050
|
from PyQt4.QtGui import QToolButton, QPainter, QPixmap, QPen, QColor, QColorDialog, QIcon
from PyQt4.QtCore import SIGNAL, QRect
class ColorButton(QToolButton):
def __init__(self, *args):
QToolButton.__init__(self, *args)
self._color = QColor()
self.connect(self, SIGNAL("clicked()"), self.selectColor)
def color(self):
return self._color
def setColor(self, color):
self._color = color
self.updateColor()
def updateColor(self):
iconSize = self.iconSize()
width = iconSize.width()
height = iconSize.height()
pixmap = QPixmap(iconSize)
pixmap.fill(self._color)
painter = QPainter()
painter.begin(pixmap)
painter.setPen(QPen(QColor("#777777")))
painter.drawRect(QRect(0, 0, width - 1, height - 1))
painter.end()
self.setIcon(QIcon(pixmap))
    def selectColor(self):
        self.setChecked(True)
        color = QColorDialog.getColor(self._color)
        self.setChecked(False)
        # ignore the invalid color returned when the dialog is cancelled
        if color.isValid() and color != self._color:
            self.setColor(color)
            self.emit(SIGNAL("colorChanged()"))
|
gt-ros-pkg/rcommander-core
|
nodebox_qt/src/nodebox/gui/qt/widgets/colorbutton.py
|
Python
|
bsd-3-clause
| 1,154
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gettext
import os
from datetime import datetime, timedelta
from importlib import import_module
from unittest import TestCase, skipIf
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.db.models import CharField, DateField
from django.test import TestCase as DjangoTestCase, override_settings
from django.utils import six, translation
from . import models
from .widgetadmin import site as widget_admin_site
try:
import pytz
except ImportError:
pytz = None
admin_static_prefix = lambda: {
'ADMIN_STATIC_PREFIX': "%sadmin/" % settings.STATIC_URL,
}
class AdminFormfieldForDBFieldTests(TestCase):
"""
Tests for correct behavior of ModelAdmin.formfield_for_dbfield
"""
def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
"""
Helper to call formfield_for_dbfield for a given model and field name
and verify that the returned formfield is appropriate.
"""
# Override any settings on the model admin
class MyModelAdmin(admin.ModelAdmin):
pass
for k in admin_overrides:
setattr(MyModelAdmin, k, admin_overrides[k])
# Construct the admin, and ask it for a formfield
ma = MyModelAdmin(model, admin.site)
ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
# "unwrap" the widget wrapper, if needed
if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
widget = ff.widget.widget
else:
widget = ff.widget
# Check that we got a field of the right type
self.assertTrue(
isinstance(widget, widgetclass),
"Wrong widget for %s.%s: expected %s, got %s" % (
                model.__name__,  # 'model' is the class itself, not an instance
fieldname,
widgetclass,
type(widget),
)
)
# Return the formfield so that other tests can continue
return ff
def test_DateField(self):
self.assertFormfield(models.Event, 'start_date', widgets.AdminDateWidget)
def test_DateTimeField(self):
self.assertFormfield(models.Member, 'birthdate', widgets.AdminSplitDateTime)
def test_TimeField(self):
self.assertFormfield(models.Event, 'start_time', widgets.AdminTimeWidget)
def test_TextField(self):
self.assertFormfield(models.Event, 'description', widgets.AdminTextareaWidget)
def test_URLField(self):
self.assertFormfield(models.Event, 'link', widgets.AdminURLFieldWidget)
def test_IntegerField(self):
self.assertFormfield(models.Event, 'min_age', widgets.AdminIntegerFieldWidget)
def test_CharField(self):
self.assertFormfield(models.Member, 'name', widgets.AdminTextInputWidget)
def test_EmailField(self):
self.assertFormfield(models.Member, 'email', widgets.AdminEmailInputWidget)
def test_FileField(self):
self.assertFormfield(models.Album, 'cover_art', widgets.AdminFileWidget)
def test_ForeignKey(self):
self.assertFormfield(models.Event, 'main_band', forms.Select)
def test_raw_id_ForeignKey(self):
self.assertFormfield(models.Event, 'main_band', widgets.ForeignKeyRawIdWidget,
raw_id_fields=['main_band'])
def test_radio_fields_ForeignKey(self):
ff = self.assertFormfield(models.Event, 'main_band', widgets.AdminRadioSelect,
radio_fields={'main_band': admin.VERTICAL})
self.assertEqual(ff.empty_label, None)
def test_many_to_many(self):
self.assertFormfield(models.Band, 'members', forms.SelectMultiple)
def test_raw_id_many_to_many(self):
self.assertFormfield(models.Band, 'members', widgets.ManyToManyRawIdWidget,
raw_id_fields=['members'])
def test_filtered_many_to_many(self):
self.assertFormfield(models.Band, 'members', widgets.FilteredSelectMultiple,
filter_vertical=['members'])
def test_formfield_overrides(self):
self.assertFormfield(models.Event, 'start_date', forms.TextInput,
formfield_overrides={DateField: {'widget': forms.TextInput}})
def test_formfield_overrides_widget_instances(self):
"""
Test that widget instances in formfield_overrides are not shared between
different fields. (#19423)
"""
class BandAdmin(admin.ModelAdmin):
formfield_overrides = {
CharField: {'widget': forms.TextInput(attrs={'size': '10'})}
}
ma = BandAdmin(models.Band, admin.site)
f1 = ma.formfield_for_dbfield(models.Band._meta.get_field('name'), request=None)
f2 = ma.formfield_for_dbfield(models.Band._meta.get_field('style'), request=None)
self.assertNotEqual(f1.widget, f2.widget)
self.assertEqual(f1.widget.attrs['maxlength'], '100')
self.assertEqual(f2.widget.attrs['maxlength'], '20')
self.assertEqual(f2.widget.attrs['size'], '10')
def test_field_with_choices(self):
self.assertFormfield(models.Member, 'gender', forms.Select)
def test_choices_with_radio_fields(self):
self.assertFormfield(models.Member, 'gender', widgets.AdminRadioSelect,
radio_fields={'gender': admin.VERTICAL})
def test_inheritance(self):
self.assertFormfield(models.Album, 'backside_art', widgets.AdminFileWidget)
def test_m2m_widgets(self):
"""m2m fields help text as it applies to admin app (#9321)."""
class AdvisorAdmin(admin.ModelAdmin):
filter_vertical = ['companies']
self.assertFormfield(models.Advisor, 'companies', widgets.FilteredSelectMultiple,
filter_vertical=['companies'])
ma = AdvisorAdmin(models.Advisor, admin.site)
f = ma.formfield_for_dbfield(models.Advisor._meta.get_field('companies'), request=None)
self.assertEqual(six.text_type(f.help_text), 'Hold down "Control", or "Command" on a Mac, to select more than one.')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class AdminFormfieldForDBFieldWithRequestTests(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
def test_filter_choices_by_request_user(self):
"""
Ensure the user can only see their own cars in the foreign key dropdown.
"""
self.client.login(username="super", password="secret")
response = self.client.get(reverse('admin:admin_widgets_cartire_add'))
self.assertNotContains(response, "BMW M3")
self.assertContains(response, "Volkswagon Passat")
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyWidgetChangeList(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
def setUp(self):
self.client.login(username="super", password="secret")
def test_changelist_ForeignKey(self):
response = self.client.get(reverse('admin:admin_widgets_car_changelist'))
self.assertContains(response, '/auth/user/add/')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyRawIdWidget(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
def setUp(self):
self.client.login(username="super", password="secret")
def test_nonexistent_target_id(self):
band = models.Band.objects.create(name='Bogey Blues')
pk = band.pk
band.delete()
post_data = {
"main_band": '%s' % pk,
}
# Try posting with a non-existent pk in a raw id field: this
# should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'), post_data)
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
def test_invalid_target_id(self):
for test_str in ('Iñtërnâtiônàlizætiøn', "1234'", -1234):
# This should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'),
{"main_band": test_str})
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
def test_url_params_from_lookup_dict_any_iterable(self):
lookup1 = widgets.url_params_from_lookup_dict({'color__in': ('red', 'blue')})
lookup2 = widgets.url_params_from_lookup_dict({'color__in': ['red', 'blue']})
self.assertEqual(lookup1, {'color__in': 'red,blue'})
self.assertEqual(lookup1, lookup2)
def test_url_params_from_lookup_dict_callable(self):
def my_callable():
return 'works'
lookup1 = widgets.url_params_from_lookup_dict({'myfield': my_callable})
lookup2 = widgets.url_params_from_lookup_dict({'myfield': my_callable()})
self.assertEqual(lookup1, lookup2)
class FilteredSelectMultipleWidgetTest(DjangoTestCase):
def test_render(self):
w = widgets.FilteredSelectMultiple('test', False)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilter">\n</select><script type="text/javascript">addEvent(window, "load", function(e) {SelectFilter.init("id_test", "test", 0, "%(ADMIN_STATIC_PREFIX)s"); });</script>\n' % admin_static_prefix()
)
def test_stacked_render(self):
w = widgets.FilteredSelectMultiple('test', True)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilterstacked">\n</select><script type="text/javascript">addEvent(window, "load", function(e) {SelectFilter.init("id_test", "test", 1, "%(ADMIN_STATIC_PREFIX)s"); });</script>\n' % admin_static_prefix()
)
class AdminDateWidgetTest(DjangoTestCase):
def test_attrs(self):
"""
Ensure that user-supplied attrs are used.
Refs #12073.
"""
w = widgets.AdminDateWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="vDateField" name="test" size="10" />',
)
# pass attrs to widget
w = widgets.AdminDateWidget(attrs={'size': 20, 'class': 'myDateField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="myDateField" name="test" size="20" />',
)
class AdminTimeWidgetTest(DjangoTestCase):
def test_attrs(self):
"""
Ensure that user-supplied attrs are used.
Refs #12073.
"""
w = widgets.AdminTimeWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="vTimeField" name="test" size="8" />',
)
# pass attrs to widget
w = widgets.AdminTimeWidget(attrs={'size': 20, 'class': 'myTimeField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="myTimeField" name="test" size="20" />',
)
class AdminSplitDateTimeWidgetTest(DjangoTestCase):
def test_render(self):
w = widgets.AdminSplitDateTime()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">Date: <input value="2007-12-01" type="text" class="vDateField" name="test_0" size="10" /><br />Time: <input value="09:30:00" type="text" class="vTimeField" name="test_1" size="8" /></p>',
)
def test_localization(self):
w = widgets.AdminSplitDateTime()
with self.settings(USE_L10N=True), translation.override('de-at'):
w.is_localized = True
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">Datum: <input value="01.12.2007" type="text" class="vDateField" name="test_0" size="10" /><br />Zeit: <input value="09:30:00" type="text" class="vTimeField" name="test_1" size="8" /></p>',
)
class AdminURLWidgetTest(DjangoTestCase):
def test_render(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', ''),
'<input class="vURLField" name="test" type="url" />'
)
self.assertHTMLEqual(
w.render('test', 'http://example.com'),
'<p class="url">Currently:<a href="http://example.com">http://example.com</a><br />Change:<input class="vURLField" name="test" type="url" value="http://example.com" /></p>'
)
def test_render_idn(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', 'http://example-äüö.com'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">http://example-äüö.com</a><br />Change:<input class="vURLField" name="test" type="url" value="http://example-äüö.com" /></p>'
)
def test_render_quoting(self):
# WARNING: Don't use assertHTMLEqual in that testcase!
# assertHTMLEqual will get rid of some escapes which are tested here!
w = widgets.AdminURLFieldWidget()
self.assertEqual(
w.render('test', 'http://example.com/<sometag>some text</sometag>'),
'<p class="url">Currently: <a href="http://example.com/%3Csometag%3Esome%20text%3C/sometag%3E">http://example.com/<sometag>some text</sometag></a><br />Change: <input class="vURLField" name="test" type="url" value="http://example.com/<sometag>some text</sometag>" /></p>'
)
self.assertEqual(
w.render('test', 'http://example-äüö.com/<sometag>some text</sometag>'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com/%3Csometag%3Esome%20text%3C/sometag%3E">http://example-äüö.com/<sometag>some text</sometag></a><br />Change: <input class="vURLField" name="test" type="url" value="http://example-äüö.com/<sometag>some text</sometag>" /></p>'
)
self.assertEqual(
w.render('test', 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"'),
'<p class="url">Currently: <a href="http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)%3C/script%3E%22">http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"</a><br />Change: <input class="vURLField" name="test" type="url" value="http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"" /></p>'
)
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls',
)
class AdminFileWidgetTests(DjangoTestCase):
fixtures = ['admin-widgets-users.xml']
def setUp(self):
band = models.Band.objects.create(name='Linkin Park')
self.album = band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
def test_render(self):
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id" /> '
'<label for="test-clear_id">Clear</label></span><br />'
'Change: <input type="file" name="test" /></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
self.assertHTMLEqual(
w.render('test', SimpleUploadedFile('test', b'content')),
'<input type="file" name="test" />',
)
def test_readonly_fields(self):
"""
File widgets should render as a link when they're marked "read only."
"""
self.client.login(username="super", password="secret")
response = self.client.get(reverse('admin:admin_widgets_album_change', args=(self.album.id,)))
self.assertContains(
response,
'<p><a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">'
'albums\hybrid_theory.jpg</a></p>' % {'STORAGE_URL': default_storage.url('')},
html=True,
)
self.assertNotContains(
response,
'<input type="file" name="cover_art" id="id_cover_art" />',
html=True,
)
response = self.client.get(reverse('admin:admin_widgets_album_add'))
self.assertContains(
response,
'<p></p>',
html=True,
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ForeignKeyRawIdWidgetTest(DjangoTestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
rel = models.Album._meta.get_field('band').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', band.pk, attrs={}), (
'<input type="text" name="test" value="%(bandpk)s" class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/band/?_to_field=id" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong>Linkin Park</strong>'
) % {'bandpk': band.pk}
)
def test_relations_to_non_primary_key(self):
# Check that ForeignKeyRawIdWidget works with fields which aren't
# related to the model's primary key.
apple = models.Inventory.objects.create(barcode=86, name='Apple')
models.Inventory.objects.create(barcode=22, name='Pear')
core = models.Inventory.objects.create(
barcode=87, name='Core', parent=apple
)
rel = models.Inventory._meta.get_field('parent').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', core.parent_id, attrs={}), (
'<input type="text" name="test" value="86" class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/inventory/?_to_field=barcode" class="related-lookup" id="lookup_id_test" title="Lookup">'
'</a> <strong>Apple</strong>'
)
)
def test_fk_related_model_not_in_admin(self):
# FK to a model not registered with admin site. Raw ID widget should
# have no magnifying glass link. See #16542
big_honeycomb = models.Honeycomb.objects.create(location='Old tree')
big_honeycomb.bee_set.create()
rel = models.Bee._meta.get_field('honeycomb').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('honeycomb_widget', big_honeycomb.pk, attrs={}),
'<input type="text" name="honeycomb_widget" value="%(hcombpk)s" /> <strong>Honeycomb object</strong>' % {'hcombpk': big_honeycomb.pk}
)
def test_fk_to_self_model_not_in_admin(self):
# FK to self, not registered with admin site. Raw ID widget should have
# no magnifying glass link. See #16542
subject1 = models.Individual.objects.create(name='Subject #1')
models.Individual.objects.create(name='Child', parent=subject1)
rel = models.Individual._meta.get_field('parent').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('individual_widget', subject1.pk, attrs={}),
'<input type="text" name="individual_widget" value="%(subj1pk)s" /> <strong>Individual object</strong>' % {'subj1pk': subject1.pk}
)
def test_proper_manager_for_label_lookup(self):
# see #9258
rel = models.Inventory._meta.get_field('parent').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
hidden = models.Inventory.objects.create(
barcode=93, name='Hidden', hidden=True
)
child_of_hidden = models.Inventory.objects.create(
barcode=94, name='Child of hidden', parent=hidden
)
self.assertHTMLEqual(
w.render('test', child_of_hidden.parent_id, attrs={}), (
'<input type="text" name="test" value="93" class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/inventory/?_to_field=barcode" class="related-lookup" id="lookup_id_test" title="Lookup">'
'</a> <strong>Hidden</strong>'
)
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ManyToManyRawIdWidgetTest(DjangoTestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
m1 = models.Member.objects.create(name='Chester')
m2 = models.Member.objects.create(name='Mike')
band.members.add(m1, m2)
rel = models.Band._meta.get_field('members').rel
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', [m1.pk, m2.pk], attrs={}), (
'<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" class="vManyToManyRawIdAdminField" />'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % dict(m1pk=m1.pk, m2pk=m2.pk)
)
self.assertHTMLEqual(
w.render('test', [m1.pk]), (
'<input type="text" name="test" value="%(m1pk)s" class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % dict(m1pk=m1.pk)
)
def test_m2m_related_model_not_in_admin(self):
# M2M relationship with model not registered with admin site. Raw ID
# widget should have no magnifying glass link. See #16542
consultor1 = models.Advisor.objects.create(name='Rockstar Techie')
c1 = models.Company.objects.create(name='Doodle')
c2 = models.Company.objects.create(name='Pear')
consultor1.companies.add(c1, c2)
rel = models.Advisor._meta.get_field('companies').rel
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('company_widget1', [c1.pk, c2.pk], attrs={}),
'<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s" />' % {'c1pk': c1.pk, 'c2pk': c2.pk}
)
self.assertHTMLEqual(
w.render('company_widget2', [c1.pk]),
'<input type="text" name="company_widget2" value="%(c1pk)s" />' % {'c1pk': c1.pk}
)
class RelatedFieldWidgetWrapperTests(DjangoTestCase):
def test_no_can_add_related(self):
rel = models.Individual._meta.get_field('parent').rel
w = widgets.AdminRadioSelect()
# Used to fail with a name error.
w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site)
self.assertFalse(w.can_add_related)
def test_select_multiple_widget_cant_change_delete_related(self):
rel = models.Individual._meta.get_field('parent').rel
widget = forms.SelectMultiple()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertFalse(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_on_delete_cascade_rel_cant_delete_related(self):
rel = models.Individual._meta.get_field('soulmate').rel
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertTrue(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class DateTimePickerSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_show_hide_date_time_picker_widgets(self):
"""
Ensure that pressing the ESC key closes the date and time picker
widgets.
Refs #17064.
"""
from selenium.webdriver.common.keys import Keys
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# First, with the date picker widget ---------------------------------
# Check that the date picker is hidden
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'none')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Check that the date picker is visible
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'block')
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# Check that the date picker is hidden again
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'none')
# Then, with the time picker widget ----------------------------------
# Check that the time picker is hidden
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'none')
# Click the time icon
self.selenium.find_element_by_id('clocklink0').click()
# Check that the time picker is visible
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'block')
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# Check that the time picker is hidden again
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'none')
def test_calendar_nonday_class(self):
"""
Ensure cells that are not days of the month have the `nonday` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# make sure the first and last 6 cells have class nonday
for td in tds[:6] + tds[-6:]:
self.assertEqual(td.get_attribute('class'), 'nonday')
def test_calendar_selected_class(self):
"""
        Ensure the cell for the day in the input has the `selected` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify the selected cell
selected = tds[6]
self.assertEqual(selected.get_attribute('class'), 'selected')
self.assertEqual(selected.text, '1')
def test_calendar_no_selected_class(self):
"""
Ensure no cells are given the selected class when the field is empty.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify there are no cells with the selected class
selected = [td for td in tds if td.get_attribute('class') == 'selected']
self.assertEqual(len(selected), 0)
def test_calendar_show_date_from_input(self):
"""
        Ensure that the calendar shows the date from the input field for every
        locale supported by Django.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Enter test data
member = models.Member.objects.create(name='Bob', birthdate=datetime(1984, 5, 15), gender='M')
# Get month names translations for every locales
month_string = 'January February March April May June July August September October November December'
path = os.path.join(os.path.dirname(import_module('django.contrib.admin').__file__), 'locale')
for language_code, language_name in settings.LANGUAGES:
try:
catalog = gettext.translation('djangojs', path, [language_code])
except IOError:
continue
if month_string in catalog._catalog:
month_names = catalog._catalog[month_string]
else:
month_names = month_string
# Get the expected caption
may_translation = month_names.split(' ')[4]
expected_caption = '{0:s} {1:d}'.format(may_translation, 1984)
# Test with every locale
with override_settings(LANGUAGE_CODE=language_code, USE_L10N=True):
# Open a page that has a date picker widget
self.selenium.get('{}{}'.format(self.live_server_url,
reverse('admin:admin_widgets_member_change', args=(member.pk,))))
# Click on the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Make sure that the right month and year are displayed
self.wait_for_text('#calendarin0 caption', expected_caption)
class DateTimePickerSeleniumChromeTests(DateTimePickerSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerSeleniumIETests(DateTimePickerSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@skipIf(pytz is None, "this test requires pytz")
@override_settings(TIME_ZONE='Asia/Singapore')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class DateTimePickerShortcutsSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_date_time_picker_shortcuts(self):
"""
Ensure that date/time/datetime picker shortcuts work in the current time zone.
Refs #20663.
        This test case is fairly tricky; it relies on selenium still running the
        browser in the default time zone "America/Chicago" despite `override_settings`
        changing the time zone to "Asia/Singapore".
"""
self.admin_login(username='super', password='secret', login_url='/')
error_margin = timedelta(seconds=10)
        # If we are near a DST transition, add an hour of error margin.
tz = pytz.timezone('America/Chicago')
utc_now = datetime.now(pytz.utc)
tz_yesterday = (utc_now - timedelta(days=1)).astimezone(tz).tzname()
tz_tomorrow = (utc_now + timedelta(days=1)).astimezone(tz).tzname()
if tz_yesterday != tz_tomorrow:
error_margin += timedelta(hours=1)
now = datetime.now()
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
self.selenium.find_element_by_id('id_name').send_keys('test')
# Click on the "today" and "now" shortcuts.
shortcuts = self.selenium.find_elements_by_css_selector(
'.field-birthdate .datetimeshortcuts')
for shortcut in shortcuts:
shortcut.find_element_by_tag_name('a').click()
# Check that there is a time zone mismatch warning.
# Warning: This would effectively fail if the TIME_ZONE defined in the
# settings has the same UTC offset as "Asia/Singapore" because the
# mismatch warning would be rightfully missing from the page.
self.selenium.find_elements_by_css_selector(
'.field-birthdate .timezonewarning')
# Submit the form.
self.selenium.find_element_by_tag_name('form').submit()
self.wait_page_loaded()
# Make sure that "now" in javascript is within 10 seconds
# from "now" on the server side.
member = models.Member.objects.get(name='test')
self.assertGreater(member.birthdate, now - error_margin)
self.assertLess(member.birthdate, now + error_margin)
class DateTimePickerShortcutsSeleniumChromeTests(DateTimePickerShortcutsSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerShortcutsSeleniumIETests(DateTimePickerShortcutsSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class HorizontalVerticalFilterSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
self.lisa = models.Student.objects.create(name='Lisa')
self.john = models.Student.objects.create(name='John')
self.bob = models.Student.objects.create(name='Bob')
self.peter = models.Student.objects.create(name='Peter')
self.jenny = models.Student.objects.create(name='Jenny')
self.jason = models.Student.objects.create(name='Jason')
self.cliff = models.Student.objects.create(name='Cliff')
self.arthur = models.Student.objects.create(name='Arthur')
self.school = models.School.objects.create(name='School of Awesome')
super(HorizontalVerticalFilterSeleniumFirefoxTests, self).setUp()
def assertActiveButtons(self, mode, field_name, choose, remove,
choose_all=None, remove_all=None):
choose_link = '#id_%s_add_link' % field_name
choose_all_link = '#id_%s_add_all_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
remove_all_link = '#id_%s_remove_all_link' % field_name
self.assertEqual(self.has_css_class(choose_link, 'active'), choose)
self.assertEqual(self.has_css_class(remove_link, 'active'), remove)
if mode == 'horizontal':
self.assertEqual(self.has_css_class(choose_all_link, 'active'), choose_all)
self.assertEqual(self.has_css_class(remove_all_link, 'active'), remove_all)
def execute_basic_operations(self, mode, field_name):
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
choose_all_link = 'id_%s_add_all_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
remove_all_link = 'id_%s_remove_all_link' % field_name
# Initial positions ---------------------------------------------------
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id)])
self.assertActiveButtons(mode, field_name, False, False, True, True)
# Click 'Choose all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(choose_all_link).click()
elif mode == 'vertical':
            # There's no 'Choose all' button in vertical mode, so individually
# select all options and click 'Choose'.
for option in self.selenium.find_elements_by_css_selector(from_box + ' > option'):
option.click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertActiveButtons(mode, field_name, False, False, False, True)
# Click 'Remove all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(remove_all_link).click()
elif mode == 'vertical':
            # There's no 'Remove all' button in vertical mode, so individually
# select all options and click 'Remove'.
for option in self.selenium.find_elements_by_css_selector(to_box + ' > option'):
option.click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box,
[str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertSelectOptions(to_box, [])
self.assertActiveButtons(mode, field_name, False, False, True, False)
# Choose some options ------------------------------------------------
from_lisa_select_option = self.get_select_option(from_box, str(self.lisa.id))
# Check the title attribute is there for tool tips: ticket #20821
self.assertEqual(from_lisa_select_option.get_attribute('title'), from_lisa_select_option.get_attribute('text'))
from_lisa_select_option.click()
self.get_select_option(from_box, str(self.jason.id)).click()
self.get_select_option(from_box, str(self.bob.id)).click()
self.get_select_option(from_box, str(self.john.id)).click()
self.assertActiveButtons(mode, field_name, True, False, True, False)
self.selenium.find_element_by_id(choose_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.bob.id),
str(self.jason.id), str(self.john.id)])
# Check the tooltip is still there after moving: ticket #20821
to_lisa_select_option = self.get_select_option(to_box, str(self.lisa.id))
self.assertEqual(to_lisa_select_option.get_attribute('title'), to_lisa_select_option.get_attribute('text'))
# Remove some options -------------------------------------------------
self.get_select_option(to_box, str(self.lisa.id)).click()
self.get_select_option(to_box, str(self.bob.id)).click()
self.assertActiveButtons(mode, field_name, False, True, True, True)
self.selenium.find_element_by_id(remove_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)])
self.assertSelectOptions(to_box,
[str(self.jason.id), str(self.john.id)])
# Choose some more options --------------------------------------------
self.get_select_option(from_box, str(self.arthur.id)).click()
self.get_select_option(from_box, str(self.cliff.id)).click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)])
self.assertSelectOptions(to_box,
[str(self.jason.id), str(self.john.id),
str(self.arthur.id), str(self.cliff.id)])
def test_basic(self):
self.school.students = [self.lisa, self.peter]
self.school.alumni = [self.lisa, self.peter]
self.school.save()
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get('%s%s' % (
self.live_server_url, reverse('admin:admin_widgets_school_change', args=(self.school.id,))))
self.wait_page_loaded()
self.execute_basic_operations('vertical', 'students')
self.execute_basic_operations('horizontal', 'alumni')
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = models.School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()),
[self.arthur, self.cliff, self.jason, self.john])
self.assertEqual(list(self.school.alumni.all()),
[self.arthur, self.cliff, self.jason, self.john])
def test_filter(self):
"""
Ensure that typing in the search box filters out options displayed in
the 'from' box.
"""
from selenium.webdriver.common.keys import Keys
self.school.students = [self.lisa, self.peter]
self.school.alumni = [self.lisa, self.peter]
self.school.save()
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, reverse('admin:admin_widgets_school_change', args=(self.school.id,))))
for field_name in ['students', 'alumni']:
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = '#id_%s_add_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
input = self.selenium.find_element_by_css_selector('#id_%s_input' % field_name)
# Initial values
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
# Typing in some characters filters out non-matching options
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys('R')
self.assertSelectOptions(from_box, [str(self.arthur.id)])
# Clearing the text box makes the other options reappear
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
# -----------------------------------------------------------------
# Check that choosing a filtered option sends it properly to the
# 'to' box.
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
self.get_select_option(from_box, str(self.jason.id)).click()
self.selenium.find_element_by_css_selector(choose_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id),
str(self.jason.id)])
self.get_select_option(to_box, str(self.lisa.id)).click()
self.selenium.find_element_by_css_selector(remove_link).click()
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.lisa.id)])
self.assertSelectOptions(to_box,
[str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE]) # Clear text box
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jenny.id),
str(self.john.id), str(self.lisa.id)])
self.assertSelectOptions(to_box,
[str(self.peter.id), str(self.jason.id)])
# -----------------------------------------------------------------
# Check that pressing enter on a filtered option sends it properly
# to the 'to' box.
self.get_select_option(to_box, str(self.jason.id)).click()
self.selenium.find_element_by_css_selector(remove_link).click()
input.send_keys('ja')
self.assertSelectOptions(from_box, [str(self.jason.id)])
input.send_keys([Keys.ENTER])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE, Keys.BACK_SPACE])
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = models.School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()),
[self.jason, self.peter])
self.assertEqual(list(self.school.alumni.all()),
[self.jason, self.peter])
class HorizontalVerticalFilterSeleniumChromeTests(HorizontalVerticalFilterSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class HorizontalVerticalFilterSeleniumIETests(HorizontalVerticalFilterSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class AdminRawIdWidgetSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
models.Band.objects.create(id=42, name='Bogey Blues')
models.Band.objects.create(id=98, name='Green Potatoes')
super(AdminRawIdWidgetSeleniumFirefoxTests, self).setUp()
def test_ForeignKey(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, reverse('admin:admin_widgets_event_add')))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(
self.selenium.find_element_by_id('id_main_band').get_attribute('value'),
'')
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_main_band')
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_main_band')
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the other selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '98')
def test_many_to_many(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, reverse('admin:admin_widgets_event_add')))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(
self.selenium.find_element_by_id('id_supporting_bands').get_attribute('value'),
'')
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_supporting_bands')
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_supporting_bands')
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the two selected bands' ids
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42,98')
class AdminRawIdWidgetSeleniumChromeTests(AdminRawIdWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class AdminRawIdWidgetSeleniumIETests(AdminRawIdWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class RelatedFieldWidgetSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_ForeignKey_using_to_field(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get('%s%s' % (
self.live_server_url,
reverse('admin:admin_widgets_profile_add')))
main_window = self.selenium.current_window_handle
        # Click the Add User button to add a new user
self.selenium.find_element_by_id('add_id_user').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_user')
self.wait_for('#id_password')
password_field = self.selenium.find_element_by_id('id_password')
password_field.send_keys('password')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'newuser'
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
# The field now contains the new user
self.wait_for('#id_user option[value="newuser"]')
# Click the Change User button to change it
self.selenium.find_element_by_id('change_id_user').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_user')
self.wait_for('#id_username')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'changednewuser'
username_field.clear()
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
        self.selenium.switch_to.window(main_window)
# Wait up to 2 seconds for the new option to show up after clicking save in the popup.
self.selenium.implicitly_wait(2)
self.selenium.find_element_by_css_selector('#id_user option[value=changednewuser]')
self.selenium.implicitly_wait(0)
# Go ahead and submit the form to make sure it works
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.wait_for_text('li.success', 'The profile "changednewuser" was added successfully.')
profiles = models.Profile.objects.all()
self.assertEqual(len(profiles), 1)
self.assertEqual(profiles[0].user.username, username_value)
class RelatedFieldWidgetSeleniumChromeTests(RelatedFieldWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class RelatedFieldWidgetSeleniumIETests(RelatedFieldWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
|
52ai/django-ccsds
|
tests/admin_widgets/tests.py
|
Python
|
bsd-3-clause
| 56,120
|
"""
URLs for blog app
"""
import os
from django.contrib import admin
from django.views.generic import TemplateView
from django.conf.urls import *
from rest_framework.urlpatterns import format_suffix_patterns
import api_views
api_urlpatterns = patterns('blog.api_views',
url(r'^$', 'api_root'),
url(r'^posts$', api_views.PostGenericList.as_view(), name='post-list'),
url(r'^posts/tag/(?P<tag>.+)$', api_views.TagGenericList.as_view(), name='tag'),
url(r'^posts/search/(?P<search_terms>.+)$', api_views.SearchGenericList.as_view(), name='search'),
url(r'^posts/user/(?P<username>.+)$', api_views.UserGenericList.as_view(), name='user'),
url(r'^posts/(?P<pk>[0-9]+)/comments$', api_views.CommentsGenericDetail.as_view(), name='comments'),
url(r'^posts/(?P<pk>.+)$', api_views.PostGenericDetail.as_view(), name='post-detail'),
url(r'^siteactivities$', api_views.SiteActivityGenericList.as_view(), name='site-activities'),
url(r'^siteactivities/(?P<timestamp>[0-9]+)$', api_views.SiteActivityGenericList.as_view(), name='site-activities'),
url(r'^alltags$', 'all_tags', name='all-tags'),
)
api_urlpatterns = format_suffix_patterns(api_urlpatterns)
urlpatterns = patterns(r'blog.views',
(r'^$', 'home_view'),
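    # Note: the more specific comment route below must come before the generic
    # post routes, because the greedy (?P<post_id>.+) pattern would otherwise
    # match 'post/comment/...' URLs first.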
(r'^post/comment/(?P<post_id>.+)/(?P<permalink>.+)$', 'post_view'),
(r'^post/(?P<post_id>.+)/(?P<permalink>.+)$', 'post_view'),
(r'^post/(?P<permalink>.+)$', 'post_view'),
(r'^tag/(?P<tag_name>.+)$', 'tag_view'),
(r'^login$', 'login_view'),
(r'^logout$', 'logout_view'),
(r'^register$', 'register_view'),
(r'^create_post$', 'create_post_view' ),
(r'^delete_post/(?P<post_id>.+)$', 'delete_post_view' ),
(r'^edit_post/(?P<post_id>.+)$', 'edit_post_view' ),
(r'^search/$', 'search_view' ),
(r'^load_posts/$', 'load_posts_view' ),
(r'^api/', include(api_urlpatterns))
)
|
yeraydiazdiaz/nonrel-blog
|
blog/urls.py
|
Python
|
bsd-3-clause
| 1,876
|
# Generated by Django 2.2.20 on 2021-06-04 15:40
from django.db import migrations, models
ACCESS_INDEX = "audit_access_couch_10d1b_idx"
ACCESS_TABLE = "auditcare_accessaudit"
NAVIGATION_EVENT_INDEX = "audit_nav_couch_875bc_idx"
NAVIGATION_EVENT_TABLE = "auditcare_navigationeventaudit"
def _create_index_sql(table_name, index_name):
return """
CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS {} ON {} (couch_id)
WHERE couch_id IS NOT NULL
""".format(index_name, table_name)
def _drop_index_sql(index_name):
return "DROP INDEX CONCURRENTLY IF EXISTS {}".format(index_name)
class Migration(migrations.Migration):
atomic = False
dependencies = [
('auditcare', '0003_truncatechars'),
]
operations = [
migrations.AddField(
model_name='accessaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.AddField(
model_name='navigationeventaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.RunSQL(
sql=_create_index_sql(ACCESS_TABLE, ACCESS_INDEX),
reverse_sql=_drop_index_sql(ACCESS_INDEX),
state_operations=[
migrations.AddConstraint(
model_name='accessaudit',
constraint=models.UniqueConstraint(condition=models.Q(couch_id__isnull=False),
fields=('couch_id',), name=ACCESS_INDEX),
),
]
),
migrations.RunSQL(
sql=_create_index_sql(NAVIGATION_EVENT_TABLE, NAVIGATION_EVENT_INDEX),
reverse_sql=_drop_index_sql(NAVIGATION_EVENT_INDEX),
state_operations=[
migrations.AddConstraint(
model_name='navigationeventaudit',
constraint=models.UniqueConstraint(condition=models.Q(couch_id__isnull=False),
fields=('couch_id',), name=NAVIGATION_EVENT_INDEX),
),
]
),
]
|
dimagi/commcare-hq
|
corehq/apps/auditcare/migrations/0004_add_couch_id.py
|
Python
|
bsd-3-clause
| 2,171
|
# -*- coding: utf-8 -*-
import os, popen2, time
import tables
tref = time.time()
trel = tref
def show_mem(explain):
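    # Read /proc/<pid>/status (Linux-specific) and print the VmSize/VmRSS/VmData/
    # VmStk/VmExe/VmLib counters plus wall-clock and delta timing for this process.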
global tref, trel
cmd = "cat /proc/%s/status" % os.getpid()
sout, sin = popen2.popen2(cmd)
for line in sout:
if line.startswith("VmSize:"):
vmsize = int(line.split()[1])
elif line.startswith("VmRSS:"):
vmrss = int(line.split()[1])
elif line.startswith("VmData:"):
vmdata = int(line.split()[1])
elif line.startswith("VmStk:"):
vmstk = int(line.split()[1])
elif line.startswith("VmExe:"):
vmexe = int(line.split()[1])
elif line.startswith("VmLib:"):
vmlib = int(line.split()[1])
sout.close()
sin.close()
print "\nMemory usage: ******* %s *******" % explain
print "VmSize: %7s kB\tVmRSS: %7s kB" % (vmsize, vmrss)
print "VmData: %7s kB\tVmStk: %7s kB" % (vmdata, vmstk)
print "VmExe: %7s kB\tVmLib: %7s kB" % (vmexe, vmlib)
print "WallClock time:", time.time() - tref,
print " Delta time:", time.time() - trel
trel = time.time()
def write_group(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "w")
for child in range(nchildren):
fileh.createGroup(fileh.root, 'group' + str(child),
"child: %d" % child)
show_mem("After creating. Iter %s" % i)
fileh.close()
show_mem("After close")
def read_group(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "r")
for child in range(nchildren):
node = fileh.getNode(fileh.root, 'group' + str(child))
assert node is not None
#flavor = node._v_attrs.CLASS
# for child in fileh.walkNodes():
# pass
show_mem("After reading metadata. Iter %s" % i)
fileh.close()
show_mem("After close")
def write_array(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "w")
for child in range(nchildren):
fileh.createArray(fileh.root, 'array' + str(child),
[1, 1], "child: %d" % child)
show_mem("After creating. Iter %s" % i)
fileh.close()
show_mem("After close")
def read_array(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "r")
for child in range(nchildren):
node = fileh.getNode(fileh.root, 'array' + str(child))
#flavor = node._v_attrs.FLAVOR
data = node[:] # Read data
assert data is not None
show_mem("After reading data. Iter %s" % i)
# for child in range(nchildren):
# node = fileh.getNode(fileh.root, 'array' + str(child))
# flavor = node._v_attrs.FLAVOR
#flavor = node._v_attrs
# for child in fileh.walkNodes():
# pass
# show_mem("After reading metadata. Iter %s" % i)
fileh.close()
show_mem("After close")
def write_carray(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "w")
for child in range(nchildren):
fileh.createCArray(fileh.root, 'array' + str(child),
tables.IntAtom(), (2,), "child: %d" % child)
show_mem("After creating. Iter %s" % i)
fileh.close()
show_mem("After close")
def read_carray(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "r")
for child in range(nchildren):
node = fileh.getNode(fileh.root, 'array' + str(child))
#flavor = node._v_attrs.FLAVOR
data = node[:] # Read data
assert data is not None
#print "data-->", data
show_mem("After reading data. Iter %s" % i)
fileh.close()
show_mem("After close")
def write_earray(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "w")
for child in range(nchildren):
ea = fileh.createEArray(fileh.root, 'array' + str(child),
tables.IntAtom(), shape=(0,),
title="child: %d" % child)
ea.append([1, 2, 3])
show_mem("After creating. Iter %s" % i)
fileh.close()
show_mem("After close")
def read_earray(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "r")
for child in range(nchildren):
node = fileh.getNode(fileh.root, 'array' + str(child))
#flavor = node._v_attrs.FLAVOR
data = node[:] # Read data
assert data is not None
#print "data-->", data
show_mem("After reading data. Iter %s" % i)
fileh.close()
show_mem("After close")
def write_vlarray(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "w")
for child in range(nchildren):
vl = fileh.createVLArray(fileh.root, 'array' + str(child),
tables.IntAtom(), "child: %d" % child)
vl.append([1, 2, 3])
show_mem("After creating. Iter %s" % i)
fileh.close()
show_mem("After close")
def read_vlarray(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "r")
for child in range(nchildren):
node = fileh.getNode(fileh.root, 'array' + str(child))
#flavor = node._v_attrs.FLAVOR
data = node[:] # Read data
assert data is not None
#print "data-->", data
show_mem("After reading data. Iter %s" % i)
fileh.close()
show_mem("After close")
def write_table(file, nchildren, niter):
class Record(tables.IsDescription):
var1 = tables.IntCol(pos=1)
var2 = tables.StringCol(length=1, pos=2)
var3 = tables.FloatCol(pos=3)
for i in range(niter):
fileh = tables.openFile(file, mode = "w")
for child in range(nchildren):
t = fileh.createTable(fileh.root, 'table' + str(child),
Record, "child: %d" % child)
t.append([[1, "2", 3.]])
show_mem("After creating. Iter %s" % i)
fileh.close()
show_mem("After close")
def read_table(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "r")
for child in range(nchildren):
node = fileh.getNode(fileh.root, 'table' + str(child))
#klass = node._v_attrs.CLASS
data = node[:] # Read data
assert data is not None
#print "data-->", data
show_mem("After reading data. Iter %s" % i)
fileh.close()
show_mem("After close")
def write_xtable(file, nchildren, niter):
class Record(tables.IsDescription):
var1 = tables.IntCol(pos=1)
var2 = tables.StringCol(length=1, pos=2)
var3 = tables.FloatCol(pos=3)
for i in range(niter):
fileh = tables.openFile(file, mode = "w")
for child in range(nchildren):
t = fileh.createTable(fileh.root, 'table' + str(child),
Record, "child: %d" % child)
t.append([[1, "2", 3.]])
t.cols.var1.createIndex()
show_mem("After creating. Iter %s" % i)
fileh.close()
show_mem("After close")
def read_xtable(file, nchildren, niter):
for i in range(niter):
fileh = tables.openFile(file, mode = "r")
for child in range(nchildren):
node = fileh.getNode(fileh.root, 'table' + str(child))
#klass = node._v_attrs.CLASS
#data = node[:] # Read data
#print "data-->", data
show_mem("After reading data. Iter %s" % i)
fileh.close()
show_mem("After close")
        del node
if __name__ == '__main__':
import sys, getopt, pstats
import profile as prof
usage = """usage: %s [-v] [-p] [-a] [-c] [-e] [-l] [-t] [-x] [-g] [-r] [-w] [-c nchildren] [-n iter] file
-v verbose
-p profile
-a create/read arrays (default)
-c create/read carrays
-e create/read earrays
-l create/read vlarrays
-t create/read tables
-x create/read indexed tables
-g create/read groups
-r only read test
-w only write test
-n number of children (1000 is the default)
-i number of iterations (default is 5)
\n"""
try:
opts, pargs = getopt.getopt(sys.argv[1:], 'vpaceltxgrwn:i:')
except:
sys.stderr.write(usage)
sys.exit(0)
    # if we pass too many parameters, abort
if len(pargs) != 1:
sys.stderr.write(usage)
sys.exit(0)
# default options
verbose = 0
profile = 0
array = 1
carray = 0
earray = 0
vlarray = 0
table = 0
xtable = 0
group = 0
write = 0
read = 0
nchildren = 1000
niter = 5
# Get the options
for option in opts:
if option[0] == '-v':
verbose = 1
elif option[0] == '-p':
profile = 1
elif option[0] == '-a':
            array = 1
elif option[0] == '-c':
array = 0
carray = 1
elif option[0] == '-e':
array = 0
earray = 1
elif option[0] == '-l':
array = 0
vlarray = 1
elif option[0] == '-t':
array = 0
table = 1
elif option[0] == '-x':
array = 0
xtable = 1
elif option[0] == '-g':
array = 0
            group = 1
elif option[0] == '-w':
write = 1
elif option[0] == '-r':
read = 1
elif option[0] == '-n':
nchildren = int(option[1])
elif option[0] == '-i':
niter = int(option[1])
# Catch the hdf5 file passed as the last argument
file = pargs[0]
if array:
fwrite = 'write_array'
fread = 'read_array'
elif carray:
fwrite = 'write_carray'
fread = 'read_carray'
elif earray:
fwrite = 'write_earray'
fread = 'read_earray'
elif vlarray:
fwrite = 'write_vlarray'
fread = 'read_vlarray'
elif table:
fwrite = 'write_table'
fread = 'read_table'
elif xtable:
fwrite = 'write_xtable'
fread = 'read_xtable'
elif group:
fwrite = 'write_group'
fread = 'read_group'
show_mem("Before open")
if write:
if profile:
prof.run(str(fwrite)+'(file, nchildren, niter)', 'write_file.prof')
stats = pstats.Stats('write_file.prof')
stats.strip_dirs()
stats.sort_stats('time', 'calls')
if verbose:
stats.print_stats()
else:
stats.print_stats(20)
else:
eval(fwrite+'(file, nchildren, niter)')
if read:
if profile:
prof.run(fread+'(file, nchildren, niter)', 'read_file.prof')
stats = pstats.Stats('read_file.prof')
stats.strip_dirs()
stats.sort_stats('time', 'calls')
if verbose:
stats.print_stats()
else:
stats.print_stats(20)
else:
eval(fread+'(file, nchildren, niter)')
|
cpcloud/PyTables
|
tables/tests/check_leaks.py
|
Python
|
bsd-3-clause
| 11,667
|
# Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import operator
import math
import sys
import timeit
from scipy.spatial import cKDTree
from . import sigtools, dlti
from ._upfirdn import upfirdn, _output_len, _upfirdn_modes
from scipy import linalg, fft as sp_fft
from scipy.fft._helper import _init_nd_shape_and_axes
import numpy as np
from scipy.special import lambertw
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
from .filter_design import cheby1, _validate_sos
from .fir_filter_design import firwin
from ._sosfilt import _sosfilt
__all__ = ['correlate', 'correlate2d',
'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'resample_poly', 'detrend',
'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
def _valfrommode(mode):
try:
return _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def _bvalfromboundary(boundary):
try:
return _boundarydict[boundary] << 2
except KeyError:
raise ValueError("Acceptable boundary flags are 'fill', 'circular' "
"(or 'wrap'), and 'symmetric' (or 'symm').")
def _inputs_swap_needed(mode, shape1, shape2, axes=None):
"""Determine if inputs arrays need to be swapped in `"valid"` mode.
If in `"valid"` mode, returns whether or not the input arrays need to be
swapped depending on whether `shape1` is at least as large as `shape2` in
every calculated dimension.
This is important for some of the correlation and convolution
implementations in this module, where the larger array input needs to come
before the smaller array input when operating in this mode.
Note that if the mode provided is not 'valid', False is immediately
returned.
"""
if mode != 'valid':
return False
if not shape1:
return False
if axes is None:
axes = range(len(shape1))
ok1 = all(shape1[i] >= shape2[i] for i in axes)
ok2 = all(shape2[i] >= shape1[i] for i in axes)
if not (ok1 or ok2):
raise ValueError("For 'valid' mode, one must be at least "
"as large as the other in every dimension")
return not ok1
def correlate(in1, in2, mode='full', method='auto'):
r"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the correlation.
``direct``
The correlation is determined directly from sums, the definition of
correlation.
``fft``
The Fast Fourier Transform is used to perform the correlation more
quickly (only available for numerical arrays.)
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See `convolve` Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
See Also
--------
choose_conv_method : contains more documentation on `method`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as::
z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])
This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')``
then
.. math::
z[k] = (x * y)(k - N + 1)
= \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}
for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`
where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`,
and :math:`y_m` is 0 when m is outside the range of y.
``method='fft'`` only works for numerical arrays as it relies on
`fftconvolve`. In certain cases (i.e., arrays of objects or when
rounding integers can lose precision), ``method='direct'`` is always used.
When using "same" mode with even-length inputs, the outputs of `correlate`
and `correlate2d` differ: There is a 1-index offset between them.
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = np.asarray(in1)
in2 = np.asarray(in2)
if in1.ndim == in2.ndim == 0:
return in1 * in2.conj()
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
# this either calls fftconvolve or this function with method=='direct'
if method in ('fft', 'auto'):
return convolve(in1, _reverse_and_conj(in2), mode, method)
elif method == 'direct':
# fastpath to faster numpy.correlate for 1d inputs when possible
if _np_conv_ok(in1, in2, mode):
return np.correlate(in1, in2, mode)
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward if mode == 'full'. Also, it fails
# with 'valid' mode if in2 is larger than in1, so swap those, too.
# Don't swap inputs for 'same' mode, since shape of in1 matters.
swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
_inputs_swap_needed(mode, in1.shape, in2.shape))
if swapped_inputs:
in1, in2 = in2, in1
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = tuple(slice(0, i) for i in in1.shape)
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
if swapped_inputs:
# Reverse and conjugate to undo the effect of swapping inputs
z = _reverse_and_conj(z)
return z
else:
raise ValueError("Acceptable method flags are 'auto',"
" 'direct', or 'fft'.")
def _centered(arr, newshape):
# Return the center newshape portion of the array.
newshape = np.asarray(newshape)
currshape = np.array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
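# Illustrative example (assumption): _centered(np.arange(5), (3,)) returns
# array([1, 2, 3]), i.e. the middle three samples of the length-5 input.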
def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False):
"""Handle the axes argument for frequency-domain convolution.
Returns the inputs and axes in a standard form, eliminating redundant axes,
swapping the inputs if necessary, and checking for various potential
errors.
Parameters
----------
in1 : array
First input.
in2 : array
Second input.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output.
See the documentation `fftconvolve` for more information.
axes : list of ints
Axes over which to compute the FFTs.
sorted_axes : bool, optional
If `True`, sort the axes.
Default is `False`, do not sort.
Returns
-------
in1 : array
        The first input, possibly swapped with the second input.
    in2 : array
        The second input, possibly swapped with the first input.
axes : list of ints
Axes over which to compute the FFTs.
"""
s1 = in1.shape
s2 = in2.shape
noaxes = axes is None
_, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes)
if not noaxes and not len(axes):
raise ValueError("when provided, axes cannot be empty")
    # Axes of length 1 can rely on broadcasting rules for multiply,
# no fft needed.
axes = [a for a in axes if s1[a] != 1 and s2[a] != 1]
if sorted_axes:
axes.sort()
if not all(s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1
for a in range(in1.ndim) if a not in axes):
raise ValueError("incompatible shapes for in1 and in2:"
" {0} and {1}".format(s1, s2))
# Check that input sizes are compatible with 'valid' mode.
if _inputs_swap_needed(mode, s1, s2, axes=axes):
# Convolution is commutative; order doesn't have any effect on output.
in1, in2 = in2, in1
return in1, in2, axes
def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False):
"""Convolve two arrays in the frequency domain.
    This function implements only the base FFT-related operations.
Specifically, it converts the signals to the frequency domain, multiplies
them, then converts them back to the time domain. Calculations of axes,
shapes, convolution mode, etc. are implemented in higher level-functions,
such as `fftconvolve` and `oaconvolve`. Those functions should be used
instead of this one.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
axes : array_like of ints
Axes over which to compute the FFTs.
shape : array_like of ints
The sizes of the FFTs.
calc_fast_len : bool, optional
If `True`, set each value of `shape` to the next fast FFT length.
Default is `False`, use `axes` as-is.
Returns
-------
out : array
An N-dimensional array containing the discrete linear convolution of
`in1` with `in2`.
"""
if not len(axes):
return in1 * in2
complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c')
if calc_fast_len:
# Speed up FFT by padding to optimal size.
fshape = [
sp_fft.next_fast_len(shape[a], not complex_result) for a in axes]
else:
fshape = shape
if not complex_result:
fft, ifft = sp_fft.rfftn, sp_fft.irfftn
else:
fft, ifft = sp_fft.fftn, sp_fft.ifftn
sp1 = fft(in1, fshape, axes=axes)
sp2 = fft(in2, fshape, axes=axes)
ret = ifft(sp1 * sp2, fshape, axes=axes)
if calc_fast_len:
fslice = tuple([slice(sz) for sz in shape])
ret = ret[fslice]
return ret
def _apply_conv_mode(ret, s1, s2, mode, axes):
"""Calculate the convolution result shape based on the `mode` argument.
Returns the result sliced to the correct size for the given mode.
Parameters
----------
ret : array
The result array, with the appropriate shape for the 'full' mode.
s1 : list of int
The shape of the first input.
s2 : list of int
The shape of the second input.
mode : str {'full', 'valid', 'same'}
A string indicating the size of the output.
See the documentation `fftconvolve` for more information.
axes : list of ints
Axes over which to compute the convolution.
Returns
-------
ret : array
        A copy of `ret`, sliced to the correct size for the given `mode`.
"""
if mode == "full":
return ret.copy()
elif mode == "same":
return _centered(ret, s1).copy()
elif mode == "valid":
shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1
for a in range(ret.ndim)]
return _centered(ret, shape_valid).copy()
else:
raise ValueError("acceptable mode flags are 'valid',"
" 'same', or 'full'")
def fftconvolve(in1, in2, mode="full", axes=None):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
As of v0.19, `convolve` automatically chooses this method or the direct
method based on an estimation of which is faster.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
axes : int or array_like of ints or None, optional
Axes over which to compute the convolution.
The default is over all axes.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
convolve : Uses the direct convolution or FFT convolution algorithm
depending on which is faster.
oaconvolve : Uses the overlap-add method to do convolution, which is
generally faster when the input arrays are large and
significantly different in size.
Examples
--------
Autocorrelation of white noise is an impulse.
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = np.asarray(in1)
in2 = np.asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return np.array([])
in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,
sorted_axes=False)
s1 = in1.shape
s2 = in2.shape
shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1
for i in range(in1.ndim)]
ret = _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True)
return _apply_conv_mode(ret, s1, s2, mode, axes)
def _calc_oa_lens(s1, s2):
"""Calculate the optimal FFT lengths for overlapp-add convolution.
The calculation is done for a single dimension.
Parameters
----------
s1 : int
Size of the dimension for the first array.
s2 : int
Size of the dimension for the second array.
Returns
-------
block_size : int
The size of the FFT blocks.
overlap : int
The amount of overlap between two blocks.
in1_step : int
The size of each step for the first array.
in2_step : int
        The size of each step for the second array.
"""
# Set up the arguments for the conventional FFT approach.
fallback = (s1+s2-1, None, s1, s2)
# Use conventional FFT convolve if sizes are same.
if s1 == s2 or s1 == 1 or s2 == 1:
return fallback
if s2 > s1:
s1, s2 = s2, s1
swapped = True
else:
swapped = False
# There cannot be a useful block size if s2 is more than half of s1.
if s2 >= s1/2:
return fallback
# Derivation of optimal block length
# For original formula see:
# https://en.wikipedia.org/wiki/Overlap-add_method
#
# Formula:
# K = overlap = s2-1
# N = block_size
# C = complexity
# e = exponential, exp(1)
#
# C = (N*(log2(N)+1))/(N-K)
# C = (N*log2(2N))/(N-K)
# C = N/(N-K) * log2(2N)
# C1 = N/(N-K)
# C2 = log2(2N) = ln(2N)/ln(2)
#
# dC1/dN = (1*(N-K)-N)/(N-K)^2 = -K/(N-K)^2
# dC2/dN = 2/(2*N*ln(2)) = 1/(N*ln(2))
#
# dC/dN = dC1/dN*C2 + dC2/dN*C1
# dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + N/(N*ln(2)*(N-K))
# dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + 1/(ln(2)*(N-K))
# dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + (N-K)/(ln(2)*(N-K)^2)
    # dC/dN = (-K*ln(2N) + (N-K))/(ln(2)*(N-K)^2)
# dC/dN = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)
#
# Solve for minimum, where dC/dN = 0
# 0 = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)
# 0 * ln(2)*(N-K)^2 = N - K*ln(2N) - K
# 0 = N - K*ln(2N) - K
# 0 = N - K*(ln(2N) + 1)
# 0 = N - K*ln(2Ne)
# N = K*ln(2Ne)
# N/K = ln(2Ne)
#
# e^(N/K) = e^ln(2Ne)
# e^(N/K) = 2Ne
# 1/e^(N/K) = 1/(2*N*e)
# e^(N/-K) = 1/(2*N*e)
# e^(N/-K) = K/N*1/(2*K*e)
# N/K*e^(N/-K) = 1/(2*e*K)
# N/-K*e^(N/-K) = -1/(2*e*K)
#
# Using Lambert W function
# https://en.wikipedia.org/wiki/Lambert_W_function
# x = W(y) It is the solution to y = x*e^x
# x = N/-K
# y = -1/(2*e*K)
#
# N/-K = W(-1/(2*e*K))
#
# N = -K*W(-1/(2*e*K))
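    # Rough worked example (illustrative, values rounded): a 512-tap kernel gives
    # K = 511, so W_{-1}(-1/(2*e*511)) is about -10 and the optimal block length
    # comes out on the order of 5000 samples before rounding up to the next fast
    # FFT size.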
overlap = s2-1
opt_size = -overlap*lambertw(-1/(2*math.e*overlap), k=-1).real
block_size = sp_fft.next_fast_len(math.ceil(opt_size))
# Use conventional FFT convolve if there is only going to be one block.
if block_size >= s1:
return fallback
if not swapped:
in1_step = block_size-s2+1
in2_step = s2
else:
in1_step = s2
in2_step = block_size-s2+1
return block_size, overlap, in1_step, in2_step
def oaconvolve(in1, in2, mode="full", axes=None):
"""Convolve two N-dimensional arrays using the overlap-add method.
Convolve `in1` and `in2` using the overlap-add method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
and generally much faster than `fftconvolve` when one array is much
larger than the other, but can be slower when only a few output values are
needed or when the arrays are very similar in shape, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
axes : int or array_like of ints or None, optional
Axes over which to compute the convolution.
The default is over all axes.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
convolve : Uses the direct convolution or FFT convolution algorithm
depending on which is faster.
fftconvolve : An implementation of convolution using FFT.
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Convolve a 100,000 sample signal with a 512-sample filter.
>>> from scipy import signal
>>> sig = np.random.randn(100000)
>>> filt = signal.firwin(512, 0.01)
>>> fsig = signal.oaconvolve(sig, filt)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(fsig)
>>> ax_mag.set_title('Filtered noise')
>>> fig.tight_layout()
>>> fig.show()
References
----------
.. [1] Wikipedia, "Overlap-add_method".
https://en.wikipedia.org/wiki/Overlap-add_method
.. [2] Richard G. Lyons. Understanding Digital Signal Processing,
Third Edition, 2011. Chapter 13.10.
ISBN 13: 978-0137-02741-5
"""
in1 = np.asarray(in1)
in2 = np.asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return np.array([])
elif in1.shape == in2.shape: # Equivalent to fftconvolve
return fftconvolve(in1, in2, mode=mode, axes=axes)
in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,
sorted_axes=True)
if not axes:
return in1*in2
s1 = in1.shape
s2 = in2.shape
# Calculate this now since in1 is changed later
shape_final = [None if i not in axes else
s1[i] + s2[i] - 1 for i in range(in1.ndim)]
# Calculate the block sizes for the output, steps, first and second inputs.
# It is simpler to calculate them all together than doing them in separate
# loops due to all the special cases that need to be handled.
optimal_sizes = ((-1, -1, s1[i], s2[i]) if i not in axes else
_calc_oa_lens(s1[i], s2[i]) for i in range(in1.ndim))
block_size, overlaps, \
in1_step, in2_step = zip(*optimal_sizes)
# Fall back to fftconvolve if there is only one block in every dimension.
if in1_step == s1 and in2_step == s2:
return fftconvolve(in1, in2, mode=mode, axes=axes)
# Figure out the number of steps and padding.
# This would get too complicated in a list comprehension.
nsteps1 = []
nsteps2 = []
pad_size1 = []
pad_size2 = []
for i in range(in1.ndim):
if i not in axes:
pad_size1 += [(0, 0)]
pad_size2 += [(0, 0)]
continue
if s1[i] > in1_step[i]:
curnstep1 = math.ceil((s1[i]+1)/in1_step[i])
if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]:
curnstep1 += 1
curpad1 = curnstep1*in1_step[i] - s1[i]
else:
curnstep1 = 1
curpad1 = 0
if s2[i] > in2_step[i]:
curnstep2 = math.ceil((s2[i]+1)/in2_step[i])
if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]:
curnstep2 += 1
curpad2 = curnstep2*in2_step[i] - s2[i]
else:
curnstep2 = 1
curpad2 = 0
nsteps1 += [curnstep1]
nsteps2 += [curnstep2]
pad_size1 += [(0, curpad1)]
pad_size2 += [(0, curpad2)]
# Pad the array to a size that can be reshaped to the desired shape
# if necessary.
if not all(curpad == (0, 0) for curpad in pad_size1):
in1 = np.pad(in1, pad_size1, mode='constant', constant_values=0)
if not all(curpad == (0, 0) for curpad in pad_size2):
in2 = np.pad(in2, pad_size2, mode='constant', constant_values=0)
# Reshape the overlap-add parts to input block sizes.
split_axes = [iax+i for i, iax in enumerate(axes)]
fft_axes = [iax+1 for iax in split_axes]
# We need to put each new dimension before the corresponding dimension
# being reshaped in order to get the data in the right layout at the end.
reshape_size1 = list(in1_step)
reshape_size2 = list(in2_step)
for i, iax in enumerate(split_axes):
reshape_size1.insert(iax, nsteps1[i])
reshape_size2.insert(iax, nsteps2[i])
in1 = in1.reshape(*reshape_size1)
in2 = in2.reshape(*reshape_size2)
# Do the convolution.
fft_shape = [block_size[i] for i in axes]
ret = _freq_domain_conv(in1, in2, fft_axes, fft_shape, calc_fast_len=False)
# Do the overlap-add.
for ax, ax_fft, ax_split in zip(axes, fft_axes, split_axes):
overlap = overlaps[ax]
if overlap is None:
continue
ret, overpart = np.split(ret, [-overlap], ax_fft)
overpart = np.split(overpart, [-1], ax_split)[0]
ret_overpart = np.split(ret, [overlap], ax_fft)[0]
ret_overpart = np.split(ret_overpart, [1], ax_split)[1]
ret_overpart += overpart
# Reshape back to the correct dimensionality.
shape_ret = [ret.shape[i] if i not in fft_axes else
ret.shape[i]*ret.shape[i-1]
for i in range(ret.ndim) if i not in split_axes]
ret = ret.reshape(*shape_ret)
# Slice to the correct size.
slice_final = tuple([slice(islice) for islice in shape_final])
ret = ret[slice_final]
return _apply_conv_mode(ret, s1, s2, mode, axes)
def _numeric_arrays(arrays, kinds='buifc'):
"""
See if a list of arrays are all numeric.
Parameters
----------
    arrays : ndarray or list of ndarrays
        Arrays to check.
    kinds : string-like
        The dtype kinds that count as numeric. If the ``dtype.kind`` of
        every array is in this string the function returns True, otherwise
        it returns False.
"""
if type(arrays) == np.ndarray:
return arrays.dtype.kind in kinds
for array_ in arrays:
if array_.dtype.kind not in kinds:
return False
return True
def _prod(iterable):
"""
Product of a list of numbers.
Faster than np.prod for short lists like array shapes.
"""
product = 1
for x in iterable:
product *= x
return product
def _conv_ops(x_shape, h_shape, mode):
"""
Find the number of operations required for direct/fft methods of
    convolution. The direct-method counts were measured with a dummy class
    that overrides ``__mul__`` and ``__add__`` to count the operations.
The FFT operations rely on the (well-known) computational complexity of the
FFT (and the implementation of ``_freq_domain_conv``).
"""
if mode == "full":
out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
elif mode == "valid":
out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)]
elif mode == "same":
out_shape = x_shape
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full', not mode={}".format(mode))
s1, s2 = x_shape, h_shape
if len(x_shape) == 1:
s1, s2 = s1[0], s2[0]
if mode == "full":
direct_ops = s1 * s2
elif mode == "valid":
direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2
elif mode == "same":
direct_ops = s1 * s2 if s1 < s2 else s1 * s2 - (s2 // 2) * ((s2 + 1) // 2)
else:
if mode == "full":
direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
elif mode == "valid":
direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
elif mode == "same":
direct_ops = _prod(s1) * _prod(s2)
full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
N = _prod(full_out_shape)
fft_ops = 3 * N * np.log(N) # 3 separate FFTs of size full_out_shape
return fft_ops, direct_ops
def _fftconv_faster(x, h, mode):
"""
See if using fftconvolve or convolve is faster.
Parameters
----------
x : np.ndarray
Signal
h : np.ndarray
Kernel
mode : str
Mode passed to convolve
Returns
-------
fft_faster : bool
Notes
-----
See docstring of `choose_conv_method` for details on tuning hardware.
See pull request 11031 for more detail:
https://github.com/scipy/scipy/pull/11031.
"""
fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode)
offset = -1e-3 if x.ndim == 1 else -1e-4
constants = {
"valid": (1.89095737e-9, 2.1364985e-10, offset),
"full": (1.7649070e-9, 2.1414831e-10, offset),
"same": (3.2646654e-9, 2.8478277e-10, offset)
if h.size <= x.size
else (3.21635404e-9, 1.1773253e-8, -1e-5),
} if x.ndim == 1 else {
"valid": (1.85927e-9, 2.11242e-8, offset),
"full": (1.99817e-9, 1.66174e-8, offset),
"same": (2.04735e-9, 1.55367e-8, offset),
}
O_fft, O_direct, O_offset = constants[mode]
return O_fft * fft_ops < O_direct * direct_ops + O_offset
def _reverse_and_conj(x):
"""
Reverse array `x` in all dimensions and perform the complex conjugate
"""
reverse = (slice(None, None, -1),) * x.ndim
return x[reverse].conj()
def _np_conv_ok(volume, kernel, mode):
"""
See if numpy supports convolution of `volume` and `kernel` (i.e. both are
1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the
size of the larger input, while SciPy's uses the size of the first input.
Invalid mode strings will return False and be caught by the calling func.
"""
if volume.ndim == kernel.ndim == 1:
if mode in ('full', 'valid'):
return True
elif mode == 'same':
return volume.size >= kernel.size
else:
return False
def _timeit_fast(stmt="pass", setup="pass", repeat=3):
"""
Returns the time the statement/function took, in seconds.
Faster, less precise version of IPython's timeit. `stmt` can be a statement
written as a string or a callable.
Will do only 1 loop (like IPython's timeit) with no repetitions
(unlike IPython) for very slow functions. For fast functions, only does
enough loops to take 5 ms, which seems to produce similar results (on
Windows at least), and avoids doing an extraneous cycle that isn't
measured.
"""
timer = timeit.Timer(stmt, setup)
# determine number of calls per rep so total time for 1 rep >= 5 ms
x = 0
for p in range(0, 10):
number = 10**p
x = timer.timeit(number) # seconds
if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one
break
if x > 1: # second
# If it's macroscopic, don't bother with repetitions
best = x
else:
number *= 10
r = timer.repeat(repeat, number)
best = min(r)
sec = best / number
return sec
def choose_conv_method(in1, in2, mode='full', measure=False):
"""
Find the fastest convolution/correlation method.
This primarily exists to be called during the ``method='auto'`` option in
`convolve` and `correlate`. It can also be used to determine the value of
``method`` for many different convolutions of the same dtype/shape.
In addition, it supports timing the convolution to adapt the value of
``method`` to a particular set of inputs and/or hardware.
Parameters
----------
in1 : array_like
The first argument passed into the convolution function.
in2 : array_like
The second argument passed into the convolution function.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
measure : bool, optional
If True, run and time the convolution of `in1` and `in2` with both
methods and return the fastest. If False (default), predict the fastest
method using precomputed values.
Returns
-------
method : str
A string indicating which convolution method is fastest, either
'direct' or 'fft'
times : dict, optional
A dictionary containing the times (in seconds) needed for each method.
This value is only returned if ``measure=True``.
See Also
--------
convolve
correlate
Notes
-----
Generally, this method is 99% accurate for 2D signals and 85% accurate
for 1D signals for randomly chosen input sizes. For precision, use
``measure=True`` to find the fastest method by timing the convolution.
This can be used to avoid the minimal overhead of finding the fastest
``method`` later, or to adapt the value of ``method`` to a particular set
of inputs.
Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this
function. These experiments measured the ratio between the time required
when using ``method='auto'`` and the time required for the fastest method
(i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these
experiments, we found:
* There is a 95% chance of this ratio being less than 1.5 for 1D signals
and a 99% chance of being less than 2.5 for 2D signals.
* The ratio was always less than 2.5/5 for 1D/2D signals respectively.
* This function is most inaccurate for 1D convolutions that take between 1
and 10 milliseconds with ``method='direct'``. A good proxy for this
(at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``.
The 2D results almost certainly generalize to 3D/4D/etc because the
implementation is the same (the 1D implementation is different).
All the numbers above are specific to the EC2 machine. However, we did find
that this function generalizes fairly decently across hardware. The speed
    tests were of similar quality to (and even slightly better than) the
    same tests performed on the machine used to tune this function's numbers
    (a mid-2014 15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7
    processor).
There are cases when `fftconvolve` supports the inputs but this function
    returns `direct` (e.g., to protect against loss of integer precision
    when large integers are represented in floating point).
.. versionadded:: 0.19
Examples
--------
Estimate the fastest method for a given input:
>>> from scipy import signal
>>> img = np.random.rand(32, 32)
>>> filter = np.random.rand(8, 8)
>>> method = signal.choose_conv_method(img, filter, mode='same')
>>> method
'fft'
This can then be applied to other arrays of the same dtype and shape:
>>> img2 = np.random.rand(32, 32)
>>> filter2 = np.random.rand(8, 8)
>>> corr2 = signal.correlate(img2, filter2, mode='same', method=method)
>>> conv2 = signal.convolve(img2, filter2, mode='same', method=method)
The output of this function (``method``) works with `correlate` and
`convolve`.
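
    If exact timings are wanted, ``measure=True`` also returns the measured
    times for both methods (an illustrative call; the values depend on the
    hardware in use):

    >>> method, times = signal.choose_conv_method(img, filter, mode='same',
    ...                                           measure=True)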
"""
volume = np.asarray(in1)
kernel = np.asarray(in2)
if measure:
times = {}
for method in ['fft', 'direct']:
times[method] = _timeit_fast(lambda: convolve(volume, kernel,
mode=mode, method=method))
chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
return chosen_method, times
# for integer input,
# catch when more precision required than float provides (representing an
# integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return 'direct'
if _numeric_arrays([volume, kernel], kinds='b'):
return 'direct'
if _numeric_arrays([volume, kernel]):
if _fftconv_faster(volume, kernel, mode):
return 'fft'
return 'direct'
def convolve(in1, in2, mode='full', method='auto'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the convolution.
``direct``
The convolution is determined directly from sums, the definition of
convolution.
``fft``
The Fourier Transform is used to perform the convolution by calling
`fftconvolve`.
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
choose_conv_method : chooses the fastest appropriate convolution method
fftconvolve : Always uses the FFT method.
oaconvolve : Uses the overlap-add method to do convolution, which is
generally faster when the input arrays are large and
significantly different in size.
Notes
-----
By default, `convolve` and `correlate` use ``method='auto'``, which calls
`choose_conv_method` to choose the fastest method using pre-computed
values (`choose_conv_method` can also measure real-world timing with a
keyword argument). Because `fftconvolve` relies on floating point numbers,
there are certain constraints that may force `method=direct` (more detail
in `choose_conv_method` docstring).
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
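
    As a quick, illustrative check, the direct and FFT methods agree to
    within floating-point accuracy:

    >>> filtered_direct = signal.convolve(sig, win, mode='same',
    ...                                   method='direct') / sum(win)
    >>> np.allclose(filtered, filtered_direct)
    True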
"""
volume = np.asarray(in1)
kernel = np.asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
elif volume.ndim != kernel.ndim:
raise ValueError("volume and kernel should have the same "
"dimensionality")
if _inputs_swap_needed(mode, volume.shape, kernel.shape):
# Convolution is commutative; order doesn't have any effect on output
volume, kernel = kernel, volume
if method == 'auto':
method = choose_conv_method(volume, kernel, mode=mode)
if method == 'fft':
out = fftconvolve(volume, kernel, mode=mode)
result_type = np.result_type(volume, kernel)
if result_type.kind in {'u', 'i'}:
out = np.around(out)
return out.astype(result_type)
elif method == 'direct':
# fastpath to faster numpy.convolve for 1d inputs when possible
if _np_conv_ok(volume, kernel, mode):
return np.convolve(volume, kernel, mode)
return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
else:
raise ValueError("Acceptable method flags are 'auto',"
" 'direct', or 'fft'.")
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-D array.
    Perform an order filter on the input array `a`. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
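
    A median filter corresponds to setting `rank` to half the number of
    non-zero elements of `domain` (this is what `medfilt` does); for
    example, with a full 3x3 window:

    >>> med = signal.order_filter(x, np.ones((3, 3)), 4)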
"""
domain = np.asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`. The array will automatically be zero-padded.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
See also
--------
scipy.ndimage.median_filter
Notes
    -----
The more general function `scipy.ndimage.median_filter` has a more
efficient implementation of a median filter and therefore runs much faster.
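    Examples
    --------
    A small illustration: with ``kernel_size=3`` each output sample is the
    median of the corresponding input sample and its two neighbours (the
    input is zero-padded at the ends):

    >>> from scipy import signal
    >>> x = np.array([2., 6., 5., 4., 0., 3., 5., 7., 9., 2.])
    >>> y = signal.medfilt(x, kernel_size=3)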
"""
volume = np.atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * volume.ndim
kernel_size = np.asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(volume.ndim):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = np.ones(kernel_size)
numels = np.prod(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
Examples
--------
>>> from scipy.misc import face
>>> from scipy.signal.signaltools import wiener
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> img = np.random.random((40, 40)) #Create a random image
>>> filtered_img = wiener(img, (5, 5)) #Filter the image
>>> f, (plot1, plot2) = plt.subplots(1, 2)
>>> plot1.imshow(img)
>>> plot2.imshow(filtered_img)
>>> plt.show()
Notes
-----
This implementation is similar to wiener2 in Matlab/Octave.
For more details see [1]_
References
----------
.. [1] Lim, Jae S., Two-Dimensional Signal and Image Processing,
Englewood Cliffs, NJ, Prentice Hall, 1990, p. 548.
"""
im = np.asarray(im)
if mysize is None:
mysize = [3] * im.ndim
mysize = np.asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, np.ones(mysize), 'same') / np.prod(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, np.ones(mysize), 'same') /
np.prod(mysize, axis=0) - lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = np.mean(np.ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = np.where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> ascent = misc.ascent()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
>>> ax_orig.imshow(ascent, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = np.asarray(in1)
in2 = np.asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('convolve2d inputs must both be 2-D arrays')
if _inputs_swap_needed(mode, in1.shape, in2.shape):
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Notes
-----
When using "same" mode with even-length inputs, the outputs of `correlate`
and `correlate2d` differ: There is a 1-index offset between them.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = np.asarray(in1)
in2 = np.asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('correlate2d inputs must both be 2-D arrays')
swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
if swapped_inputs:
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue)
if swapped_inputs:
out = out[::-1, ::-1]
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd). The array is zero-padded
automatically.
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
See also
--------
scipy.ndimage.median_filter
Notes
    -----
The more general function `scipy.ndimage.median_filter` has a more
efficient implementation of a median filter and therefore runs much faster.
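    Examples
    --------
    A minimal sketch using the default 3 x 3 window (the array is
    zero-padded at the borders):

    >>> from scipy import signal
    >>> x = np.arange(25, dtype=np.float64).reshape(5, 5)
    >>> y = signal.medfilt2d(x)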
"""
image = np.asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = np.asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
The function `sosfilt` (and filter design using ``output='sos'``) should be
preferred over `lfilter` for most filtering tasks, as second-order sections
have fewer numerical problems.
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a), len(b)) - 1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
lfiltic : Construct initial conditions for `lfilter`.
lfilter_zi : Compute initial state (steady state of step response) for
`lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
savgol_filter : A Savitzky-Golay filter.
sosfilt: Filter data using cascaded second-order sections.
sosfiltfilt: A forward-backward filter using second-order sections.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
- a[1]*y[n-1] - ... - a[N]*y[n-N]
where `M` is the degree of the numerator, `N` is the degree of the
denominator, and `n` is the sample number. It is implemented using
the following difference equations (assuming M = N)::
a[0]*y[n] = b[0] * x[n] + d[0][n-1]
d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
...
d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
d[N-1][n] = b[N] * x[n] - a[N] * y[n]
where `d` are the state variables.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -M
b[0] + b[1]z + ... + b[M] z
Y(z) = -------------------------------- X(z)
-1 -N
a[0] + a[1]z + ... + a[N] z
Examples
--------
Generate a noisy signal to be filtered:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 201)
>>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
... 0.1*np.sin(2*np.pi*1.25*t + 1) +
... 0.18*np.cos(2*np.pi*3.85*t))
>>> xn = x + np.random.randn(len(t)) * 0.08
Create an order 3 lowpass butterworth filter:
>>> b, a = signal.butter(3, 0.05)
Apply the filter to xn. Use lfilter_zi to choose the initial condition of
the filter:
>>> zi = signal.lfilter_zi(b, a)
>>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
Apply the filter again, to have a result filtered at an order the same as
filtfilt:
>>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
Use filtfilt to apply the filter:
>>> y = signal.filtfilt(b, a, xn)
Plot the original signal and the various filtered versions:
>>> plt.figure
>>> plt.plot(t, xn, 'b', alpha=0.75)
>>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
>>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
... 'filtfilt'), loc='best')
>>> plt.grid(True)
>>> plt.show()
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = _validate_x(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of
# singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
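        # FIR fast path: apply the filter with a full convolution along
        # `axis`; the trailing ``len(b) - 1`` samples of the full output are
        # the final filter state ``zf``.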
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[tuple(ind)] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[tuple(ind)]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[tuple(ind)]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter given input and output vectors.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
where ``K = max(M, N)``.
See Also
--------
lfilter, lfilter_zi
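    Examples
    --------
    An illustrative sketch (the sample values are arbitrary): construct the
    state left behind by earlier samples and continue filtering from it:

    >>> from scipy import signal
    >>> b, a = signal.butter(2, 0.25)
    >>> zi = signal.lfiltic(b, a, y=[0.5, -0.2], x=[1.0, 0.3])
    >>> y_new, zf = signal.lfilter(b, a, [0.1, 0.2, 0.3], zi=zi)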
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = np.asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = np.zeros(K, y.dtype)
if x is None:
x = np.zeros(M, y.dtype)
else:
x = np.asarray(x)
L = np.size(x)
if L < M:
x = np.r_[x, np.zeros(M - L)]
L = np.size(y)
if L < N:
y = np.r_[y, np.zeros(N - L)]
for m in range(M):
zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal`` using inverse filtering.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
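
    The identity stated above can be verified directly (illustrative check):

    >>> np.allclose(signal.convolve(impulse_response, recovered) + remainder,
    ...             recorded)
    True
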
See Also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = np.atleast_1d(signal)
den = np.atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = np.zeros(N - D + 1, float)
input[0] = 1
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
    --------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
    We create a chirp whose frequency increases from 20 Hz to 100 Hz and
    apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
    The amplitude envelope is given by the magnitude of the analytic signal.
    The instantaneous frequency can be obtained by differentiating the
    instantaneous phase with respect to time. The instantaneous phase corresponds
to the phase angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = (np.diff(instantaneous_phase) /
... (2.0*np.pi) * fs)
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
https://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
Processing, Third Edition, 2009. Chapter 12.
ISBN 13: 978-1292-02572-8
"""
x = np.asarray(x)
if np.iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = sp_fft.fft(x, N, axis=axis)
h = np.zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if x.ndim > 1:
ind = [np.newaxis] * x.ndim
ind[axis] = slice(None)
h = h[tuple(ind)]
x = sp_fft.ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
https://en.wikipedia.org/wiki/Analytic_signal
"""
x = np.atleast_2d(x)
if x.ndim > 2:
raise ValueError("x must be 2-D.")
if np.iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = sp_fft.fft2(x, N, axes=(0, 1))
h1 = np.zeros(N[0], 'd')
h2 = np.zeros(N[1], 'd')
    # Build the one-sided spectral multipliers for each axis. Item and slice
    # assignment modify h1 and h2 in place, so no eval/exec machinery is
    # needed.
    for p, h in enumerate((h1, h2)):
        N1 = N[p]
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
h = h1[:, np.newaxis] * h2[np.newaxis, :]
k = x.ndim
while k > 2:
h = h[:, np.newaxis]
k -= 1
x = sp_fft.ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
Examples
--------
>>> from scipy import signal
>>> vals = [1, 4, 1+1.j, 3]
>>> p_sorted, indx = signal.cmplx_sort(vals)
>>> p_sorted
array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j])
>>> indx
array([0, 2, 3, 1])
"""
p = np.asarray(p)
indx = np.argsort(abs(p))
return np.take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal in terms of
the distance between them. Default is 1e-3. Refer to Notes about
the details on roots grouping.
rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max', 'maximum': pick the maximum of those roots
- 'min', 'minimum': pick the minimum of those roots
- 'avg', 'mean': take the average of those roots
When finding minimum or maximum among complex roots they are compared
first by the real part and then by the imaginary part.
Returns
-------
unique : ndarray
The list of unique roots.
multiplicity : ndarray
The multiplicity of each root.
Notes
-----
If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to
``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it
doesn't necessarily mean that ``a`` is close to ``c``. It means that roots
grouping is not unique. In this function we use "greedy" grouping going
through the roots in the order they are given in the input `p`.
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
reduce = np.max
elif rtype in ['min', 'minimum']:
reduce = np.min
elif rtype in ['avg', 'mean']:
reduce = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = np.asarray(p)
points = np.empty((len(p), 2))
points[:, 0] = np.real(p)
points[:, 1] = np.imag(p)
tree = cKDTree(points)
p_unique = []
p_multiplicity = []
used = np.zeros(len(p), dtype=bool)
for i in range(len(p)):
if used[i]:
continue
group = tree.query_ball_point(points[i], tol)
group = [x for x in group if not used[x]]
p_unique.append(reduce(p[group]))
p_multiplicity.append(len(group))
used[group] = True
return np.asarray(p_unique), np.asarray(p_multiplicity)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""Compute b(s) and a(s) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `invresz`.
Parameters
----------
r : array_like
Residues corresponding to the poles. For repeated poles, the residues
        must be ordered to correspond to the fractions in ascending order of
        power.
p : array_like
Poles. Equal poles must be adjacent.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal in terms of
the distance between them. Default is 1e-3. See `unique_roots`
for further details.
rtype : {'avg', 'min', 'max'}, optional
Method for computing a root to represent a group of identical roots.
Default is 'avg'. See `unique_roots` for further details.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residue, invresz, unique_roots
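    Examples
    --------
    An illustrative round trip: expand a rational function with `residue`
    and reassemble it (the recovered coefficients match the originals up to
    floating-point error):

    >>> from scipy import signal
    >>> r, p, k = signal.residue([1, 2], [1, 5, 6])
    >>> b, a = signal.invres(r, p, k)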
"""
r = np.atleast_1d(r)
p = np.atleast_1d(p)
k = np.trim_zeros(np.atleast_1d(k), 'f')
unique_poles, multiplicity = _group_poles(p, tol, rtype)
factors, denominator = _compute_factors(unique_poles, multiplicity,
include_powers=True)
if len(k) == 0:
numerator = 0
else:
numerator = np.polymul(k, denominator)
for residue, factor in zip(r, factors):
numerator = np.polyadd(numerator, residue * factor)
return numerator, denominator
def _compute_factors(roots, multiplicity, include_powers=False):
"""Compute the total polynomial divided by factors for each root."""
current = np.array([1])
suffixes = [current]
for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]):
monomial = np.array([1, -pole])
for _ in range(mult):
current = np.polymul(current, monomial)
suffixes.append(current)
suffixes = suffixes[::-1]
factors = []
current = np.array([1])
for pole, mult, suffix in zip(roots, multiplicity, suffixes):
monomial = np.array([1, -pole])
block = []
for i in range(mult):
if i == 0 or include_powers:
block.append(np.polymul(current, suffix))
current = np.polymul(current, monomial)
factors.extend(reversed(block))
return factors, current
def _compute_residues(poles, multiplicity, numerator):
denominator_factors, _ = _compute_factors(poles, multiplicity)
numerator = numerator.astype(poles.dtype)
residues = []
for pole, mult, factor in zip(poles, multiplicity,
denominator_factors):
if mult == 1:
residues.append(np.polyval(numerator, pole) /
np.polyval(factor, pole))
else:
numer = numerator.copy()
monomial = np.array([1, -pole])
factor, d = np.polydiv(factor, monomial)
block = []
for _ in range(mult):
numer, n = np.polydiv(numer, monomial)
r = n[0] / d[0]
numer = np.polysub(numer, r * factor)
block.append(r)
residues.extend(reversed(block))
return np.asarray(residues)
def residue(b, a, tol=1e-3, rtype='avg'):
"""Compute partial-fraction expansion of b(s) / a(s).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `residuez`.
See Notes for details about the algorithm.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
tol : float, optional
The tolerance for two roots to be considered equal in terms of
the distance between them. Default is 1e-3. See `unique_roots`
for further details.
rtype : {'avg', 'min', 'max'}, optional
Method for computing a root to represent a group of identical roots.
Default is 'avg'. See `unique_roots` for further details.
Returns
-------
r : ndarray
Residues corresponding to the poles. For repeated poles, the residues
        are ordered to correspond to the fractions in ascending order of
        power.
p : ndarray
Poles ordered by magnitude in ascending order.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, residuez, numpy.poly, unique_roots
Notes
-----
The "deflation through subtraction" algorithm is used for
computations --- method 6 in [1]_.
The form of partial fraction expansion depends on poles multiplicity in
the exact mathematical sense. However there is no way to exactly
determine multiplicity of roots of a polynomial in numerical computing.
Thus you should think of the result of `residue` with given `tol` as
partial fraction expansion computed for the denominator composed of the
computed poles with empirically determined multiplicity. The choice of
`tol` can drastically change the result if there are close poles.
References
----------
.. [1] J. F. Mahoney, B. D. Sivazlian, "Partial fractions expansion: a
review of computational methodology and efficiency", Journal of
Computational and Applied Mathematics, Vol. 9, 1983.
"""
b = np.asarray(b)
a = np.asarray(a)
if (np.issubdtype(b.dtype, np.complexfloating)
or np.issubdtype(a.dtype, np.complexfloating)):
b = b.astype(complex)
a = a.astype(complex)
else:
b = b.astype(float)
a = a.astype(float)
b = np.trim_zeros(np.atleast_1d(b), 'f')
a = np.trim_zeros(np.atleast_1d(a), 'f')
if a.size == 0:
raise ValueError("Denominator `a` is zero.")
poles = np.roots(a)
if b.size == 0:
return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])
if len(b) < len(a):
k = np.empty(0)
else:
k, b = np.polydiv(b, a)
unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)
unique_poles, order = cmplx_sort(unique_poles)
multiplicity = multiplicity[order]
residues = _compute_residues(unique_poles, multiplicity, b)
index = 0
for pole, mult in zip(unique_poles, multiplicity):
poles[index:index + mult] = pole
index += mult
return residues / a[0], poles, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""Compute partial-fraction expansion of b(z) / a(z).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `residue`.
See Notes of `residue` for details about the algorithm.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
tol : float, optional
The tolerance for two roots to be considered equal in terms of
the distance between them. Default is 1e-3. See `unique_roots`
for further details.
rtype : {'avg', 'min', 'max'}, optional
Method for computing a root to represent a group of identical roots.
Default is 'avg'. See `unique_roots` for further details.
Returns
-------
r : ndarray
Residues corresponding to the poles. For repeated poles, the residues
        are ordered to correspond to the fractions in ascending order of
        power.
p : ndarray
Poles ordered by magnitude in ascending order.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invresz, residue, unique_roots
"""
b = np.asarray(b)
a = np.asarray(a)
if (np.issubdtype(b.dtype, np.complexfloating)
or np.issubdtype(a.dtype, np.complexfloating)):
b = b.astype(complex)
a = a.astype(complex)
else:
b = b.astype(float)
a = a.astype(float)
b = np.trim_zeros(np.atleast_1d(b), 'b')
a = np.trim_zeros(np.atleast_1d(a), 'b')
if a.size == 0:
raise ValueError("Denominator `a` is zero.")
elif a[0] == 0:
raise ValueError("First coefficient of determinant `a` must be "
"non-zero.")
poles = np.roots(a)
if b.size == 0:
return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])
b_rev = b[::-1]
a_rev = a[::-1]
if len(b_rev) < len(a_rev):
k_rev = np.empty(0)
else:
k_rev, b_rev = np.polydiv(b_rev, a_rev)
unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)
unique_poles, order = cmplx_sort(unique_poles)
multiplicity = multiplicity[order]
residues = _compute_residues(1 / unique_poles, multiplicity, b_rev)
index = 0
powers = np.empty(len(residues), dtype=int)
for pole, mult in zip(unique_poles, multiplicity):
poles[index:index + mult] = pole
powers[index:index + mult] = 1 + np.arange(mult)
index += mult
residues *= (-poles) ** powers / a_rev[0]
return residues, poles, k_rev[::-1]
def _group_poles(poles, tol, rtype):
if rtype in ['max', 'maximum']:
reduce = np.max
elif rtype in ['min', 'minimum']:
reduce = np.min
elif rtype in ['avg', 'mean']:
reduce = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
unique = []
multiplicity = []
pole = poles[0]
block = [pole]
for i in range(1, len(poles)):
if abs(poles[i] - pole) <= tol:
block.append(pole)
else:
unique.append(reduce(block))
multiplicity.append(len(block))
pole = poles[i]
block = [pole]
unique.append(reduce(block))
multiplicity.append(len(block))
return np.asarray(unique), np.asarray(multiplicity)
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""Compute b(z) and a(z) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `invres`.
Parameters
----------
r : array_like
Residues corresponding to the poles. For repeated poles, the residues
        must be ordered to correspond to the fractions in ascending order of
        power.
p : array_like
Poles. Equal poles must be adjacent.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal in terms of
the distance between them. Default is 1e-3. See `unique_roots`
for further details.
rtype : {'avg', 'min', 'max'}, optional
Method for computing a root to represent a group of identical roots.
Default is 'avg'. See `unique_roots` for further details.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residuez, unique_roots, invres
"""
r = np.atleast_1d(r)
p = np.atleast_1d(p)
k = np.trim_zeros(np.atleast_1d(k), 'b')
unique_poles, multiplicity = _group_poles(p, tol, rtype)
factors, denominator = _compute_factors(unique_poles, multiplicity,
include_powers=True)
if len(k) == 0:
numerator = 0
else:
numerator = np.polymul(k[::-1], denominator[::-1])
for residue, factor in zip(r, factors):
numerator = np.polyadd(numerator, residue * factor[::-1])
return numerator[::-1], denominator
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the equally spaced sample
positions associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
    indicating the frequency bins (i.e., ``fftfreq(x.shape[axis])``).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it is used solely to calculate the resampled
    positions `resampled_t`.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fft.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = np.asarray(x)
Nx = x.shape[axis]
# Check if we can use faster real FFT
real_input = np.isrealobj(x)
# Forward transform
if real_input:
X = sp_fft.rfft(x, axis=axis)
else: # Full complex FFT
X = sp_fft.fft(x, axis=axis)
# Apply window to spectrum
if window is not None:
if callable(window):
W = window(sp_fft.fftfreq(Nx))
elif isinstance(window, np.ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = sp_fft.ifftshift(get_window(window, Nx))
newshape_W = [1] * x.ndim
newshape_W[axis] = X.shape[axis]
if real_input:
# Fold the window back on itself to mimic complex behavior
W_real = W.copy()
W_real[1:] += W_real[-1:0:-1]
W_real[1:] *= 0.5
X *= W_real[:newshape_W[axis]].reshape(newshape_W)
else:
X *= W.reshape(newshape_W)
# Copy each half of the original spectrum to the output spectrum, either
    # truncating high frequencies (downsampling) or zero-padding them
# (upsampling)
# Placeholder array for output spectrum
newshape = list(x.shape)
if real_input:
newshape[axis] = num // 2 + 1
else:
newshape[axis] = num
Y = np.zeros(newshape, X.dtype)
# Copy positive frequency components (and Nyquist, if present)
N = min(num, Nx)
nyq = N // 2 + 1 # Slice index that includes Nyquist if present
sl = [slice(None)] * x.ndim
sl[axis] = slice(0, nyq)
Y[tuple(sl)] = X[tuple(sl)]
if not real_input:
# Copy negative frequency components
if N > 2: # (slice expression doesn't collapse to empty array)
sl[axis] = slice(nyq - N, None)
Y[tuple(sl)] = X[tuple(sl)]
# Split/join Nyquist component(s) if present
# So far we have set Y[+N/2]=X[+N/2]
if N % 2 == 0:
if num < Nx: # downsampling
if real_input:
sl[axis] = slice(N//2, N//2 + 1)
Y[tuple(sl)] *= 2.
else:
# select the component of Y at frequency +N/2,
# add the component of X at -N/2
sl[axis] = slice(-N//2, -N//2 + 1)
Y[tuple(sl)] += X[tuple(sl)]
elif Nx < num: # upsampling
# select the component at frequency +N/2 and halve it
sl[axis] = slice(N//2, N//2 + 1)
Y[tuple(sl)] *= 0.5
if not real_input:
temp = Y[tuple(sl)]
# set the component at -N/2 equal to the component at +N/2
sl[axis] = slice(num-N//2, num-N//2 + 1)
Y[tuple(sl)] = temp
# Inverse transform
if real_input:
y = sp_fft.irfft(Y, num, axis=axis)
else:
y = sp_fft.ifft(Y, axis=axis, overwrite_x=True)
y *= (float(num) / float(Nx))
if t is None:
return y
else:
new_t = np.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0),
padtype='constant', cval=None):
"""
Resample `x` along the given axis using polyphase filtering.
The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. By default, values beyond the boundary of the signal are assumed
to be zero during the filtering step.
Parameters
----------
x : array_like
The data to be resampled.
up : int
The upsampling factor.
down : int
The downsampling factor.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : string, tuple, or array_like, optional
Desired window to use to design the low-pass filter, or the FIR filter
coefficients to employ. See below for details.
padtype : string, optional
`constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of
the other signal extension modes supported by `scipy.signal.upfirdn`.
Changes assumptions on values beyond the boundary. If `constant`,
assumed to be `cval` (default zero). If `line` assumed to continue a
linear trend defined by the first and last points. `mean`, `median`,
`maximum` and `minimum` work as in `np.pad` and assume that the values
beyond the boundary are the mean, median, maximum or minimum
respectively of the array along the axis.
.. versionadded:: 1.4.0
cval : float, optional
Value to use if `padtype='constant'`. Default is zero.
.. versionadded:: 1.4.0
Returns
-------
resampled_x : array
The resampled array.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample : Resample up or down using the FFT method.
Notes
-----
This polyphase method will likely be faster than the Fourier method
in `scipy.signal.resample` when the number of samples is large and
prime, or when the number of samples is large and `up` and `down`
share a large greatest common divisor. The length of the FIR
filter used will depend on ``max(up, down) // gcd(up, down)``, and
the number of operations during polyphase filtering will depend on
the filter length and `down` (see `scipy.signal.upfirdn` for details).
The argument `window` specifies the FIR low-pass filter design.
If `window` is an array_like it is assumed to be the FIR filter
coefficients. Note that the FIR filter is applied after the upsampling
step, so it should be designed to operate on a signal at a sampling
frequency higher than the original by a factor of `up//gcd(up, down)`.
This function's output will be centered with respect to this array, so it
is best to pass a symmetric filter with an odd number of samples if, as
is usually the case, a zero-phase filter is desired.
For any other type of `window`, the functions `scipy.signal.get_window`
and `scipy.signal.firwin` are called to generate the appropriate filter
coefficients.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * down / float(up)``.
Examples
--------
By default, the end of the resampled data rises to meet the first
sample of the next cycle for the FFT method, and gets closer to zero
for the polyphase method:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f_fft = signal.resample(y, 100)
>>> f_poly = signal.resample_poly(y, 100, 20)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
>>> plt.plot(x, y, 'ko-')
>>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
>>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
>>> plt.show()
This default behaviour can be changed by using the padtype option:
>>> import numpy as np
>>> from scipy import signal
>>> N = 5
>>> x = np.linspace(0, 1, N, endpoint=False)
>>> y = 2 + x**2 - 1.7*np.sin(x) + .2*np.cos(11*x)
>>> y2 = 1 + x**3 + 0.1*np.sin(x) + .1*np.cos(11*x)
>>> Y = np.stack([y, y2], axis=-1)
>>> up = 4
>>> xr = np.linspace(0, 1, N*up, endpoint=False)
>>> y2 = signal.resample_poly(Y, up, 1, padtype='constant')
>>> y3 = signal.resample_poly(Y, up, 1, padtype='mean')
>>> y4 = signal.resample_poly(Y, up, 1, padtype='line')
>>> import matplotlib.pyplot as plt
>>> for i in [0,1]:
... plt.figure()
... plt.plot(xr, y4[:,i], 'g.', label='line')
... plt.plot(xr, y3[:,i], 'y.', label='mean')
... plt.plot(xr, y2[:,i], 'r.', label='constant')
... plt.plot(x, Y[:,i], 'k-')
... plt.legend()
>>> plt.show()
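A further hedged sketch (not part of the original docstring): rational
rate conversion by 3/2 keeps the first sample and returns
``ceil(len(x) * up / down)`` samples.
>>> x = np.arange(10, dtype=float)
>>> signal.resample_poly(x, 3, 2).shape
(15,)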
"""
x = np.asarray(x)
if up != int(up):
raise ValueError("up must be an integer")
if down != int(down):
raise ValueError("down must be an integer")
up = int(up)
down = int(down)
if up < 1 or down < 1:
raise ValueError('up and down must be >= 1')
if cval is not None and padtype != 'constant':
raise ValueError('cval has no effect when padtype is %r' % padtype)
# Determine our up and down factors
# Use a rational approximation to save computation time on really long
# signals
g_ = math.gcd(up, down)
up //= g_
down //= g_
if up == down == 1:
return x.copy()
n_in = x.shape[axis]
n_out = n_in * up
n_out = n_out // down + bool(n_out % down)
if isinstance(window, (list, np.ndarray)):
window = np.array(window) # use array to force a copy (we modify it)
if window.ndim > 1:
raise ValueError('window must be 1-D')
half_len = (window.size - 1) // 2
h = window
else:
# Design a linear-phase low-pass FIR filter
max_rate = max(up, down)
f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
half_len = 10 * max_rate # reasonable cutoff for our sinc-like function
h = firwin(2 * half_len + 1, f_c, window=window)
h *= up
# Zero-pad our filter to put the output samples at the center
n_pre_pad = (down - half_len % down)
n_post_pad = 0
n_pre_remove = (half_len + n_pre_pad) // down
# We should rarely need to do this given our filter lengths...
while _output_len(len(h) + n_pre_pad + n_post_pad, n_in,
up, down) < n_out + n_pre_remove:
n_post_pad += 1
h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h,
np.zeros(n_post_pad, dtype=h.dtype)))
n_pre_remove_end = n_pre_remove + n_out
# Remove background depending on the padtype option
funcs = {'mean': np.mean, 'median': np.median,
'minimum': np.amin, 'maximum': np.amax}
upfirdn_kwargs = {'mode': 'constant', 'cval': 0}
if padtype in funcs:
background_values = funcs[padtype](x, axis=axis, keepdims=True)
elif padtype in _upfirdn_modes:
upfirdn_kwargs = {'mode': padtype}
if padtype == 'constant':
if cval is None:
cval = 0
upfirdn_kwargs['cval'] = cval
else:
raise ValueError(
'padtype must be one of: maximum, mean, median, minimum, ' +
', '.join(_upfirdn_modes))
if padtype in funcs:
x = x - background_values
# filter then remove excess
y = upfirdn(h, x, up, down, axis=axis, **upfirdn_kwargs)
keep = [slice(None), ]*x.ndim
keep[axis] = slice(n_pre_remove, n_pre_remove_end)
y_keep = y[tuple(keep)]
# Add background back
if padtype in funcs:
y_keep += background_values
return y_keep
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
:doi:`10.1063/1.3670512`.
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
:doi:`10.1007/s00422-013-0560-8`.
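Examples
--------
A minimal sketch (not part of the original docstring): events locked to
the period give a strength near 1, events spread uniformly over the
period give a strength near 0.
>>> import numpy as np
>>> from scipy.signal import vectorstrength
>>> strength, phase = vectorstrength(np.array([0.0, 1.0, 2.0, 3.0]), 1.0)
>>> print(round(strength, 3))
1.0
>>> strength, phase = vectorstrength(np.array([0.0, 0.25, 0.5, 0.75]), 1.0)
>>> print(round(strength, 3))
0.0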
'''
events = np.asarray(events)
period = np.asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = np.atleast_2d(events)
period = np.atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = np.exp(np.dot(2j*np.pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = np.mean(vectors, axis=1)
strength = abs(vectormean)
phase = np.angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
def detrend(data, axis=-1, type='linear', bp=0, overwrite_data=False):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`. This parameter
only has an effect when ``type == 'linear'``.
overwrite_data : bool, optional
If True, perform in-place detrending and avoid a copy. Default is False.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
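A further hedged sketch (not part of the original docstring): with a
break point, each piece of an exactly piecewise-linear signal is removed
to within numerical precision.
>>> x2 = np.r_[np.arange(500.), 500. - np.arange(500.)]
>>> bool(np.abs(signal.detrend(x2, bp=[500])).max() < 1e-8)
True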
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = np.asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - np.expand_dims(np.mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = np.sort(np.unique(np.r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = np.r_[axis, 0:axis, axis + 1:rnk]
newdata = np.reshape(np.transpose(data, tuple(newdims)),
(N, _prod(dshape) // N))
if not overwrite_data:
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = np.ones((Npts, 2), dtype)
A[:, 0] = (np.arange(1, Npts + 1) * 1.0 / Npts).astype(dtype)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - np.dot(A, coef)
# Put data back in original shape.
tdshape = np.take(dshape, newdims, 0)
ret = np.reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = np.transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Construct initial conditions for lfilter for step response steady-state.
Compute an initial state `zi` for the `lfilter` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
See Also
--------
lfilter, lfiltic, filtfilt
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
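As a hedged sanity check (not part of the original docstring), the
returned state satisfies the fixed-point relation ``zi = A*zi + B``
described in the Notes above:
>>> import numpy as np
>>> from scipy.linalg import companion
>>> A = companion(a).T
>>> B = b[1:] - a[1:] * b[0]
>>> np.allclose(zi, A.dot(zi) + B)
True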
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Construct initial conditions for sosfilt for step response steady-state.
Compute an initial state `zi` for the `sosfilt` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
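A hedged sanity check (not part of the original docstring): with these
initial conditions, a constant input of ones passes through this
unit-DC-gain filter with no start-up transient.
>>> y, _ = signal.sosfilt(sos, np.ones(8), zi=zi)
>>> np.allclose(y, 1.0)
True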
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
.. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. IEEE Transactions on Signal Processing, 44(4):988-992, 1996.
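Examples
--------
A hedged sketch (not part of the original docstring): the public entry
point is ``filtfilt(..., method="gust")``, which returns the same `y`.
>>> import numpy as np
>>> from scipy.signal import butter, filtfilt
>>> b, a = butter(3, 0.2)
>>> x = np.cos(np.linspace(0, 6, 50))
>>> y, x0, x1 = _filtfilt_gust(b, a, x)
>>> np.allclose(y, filtfilt(b, a, x, method='gust'))
True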
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
Apply a digital filter forward and backward to a signal.
This function applies a linear digital filter twice, once forward and
once backwards. The combined filter has zero phase and a filter order
twice that of the original.
The function provides options for handling the edges of the signal.
The function `sosfiltfilt` (and filter design using ``output='sos'``)
should be preferred over `filtfilt` for most filtering tasks, as
second-order sections have fewer numerical problems.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
Notes
-----
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
.. [1] F. Gustafsson, "Determining the initial states in forward-backward
filtering", IEEE Transactions on Signal Processing, Vol. 44, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
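As a final hedged note (not part of the original docstring), padding can
be disabled entirely with ``padtype=None``; the output keeps the shape of
the input, but edge transients are then not suppressed.
>>> y3 = signal.filtfilt(b, a, x, padtype=None)
>>> y3.shape == x.shape
True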
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# method == "pad"
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=max(len(a), len(b)))
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def _validate_pad(padtype, padlen, x, axis, ntaps):
"""Helper to validate padding for filtfilt"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be greater than "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
return edge, ext
def _validate_x(x):
x = np.asarray(x)
if x.ndim == 0:
raise ValueError('x must be at least 1-D')
return x
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections.
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = signal.unit_impulse(700)
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
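A hedged sketch of block processing (not part of the original docstring):
carrying the final conditions ``zf`` into the next call reproduces
filtering the whole signal in one pass.
>>> zi = np.zeros((sos.shape[0], 2))
>>> y1, zf = signal.sosfilt(sos, x[:350], zi=zi)
>>> y2, _ = signal.sosfilt(sos, x[350:], zi=zf)
>>> np.allclose(np.concatenate([y1, y2]), y_sos)
True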
"""
x = _validate_x(x)
sos, n_sections = _validate_sos(sos)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
inputs = [sos, x]
if zi is not None:
inputs.append(np.asarray(zi))
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
if zi is not None:
zi = np.array(zi, dtype) # make a copy so that we can operate in place
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r, got %r.' %
(axis, x.shape, n_sections, x_zi_shape, zi.shape))
return_zi = True
else:
zi = np.zeros(x_zi_shape, dtype=dtype)
return_zi = False
axis = axis % x.ndim # make positive
x = np.moveaxis(x, axis, -1)
zi = np.moveaxis(zi, [0, axis + 1], [-2, -1])
x_shape, zi_shape = x.shape, zi.shape
x = np.reshape(x, (-1, x.shape[-1]))
x = np.array(x, dtype, order='C') # make a copy, can modify in place
zi = np.ascontiguousarray(np.reshape(zi, (-1, n_sections, 2)))
sos = sos.astype(dtype, copy=False)
_sosfilt(sos, x, zi)
x.shape = x_shape
x = np.moveaxis(x, -1, axis)
if return_zi:
zi.shape = zi_shape
zi = np.moveaxis(zi, [-2, -1], [0, axis + 1])
out = (x, zi)
else:
out = x
return out
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
"""
A forward-backward digital filter using cascaded second-order sections.
See `filtfilt` for more complete information about this method.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is::
3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
(sos[:, 5] == 0).sum()))
The extra subtraction at the end attempts to compensate for poles
and zeros at the origin (e.g. for odd-order filters) to yield
equivalent estimates of `padlen` to those of `filtfilt` for
second-order section filters built with `scipy.signal` functions.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
filtfilt, sosfilt, sosfilt_zi, sosfreqz
Notes
-----
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy.signal import sosfiltfilt, butter
>>> import matplotlib.pyplot as plt
Create an interesting signal to filter.
>>> n = 201
>>> t = np.linspace(0, 1, n)
>>> np.random.seed(123)
>>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*np.random.randn(n)
Create a lowpass Butterworth filter, and use it to filter `x`.
>>> sos = butter(4, 0.125, output='sos')
>>> y = sosfiltfilt(sos, x)
For comparison, apply an 8th order filter using `sosfilt`. The filter
is initialized using the mean of the first four values of `x`.
>>> from scipy.signal import sosfilt, sosfilt_zi
>>> sos8 = butter(8, 0.125, output='sos')
>>> zi = x[:4].mean() * sosfilt_zi(sos8)
>>> y2, zo = sosfilt(sos8, x, zi=zi)
Plot the results. Note that the phase of `y` matches the input, while
`y2` has a significant phase delay.
>>> plt.plot(t, x, alpha=0.5, label='x(t)')
>>> plt.plot(t, y, label='y(t)')
>>> plt.plot(t, y2, label='y2(t)')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.xlabel('t')
>>> plt.show()
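As a hedged cross-check (not part of the original docstring), the result
agrees with `filtfilt` applied to the equivalent transfer function for
this low-order filter.
>>> from scipy.signal import filtfilt, sos2tf
>>> b, a = sos2tf(sos)
>>> np.allclose(y, filtfilt(b, a, x))
True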
"""
sos, n_sections = _validate_sos(sos)
x = _validate_x(x)
# `method` is "pad"...
ntaps = 2 * n_sections + 1
ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=ntaps)
# These steps follow the same form as filtfilt with modifications
zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
zi_shape = [1] * x.ndim
zi_shape[axis] = 2
zi.shape = [n_sections] + zi_shape
x_0 = axis_slice(ext, stop=1, axis=axis)
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
y_0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
y = axis_reverse(y, axis=axis)
if edge > 0:
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):
"""
Downsample the signal after applying an anti-aliasing filter.
By default, an order 8 Chebyshev type I filter is used. An FIR filter of
order ``20*q`` with a Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : array_like
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor. When using IIR downsampling, it is recommended
to call `decimate` multiple times for downsampling factors higher than
13.
n : int, optional
The order of the filter (1 less than the length for 'fir'). Defaults to
8 for 'iir' and 20 times the downsampling factor for 'fir'.
ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
If 'iir' or 'fir', specifies the type of lowpass filter. If an instance
of an `dlti` object, uses that object to filter before downsampling.
axis : int, optional
The axis along which to decimate.
zero_phase : bool, optional
Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
when using an IIR filter, and shifting the outputs back by the filter's
group delay when using an FIR filter. The default value of ``True`` is
recommended, since a phase shift is generally not desired.
.. versionadded:: 0.18.0
Returns
-------
y : ndarray
The down-sampled signal.
See Also
--------
resample : Resample up or down using the FFT method.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The ``zero_phase`` keyword was added in 0.18.0.
The possibility to use instances of ``dlti`` as ``ftype`` was added in
0.18.0.
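Examples
--------
A minimal sketch (not part of the original docstring): decimate a
1000-sample signal by a factor of 4, keeping every fourth (filtered)
sample.
>>> import numpy as np
>>> from scipy import signal
>>> t = np.arange(1000) / 1000.0
>>> x = np.sin(2 * np.pi * 5 * t)
>>> signal.decimate(x, 4).shape
(250,)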
"""
x = np.asarray(x)
q = operator.index(q)
if n is not None:
n = operator.index(n)
if ftype == 'fir':
if n is None:
half_len = 10 * q # reasonable cutoff for our sinc-like function
n = 2 * half_len
b, a = firwin(n+1, 1. / q, window='hamming'), 1.
elif ftype == 'iir':
if n is None:
n = 8
system = dlti(*cheby1(n, 0.05, 0.8 / q))
b, a = system.num, system.den
elif isinstance(ftype, dlti):
system = ftype._as_tf() # Avoids copying if already in TF form
b, a = system.num, system.den
else:
raise ValueError('invalid ftype')
sl = [slice(None)] * x.ndim
a = np.asarray(a)
if a.size == 1: # FIR case
b = b / a
if zero_phase:
y = resample_poly(x, 1, q, axis=axis, window=b)
else:
# upfirdn is generally faster than lfilter by a factor equal to the
# downsampling factor, since it only calculates the needed outputs
n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
y = upfirdn(b, x, up=1, down=q, axis=axis)
sl[axis] = slice(None, n_out, None)
else: # IIR case
if zero_phase:
y = filtfilt(b, a, x, axis=axis)
else:
y = lfilter(b, a, x, axis=axis)
sl[axis] = slice(None, None, q)
return y[tuple(sl)]
|
arokem/scipy
|
scipy/signal/signaltools.py
|
Python
|
bsd-3-clause
| 145702
|
# coding: utf-8
# PYTHON IMPORTS
import os
import ntpath
import posixpath
import shutil
# DJANGO IMPORTS
from django.conf import settings
from django.test import TestCase
from django.contrib.auth.models import User
from django.utils.encoding import filepath_to_uri
from django.template import Context, Template, TemplateSyntaxError
# FILEBROWSER IMPORTS
import filebrowser
from filebrowser.base import FileObject, FileListing
from filebrowser.templatetags.fb_versions import version, version_object, version_setting
from filebrowser.sites import site
TESTS_PATH = os.path.dirname(os.path.abspath(__file__))
FILEBROWSER_PATH = os.path.split(TESTS_PATH)[0]
class VersionTemplateTagsTests(TestCase):
def setUp(self):
"""
Save original values/functions so they can be restored in tearDown
"""
self.original_path = filebrowser.base.os.path
self.original_directory = site.directory
self.original_versions_basedir = filebrowser.base.VERSIONS_BASEDIR
self.original_versions = filebrowser.base.VERSIONS
self.original_admin_versions = filebrowser.base.ADMIN_VERSIONS
# DIRECTORY
# custom directory because this could be set with sites
# and we cannot rely on filebrowser.settings
# FIXME: find better directory name
self.directory = "fb_test_directory/"
self.directory_path = os.path.join(site.storage.location, self.directory)
if os.path.exists(self.directory_path):
self.fail("Test directory already exists.")
else:
os.makedirs(self.directory_path)
# set site directory
site.directory = self.directory
# VERSIONS
self.versions = "_versionstestdirectory"
self.versions_path = os.path.join(site.storage.location, self.versions)
if os.path.exists(self.versions_path):
self.fail("Versions directory already exists.")
else:
os.makedirs(self.versions_path)
# create temporary test folder and move testimage
# FIXME: find better path names
self.tmpdir_name = os.path.join("fb_tmp_dir", "fb_tmp_dir_sub")
self.tmpdir_path = os.path.join(site.storage.location, self.directory, self.tmpdir_name)
if os.path.exists(self.tmpdir_path):
self.fail("Temporary testfolder already exists.")
else:
os.makedirs(self.tmpdir_path)
# copy test image to temporary test folder
self.image_path = os.path.join(FILEBROWSER_PATH, "static", "filebrowser", "img", "testimage.jpg")
if not os.path.exists(self.image_path):
self.fail("Testimage not found.")
shutil.copy(self.image_path, self.tmpdir_path)
# set posixpath
filebrowser.base.os.path = posixpath
# fileobjects
self.f_image = FileObject(os.path.join(self.directory, self.tmpdir_name, "testimage.jpg"), site=site)
self.f_image_not_exists = FileObject(os.path.join(self.directory, self.tmpdir_name, "testimage_does_not_exist.jpg"), site=site)
self.f_folder = FileObject(os.path.join(self.directory, self.tmpdir_name), site=site)
def test_version(self):
"""
Templatetag version
"""
# new settings
filebrowser.base.VERSIONS_BASEDIR = "fb_test_directory/_versions"
filebrowser.base.VERSIONS = {
'admin_thumbnail': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop'},
'large': {'verbose_name': 'Large', 'width': 600, 'height': '', 'opts': ''},
}
filebrowser.base.ADMIN_VERSIONS = ['large']
filebrowser.settings.VERSIONS = filebrowser.base.VERSIONS
filebrowser.templatetags.fb_versions.VERSIONS = filebrowser.base.VERSIONS
# templatetag version with wrong token
self.assertRaises(TemplateSyntaxError, lambda: Template('{% load fb_versions %}{% version obj.path %}'))
self.assertRaises(TemplateSyntaxError, lambda: Template('{% load fb_versions %}{% version %}'))
# templatetag version without path
t = Template('{% load fb_versions %}{% version obj "medium" %}')
c = Context({"obj": self.f_image})
r = t.render(c)
self.assertEqual(r, "") # FIXME: should this throw an error?
# templatetag version with hardcoded path
t = Template('{% load fb_versions %}{% version path "large" %}')
c = Context({"obj": self.f_image, "path": "fb_test_directory/fb_tmp_dir/fb_tmp_dir_sub/testimage.jpg"})
r = t.render(c)
self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
# templatetag version with obj
t = Template('{% load fb_versions %}{% version obj "large" %}')
c = Context({"obj": self.f_image})
r = t.render(c)
self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
# templatetag version with obj.path
t = Template('{% load fb_versions %}{% version obj.path "large" %}')
c = Context({"obj": self.f_image})
r = t.render(c)
self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
# templatetag version with suffix as variable
t = Template('{% load fb_versions %}{% version obj.path suffix %}')
c = Context({"obj": self.f_image, "suffix": "large"})
r = t.render(c)
self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
# # FIXME: templatetag version with non-existing path
# t = Template('{% load fb_versions %}{% version path "large" %}')
# c = Context({"obj": self.f_image, "path": "fb_test_directory/fb_tmp_dir/fb_tmp_dir_sub/testimagexxx.jpg"})
# r = t.render(c)
# self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
# FIXME: test placeholder
def test_version_object(self):
"""
Templatetag version_object
"""
# new settings
filebrowser.base.VERSIONS_BASEDIR = "fb_test_directory/_versions"
filebrowser.base.VERSIONS = {
'admin_thumbnail': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop'},
'large': {'verbose_name': 'Large', 'width': 600, 'height': '', 'opts': ''},
}
filebrowser.base.ADMIN_VERSIONS = ['large']
filebrowser.settings.VERSIONS = filebrowser.base.VERSIONS
filebrowser.templatetags.fb_versions.VERSIONS = filebrowser.base.VERSIONS
# templatetag with wrong token
self.assertRaises(TemplateSyntaxError, lambda: Template('{% load fb_versions %}{% version_object obj.path %}'))
self.assertRaises(TemplateSyntaxError, lambda: Template('{% load fb_versions %}{% version_object %}'))
self.assertRaises(TemplateSyntaxError, lambda: Template('{% load fb_versions %}{% version_object obj.path "medium" %}'))
# templatetag version_object with hardcoded path
t = Template('{% load fb_versions %}{% version_object path "large" as version_large %}{{ version_large.url }}')
c = Context({"obj": self.f_image, "path": "fb_test_directory/fb_tmp_dir/fb_tmp_dir_sub/testimage.jpg"})
r = t.render(c)
self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
# templatetag version_object with obj.path
t = Template('{% load fb_versions %}{% version_object obj.path "large" as version_large %}{{ version_large.url }}')
c = Context({"obj": self.f_image})
r = t.render(c)
self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
# templatetag version_object with obj
t = Template('{% load fb_versions %}{% version_object obj "large" as version_large %}{{ version_large.url }}')
c = Context({"obj": self.f_image})
r = t.render(c)
self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
# templatetag version_object with suffix as variable
t = Template('{% load fb_versions %}{% version_object obj suffix as version_large %}{{ version_large.url }}')
c = Context({"obj": self.f_image, "suffix": "large"})
r = t.render(c)
self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
# # FIXME: templatetag version with non-existing path
# t = Template('{% load fb_versions %}{% version_object path "large" as version_large %}{{ version_large.url }}')
# c = Context({"obj": self.f_image, "path": "fb_test_directory/fb_tmp_dir/fb_tmp_dir_sub/testimagexxx.jpg"})
# r = t.render(c)
# self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
# self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
# FIXME: test placeholder
def test_version_setting(self):
pass
def tearDown(self):
"""
Restore original values/functions
"""
filebrowser.base.os.path = self.original_path
site.directory = self.original_directory
filebrowser.base.VERSIONS_BASEDIR = self.original_versions_basedir
filebrowser.base.VERSIONS = self.original_versions
filebrowser.settings.VERSIONS = self.original_versions
filebrowser.templatetags.fb_versions.VERSIONS = self.original_versions
filebrowser.base.ADMIN_VERSIONS = self.original_admin_versions
# remove temporary directory and test folder
shutil.rmtree(self.directory_path)
shutil.rmtree(self.versions_path)
|
deschler/django-filebrowser
|
filebrowser/tests/test_versions.py
|
Python
|
bsd-3-clause
| 10848
|
import asyncio
import difflib
import json
import posixpath
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy, deepcopy
from difflib import get_close_matches
from functools import wraps
from unittest.suite import _DebugResult
from unittest.util import safe_repr
from urllib.parse import (
parse_qsl, unquote, urlencode, urljoin, urlparse, urlsplit, urlunparse,
)
from urllib.request import url2pathname
from asgiref.sync import async_to_sync
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.test.client import AsyncClient, Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils.deprecation import RemovedInDjango41Warning
from django.utils.functional import classproperty
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Put value into a list if it's not already one. Return an empty list if
value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
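# Hedged usage note (illustrative, not part of the original module):
# to_list(None) -> [], to_list('x') -> ['x'], to_list(['x', 'y']) -> ['x', 'y'].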
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super().__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s" % (
executed, self.num,
'\n'.join(
'%d. %s' % (i, query['sql']) for i, query in enumerate(self.captured_queries, start=1)
)
)
)
class _AssertTemplateUsedContext:
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if self.rendered_templates:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names)
)
else:
message += ' No template was rendered.'
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class _DatabaseFailure:
def __init__(self, wrapped, message):
self.wrapped = wrapped
self.message = message
def __call__(self):
raise AssertionError(self.message)
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
async_client_class = AsyncClient
_overridden_settings = None
_modified_settings = None
databases = set()
_disallowed_database_msg = (
'Database %(operation)s to %(alias)r are not allowed in SimpleTestCase '
'subclasses. Either subclass TestCase or TransactionTestCase to ensure '
'proper test isolation or add %(alias)r to %(test)s.databases to silence '
'this failure.'
)
_disallowed_connection_methods = [
('connect', 'connections'),
('temporary_connection', 'connections'),
('cursor', 'queries'),
('chunked_cursor', 'queries'),
]
@classmethod
def setUpClass(cls):
super().setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
cls._add_databases_failures()
@classmethod
def _validate_databases(cls):
if cls.databases == '__all__':
return frozenset(connections)
for alias in cls.databases:
if alias not in connections:
message = '%s.%s.databases refers to %r which is not defined in settings.DATABASES.' % (
cls.__module__,
cls.__qualname__,
alias,
)
close_matches = get_close_matches(alias, list(connections))
if close_matches:
message += ' Did you mean %r?' % close_matches[0]
raise ImproperlyConfigured(message)
return frozenset(cls.databases)
@classmethod
def _add_databases_failures(cls):
cls.databases = cls._validate_databases()
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, operation in cls._disallowed_connection_methods:
message = cls._disallowed_database_msg % {
'test': '%s.%s' % (cls.__module__, cls.__qualname__),
'alias': alias,
'operation': operation,
}
method = getattr(connection, name)
setattr(connection, name, _DatabaseFailure(method, message))
@classmethod
def _remove_databases_failures(cls):
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, _ in cls._disallowed_connection_methods:
method = getattr(connection, name)
setattr(connection, name, method.wrapped)
@classmethod
def tearDownClass(cls):
cls._remove_databases_failures()
if hasattr(cls, '_cls_modified_context'):
cls._cls_modified_context.disable()
delattr(cls, '_cls_modified_context')
if hasattr(cls, '_cls_overridden_context'):
cls._cls_overridden_context.disable()
delattr(cls, '_cls_overridden_context')
super().tearDownClass()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
self._setup_and_call(result)
def debug(self):
"""Perform the same as __call__(), without catching the exception."""
debug_result = _DebugResult()
self._setup_and_call(debug_result, debug=True)
def _setup_and_call(self, result, debug=False):
"""
Perform the following in order: pre-setup, run test, post-teardown,
skipping pre/post hooks if test is set to be skipped.
If debug=True, reraise any errors in setup and use super().debug()
instead of __call__() to run the test.
"""
testMethod = getattr(self, self._testMethodName)
skipped = (
getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)
)
# Convert async test methods.
if asyncio.iscoroutinefunction(testMethod):
setattr(self, self._testMethodName, async_to_sync(testMethod))
if not skipped:
try:
self._pre_setup()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
if debug:
super().debug()
else:
super().__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""
Perform pre-test setup:
* Create a test client.
* Clear the mail test outbox.
"""
self.client = self.client_class()
self.async_client = self.async_client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the
original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
        A context manager that temporarily applies changes to a list setting
        and reverts back to the original value when exiting the context.
"""
return modify_settings(**kwargs)
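    # Illustrative sketch (not part of Django): how a test method might use the
    # settings() and modify_settings() helpers above. The setting values shown
    # are only examples.
    def _example_settings_usage(self):
        # Temporarily override a single setting for the duration of the block.
        with self.settings(USE_TZ=False):
            self.assertIs(settings.USE_TZ, False)
        # Append to a list setting; the original value is restored on exit.
        with self.modify_settings(ALLOWED_HOSTS={'append': 'extra.example.com'}):
            self.assertIn('extra.example.com', settings.ALLOWED_HOSTS)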
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, msg_prefix='',
fetch_redirect_response=True):
"""
Assert that a response redirected to a specific URL and that the
redirect URL can be loaded.
Won't work for external links since it uses the test client to do a
request (use fetch_redirect_response=False to check such links without
fetching them).
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(
response.redirect_chain,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
self.assertEqual(
response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)"
% (response.redirect_chain[0][1], status_code)
)
url, status_code = response.redirect_chain[-1]
self.assertEqual(
response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)"
% (response.status_code, target_status_code)
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith('/'):
url = urljoin(response.request['PATH_INFO'], url)
path = urljoin(response.request['PATH_INFO'], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
domain, port = split_domain_port(netloc)
if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
raise ValueError(
"The test client is unable to fetch remote URLs (got %s). "
"If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
"Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
% (url, domain)
)
# Get the redirection page, using the same client that was used
# to obtain the original response.
extra = response.client.extra or {}
redirect_response = response.client.get(
path,
QueryDict(query),
secure=(scheme == 'https'),
**extra,
)
self.assertEqual(
redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)"
% (path, redirect_response.status_code, target_status_code)
)
self.assertURLEqual(
url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)
)
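    # Illustrative sketch (not part of Django): a typical call to
    # assertRedirects() from a test method; the URLs are hypothetical.
    def _example_redirect_usage(self):
        response = self.client.get('/private/')
        self.assertRedirects(
            response, '/accounts/login/?next=/private/',
            status_code=302, target_status_code=200,
        )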
def assertURLEqual(self, url1, url2, msg_prefix=''):
"""
Assert that two URLs are the same, ignoring the order of query string
parameters except for parameters with the same name.
For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but
/path/?a=1&a=2 isn't equal to /path/?a=2&a=1.
"""
def normalize(url):
"""Sort the URL's query string parameters."""
url = str(url) # Coerce reverse_lazy() URLs.
scheme, netloc, path, params, query, fragment = urlparse(url)
query_parts = sorted(parse_qsl(query))
return urlunparse((scheme, netloc, path, params, urlencode(query_parts), fragment))
self.assertEqual(
normalize(url1), normalize(url2),
msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2)
)
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code)
)
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = str(text)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
        ``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)
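    # Illustrative sketch (not part of Django): assertContains() and
    # assertNotContains() against a hypothetical page; html=True compares
    # parsed HTML instead of raw text.
    def _example_contains_usage(self):
        response = self.client.get('/')
        self.assertContains(response, '<h1>Welcome</h1>', count=1, html=True)
        self.assertNotContains(response, 'Server Error')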
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Assert that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors))
)
elif field in context[form].fields:
self.fail(
msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" %
(field, form, i)
)
else:
self.fail(
msg_prefix + "The form '%s' in context %d does not contain the field '%s'" %
(form, i, field)
)
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(
err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors or 'none')
)
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Assert that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err, repr(field_errors))
)
elif field in context[formset].forms[form_index].fields:
self.fail(
msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors"
% (field, formset, form_index, i)
)
else:
self.fail(
msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'"
% (formset, form_index, i, field)
)
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(
not non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain any non-field errors." % (formset, form_index, i)
)
self.assertTrue(
err in non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain the non-field error '%s' (actual errors: %s)"
% (formset, form_index, i, err, repr(non_field_errors))
)
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(
not non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain any non-form errors." % (formset, i)
)
self.assertTrue(
err in non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain the non-form error '%s' (actual errors: %s)"
% (formset, i, err, repr(non_form_errors))
)
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None and not hasattr(response, 'templates'):
raise ValueError(
"assertTemplateUsed() and assertTemplateNotUsed() are only "
"usable on responses fetched using the Django test Client."
)
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
            # Use this template with the context manager form.
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
"""
Assert that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ', '.join(template_names))
)
if count is not None:
self.assertEqual(
template_names.count(template_name), count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name))
)
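    # Illustrative sketch (not part of Django): assertTemplateUsed() both with
    # a response and in its context-manager form; the template name is
    # hypothetical.
    def _example_template_usage(self):
        from django.template.loader import render_to_string
        response = self.client.get('/')
        self.assertTemplateUsed(response, 'index.html')
        with self.assertTemplateUsed('index.html'):
            render_to_string('index.html', {})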
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Assert that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(
template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name
)
@contextmanager
def _assert_raises_or_warns_cm(self, func, cm_attr, expected_exception, expected_message):
with func(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(getattr(cm, cm_attr)))
def _assertFooMessage(self, func, cm_attr, expected_exception, expected_message, *args, **kwargs):
callable_obj = None
if args:
callable_obj, *args = args
cm = self._assert_raises_or_warns_cm(func, cm_attr, expected_exception, expected_message)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
"""
Assert that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
return self._assertFooMessage(
self.assertRaises, 'exception', expected_exception, expected_message,
*args, **kwargs
)
def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs):
"""
Same as assertRaisesMessage but for assertWarns() instead of
assertRaises().
"""
return self._assertFooMessage(
self.assertWarns, 'warning', expected_warning, expected_message,
*args, **kwargs
)
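    # Illustrative sketch (not part of Django): assertRaisesMessage() used as a
    # context manager and with a callable; the expected text only needs to be a
    # substring of the exception message.
    def _example_raises_message_usage(self):
        with self.assertRaisesMessage(ValueError, 'invalid literal'):
            int('not-a-number')
        self.assertRaisesMessage(ValueError, 'invalid literal', int, 'not-a-number')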
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Assert that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **{**field_kwargs, 'required': False})
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [required.error_messages['required']]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
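    # Illustrative sketch (not part of Django): assertFieldOutput() exercising
    # forms.EmailField; the error text mirrors the field's default message.
    def _example_field_output_usage(self):
        from django.forms import EmailField
        self.assertFieldOutput(
            EmailField,
            valid={'a@a.com': 'a@a.com'},
            invalid={'aaa': ['Enter a valid email address.']},
        )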
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Assert that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
str(dom1).splitlines(), str(dom2).splitlines(),
)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Assert that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are equal.
        Usual JSON non-significant whitespace rules apply as the heavy lifting
        is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are not equal.
        Usual JSON non-significant whitespace rules apply as the heavy lifting
        is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except json.JSONDecodeError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are semantically the same.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
diff = ('\n' + '\n'.join(
difflib.ndiff(xml1.splitlines(), xml2.splitlines())
))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
databases = {DEFAULT_DB_ALIAS}
_disallowed_database_msg = (
'Database %(operation)s to %(alias)r are not allowed in this test. '
'Add %(alias)r to %(test)s.databases to ensure proper test isolation '
'and silence this failure.'
)
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup and flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
def _pre_setup(self):
"""
Perform pre-test setup:
* If the class has an 'available_apps' attribute, restrict the app
registry to these applications, then fire the post_migrate signal --
it must run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, install those fixtures.
"""
super()._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False,
)
raise
# Clear the queries_log so that it's less likely to overflow (a single
# test probably won't execute 9K queries). If queries_log overflows,
# then assertNumQueries() doesn't work.
for db_name in self._databases_names(include_mirrors=False):
connections[db_name].queries_log.clear()
@classmethod
def _databases_names(cls, include_mirrors=True):
# Only consider allowed database aliases, including mirrors or not.
return [
alias for alias in connections
if alias in cls.databases and (
include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']
)
]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list())
if sql_list:
with transaction.atomic(using=db_name):
with conn.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# Provide replica initial data from migrated apps, if needed.
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name})
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""
Perform post-test things:
* Flush the contents of the database to leave a clean slate. If the
class has an 'available_apps' attribute, don't fire post_migrate.
* Force-close the connection so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super()._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None or
( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback and
hasattr(connections[db_name], '_test_serialized_contents')
)
)
call_command('flush', verbosity=0, interactive=False,
database=db_name, reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate)
def assertQuerysetEqual(self, qs, values, transform=None, ordered=True, msg=None):
values = list(values)
# RemovedInDjango41Warning.
if transform is None:
if (
values and isinstance(values[0], str) and
qs and not isinstance(qs[0], str)
):
# Transform qs using repr() if the first element of values is a
# string and the first element of qs is not (which would be the
# case if qs is a flattened values_list).
warnings.warn(
"In Django 4.1, repr() will not be called automatically "
"on a queryset when compared to string values. Set an "
"explicit 'transform' to silence this warning.",
category=RemovedInDjango41Warning,
stacklevel=2,
)
transform = repr
items = qs
if transform is not None:
items = map(transform, items)
if not ordered:
return self.assertEqual(Counter(items), Counter(values), msg=msg)
        # For example, qs.iterator() could be passed as qs, but it does not
        # have an 'ordered' attribute.
        if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
            raise ValueError("Trying to compare non-ordered queryset "
                             "against more than one ordered value")
return self.assertEqual(list(items), values, msg=msg)
def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs):
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
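# Illustrative sketch (not part of Django): using assertNumQueries() as a
# context manager and in callable form. The contrib User model is used purely
# as an example and assumes django.contrib.auth is installed.
class _ExampleNumQueriesUsage(TransactionTestCase):
    def _example_query_count(self):
        from django.contrib.auth.models import User
        with self.assertNumQueries(1):
            User.objects.count()
        self.assertNumQueries(1, User.objects.count)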
def connections_support_transactions(aliases=None):
"""
Return whether or not all (or specified) connections support
transactions.
"""
conns = connections.all() if aliases is None else (connections[alias] for alias in aliases)
return all(conn.features.supports_transactions for conn in conns)
class TestData:
"""
Descriptor to provide TestCase instance isolation for attributes assigned
during the setUpTestData() phase.
Allow safe alteration of objects assigned in setUpTestData() by test
methods by exposing deep copies instead of the original objects.
Objects are deep copied using a memo kept on the test case instance in
order to maintain their original relationships.
"""
memo_attr = '_testdata_memo'
def __init__(self, name, data):
self.name = name
self.data = data
def get_memo(self, testcase):
try:
memo = getattr(testcase, self.memo_attr)
except AttributeError:
memo = {}
setattr(testcase, self.memo_attr, memo)
return memo
def __get__(self, instance, owner):
if instance is None:
return self.data
memo = self.get_memo(instance)
try:
data = deepcopy(self.data, memo)
except TypeError:
# RemovedInDjango41Warning.
msg = (
"Assigning objects which don't support copy.deepcopy() during "
"setUpTestData() is deprecated. Either assign the %s "
"attribute during setUpClass() or setUp(), or add support for "
"deepcopy() to %s.%s.%s."
) % (
self.name,
owner.__module__,
owner.__qualname__,
self.name,
)
warnings.warn(msg, category=RemovedInDjango41Warning, stacklevel=2)
data = self.data
setattr(instance, self.name, data)
return data
def __repr__(self):
return '<TestData: name=%r, data=%r>' % (self.name, self.data)
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Open atomic blocks for multiple databases."""
atomics = {}
for db_name in cls._databases_names():
atomics[db_name] = transaction.atomic(using=db_name)
atomics[db_name].__enter__()
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened by the previous method."""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def _databases_support_transactions(cls):
return connections_support_transactions(cls.databases)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not cls._databases_support_transactions():
return
# Disable the durability check to allow testing durable atomic blocks
# in a transaction for performance reasons.
transaction.Atomic._ensure_durability = False
try:
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command('loaddata', *cls.fixtures, **{'verbosity': 0, 'database': db_name})
except Exception:
cls._rollback_atomics(cls.cls_atomics)
cls._remove_databases_failures()
raise
pre_attrs = cls.__dict__.copy()
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
cls._remove_databases_failures()
raise
for name, value in cls.__dict__.items():
if value is not pre_attrs.get(name):
setattr(cls, name, TestData(name, value))
except Exception:
transaction.Atomic._ensure_durability = True
raise
@classmethod
def tearDownClass(cls):
transaction.Atomic._ensure_durability = True
if cls._databases_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all():
conn.close()
super().tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase."""
pass
def _should_reload_connections(self):
if self._databases_support_transactions():
return False
return super()._should_reload_connections()
def _fixture_setup(self):
if not self._databases_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super()._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not self._databases_support_transactions():
return super()._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks and
not connection.needs_rollback and connection.is_usable()
)
@classmethod
@contextmanager
def captureOnCommitCallbacks(cls, *, using=DEFAULT_DB_ALIAS, execute=False):
"""Context manager to capture transaction.on_commit() callbacks."""
callbacks = []
start_count = len(connections[using].run_on_commit)
try:
yield callbacks
finally:
run_on_commit = connections[using].run_on_commit[start_count:]
callbacks[:] = [func for sids, func in run_on_commit]
if execute:
for callback in callbacks:
callback()
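# Illustrative sketch (not part of Django): class-level fixtures created once
# in setUpTestData() and transaction.on_commit() callbacks captured with
# captureOnCommitCallbacks(). The User object is only an example and assumes
# django.contrib.auth is installed.
class _ExampleTestCaseUsage(TestCase):
    @classmethod
    def setUpTestData(cls):
        from django.contrib.auth.models import User
        cls.user = User.objects.create(username='alice')
    def _example_capture_on_commit(self):
        with self.captureOnCommitCallbacks(execute=True) as callbacks:
            transaction.on_commit(lambda: None)
        self.assertEqual(len(callbacks), 1)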
class CheckCondition:
"""Descriptor class for deferred condition checking."""
def __init__(self, *conditions):
self.conditions = conditions
def add_condition(self, condition, reason):
return self.__class__(*self.conditions, (condition, reason))
def __get__(self, instance, cls=None):
# Trigger access for all bases.
if any(getattr(base, '__unittest_skip__', False) for base in cls.__bases__):
return True
for condition, reason in self.conditions:
if condition():
# Override this descriptor's value and set the skip reason.
cls.__unittest_skip__ = True
cls.__unittest_skip_why__ = reason
return True
return False
def _deferredSkip(condition, reason, name):
def decorator(test_func):
nonlocal condition
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if (args and isinstance(args[0], unittest.TestCase) and
connection.alias not in getattr(args[0], 'databases', {})):
raise ValueError(
"%s cannot be used on %s as %s doesn't allow queries "
"against the %r database." % (
name,
args[0],
args[0].__class__.__qualname__,
connection.alias,
)
)
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
databases = getattr(test_item, 'databases', None)
if not databases or connection.alias not in databases:
# Defer raising to allow importing test class's module.
def condition():
raise ValueError(
"%s cannot be used on %s as it doesn't allow queries "
"against the '%s' database." % (
name, test_item, connection.alias,
)
)
# Retrieve the possibly existing value from the class's dict to
# avoid triggering the descriptor.
skip = test_func.__dict__.get('__unittest_skip__')
if isinstance(skip, CheckCondition):
test_item.__unittest_skip__ = skip.add_condition(condition, reason)
elif skip is not True:
test_item.__unittest_skip__ = CheckCondition((condition, reason))
return test_item
return decorator
def skipIfDBFeature(*features):
"""Skip a test if a database has at least one of the named features."""
return _deferredSkip(
lambda: any(getattr(connection.features, feature, False) for feature in features),
"Database has feature(s) %s" % ", ".join(features),
'skipIfDBFeature',
)
def skipUnlessDBFeature(*features):
"""Skip a test unless a database has all the named features."""
return _deferredSkip(
lambda: not all(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support feature(s): %s" % ", ".join(features),
'skipUnlessDBFeature',
)
def skipUnlessAnyDBFeature(*features):
"""Skip a test unless a database has any of the named features."""
return _deferredSkip(
lambda: not any(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support any of the feature(s): %s" % ", ".join(features),
'skipUnlessAnyDBFeature',
)
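# Illustrative sketch (not part of Django): skipping tests based on database
# features; both feature flags shown exist on connection.features.
class _ExampleFeatureSkips(TestCase):
    @skipUnlessDBFeature('supports_transactions')
    def _example_needs_transactions(self):
        pass
    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def _example_empty_string_handling(self):
        pass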
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
A WSGIRequestHandler that doesn't log to standard output any of the
requests received, so as to not clutter the test result output.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super().__init__()
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""Return the relative path to the file on disk for the given URL."""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super().get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super().__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""Thread for running a live http server while the tests are running."""
def __init__(self, host, static_handler, connections_override=None, port=0):
self.host = host
self.port = port
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super().__init__()
def run(self):
"""
Set up the live server and databases, and then loop over handling
HTTP requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server()
# If binding to port zero, assign the port allocated by the OS.
if self.port == 0:
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self):
return ThreadedWSGIServer((self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False)
def terminate(self):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
class LiveServerTestCase(TransactionTestCase):
"""
    Do basically the same as TransactionTestCase but also launch a live HTTP
    server in a separate thread so that the tests may use another testing
    framework, such as Selenium, instead of the built-in dummy client.
    It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless using in-memory sqlite)
    and each thread needs to commit all its transactions so that the other
    thread can see the changes.
"""
host = 'localhost'
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (cls.host, cls.server_thread.port)
@classproperty
def allowed_host(cls):
return cls.host
@classmethod
def setUpClass(cls):
super().setUpClass()
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
# Explicitly enable thread-shareability for this connection
conn.inc_thread_sharing()
connections_override[conn.alias] = conn
cls._live_server_modified_settings = modify_settings(
ALLOWED_HOSTS={'append': cls.allowed_host},
)
cls._live_server_modified_settings.enable()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
port=cls.port,
)
@classmethod
def _tearDownClassInternal(cls):
        # There may not be a 'server_thread' attribute if setUpClass() has
        # raised an exception for some reason.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.terminate()
# Restore sqlite in-memory database connections' non-shareability.
for conn in cls.server_thread.connections_override.values():
conn.dec_thread_sharing()
cls._live_server_modified_settings.disable()
super().tearDownClass()
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
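# Illustrative sketch (not part of Django): driving the live server with a
# plain HTTP client; urlopen() stands in for a browser or Selenium driver.
class _ExampleLiveServerUsage(LiveServerTestCase):
    def _example_home_page_is_served(self):
        from urllib.request import urlopen
        with urlopen(self.live_server_url + '/') as response:
            self.assertEqual(response.status, 200)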
class SerializeMixin:
"""
Enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
"""
lockfile = None
@classmethod
def setUpClass(cls):
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__))
cls._lockfile = open(cls.lockfile)
locks.lock(cls._lockfile, locks.LOCK_EX)
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._lockfile.close()
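# Illustrative sketch (not part of Django): serializing test cases that share
# an on-disk resource via SerializeMixin. Using __file__ as the lock file
# satisfies the requirement that the file exists on the filesystem.
class _ExampleSerializedBase(SerializeMixin, TestCase):
    lockfile = __file__
class _ExampleSerializedTests(_ExampleSerializedBase):
    def _example_uses_shared_resource(self):
        pass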
| wkschwartz/django | django/test/testcases.py | Python | bsd-3-clause | 64,617 |
def isPrime(num):
    # 0, 1 and negative numbers are not prime.
    if num < 2:
        return False
    x = 2
    if num == 2:
        return True
    # Trial division by every candidate up to num // 2.
    while num // 2 >= x:
        if num % x == 0:
            return False
        else:
            x = x + 1
    return True
def isComposite(num):
if isPrime(num):
return False
else:
return True
def findPrimeFactor(num):
x = 2
while(num > x):
if isPrime(x) and num % x == 0:
return x
else:
x = x + 1
return x
def findPrimesTill(maximum):
primes = []
for i in range(1, maximum + 1):
if isPrime(i):
primes.append(i)
return primes
def findPrimesAmt(maximum):
primes = []
i = 1
counter = 1
while(counter <= maximum):
if isPrime(i):
primes.append(i)
counter = counter + 1
i = i + 1
return primes
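# Illustrative usage sketch (not part of the original script): exercise the
# helpers above on a few small inputs.
if __name__ == '__main__':
    print(isPrime(13))           # True
    print(isComposite(15))       # True, since 15 == 3 * 5
    print(findPrimeFactor(91))   # 7, since 91 == 7 * 13
    print(findPrimesTill(20))    # [2, 3, 5, 7, 11, 13, 17, 19]
    print(findPrimesAmt(5))      # the first five primes: [2, 3, 5, 7, 11]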
| ProgrammerKid/euler | py/primes.py | Python | bsd-3-clause | 655 |
"""Tests for computational algebraic number field theory. """
from sympy import S, Rational, Symbol, Poly, sin, sqrt, I, oo
from sympy.utilities.pytest import raises
from sympy.polys.numberfields import (
minimal_polynomial,
primitive_element,
is_isomorphism_possible,
field_isomorphism_pslq,
field_isomorphism,
to_number_field,
AlgebraicNumber,
)
from sympy.polys.polyerrors import (
IsomorphismFailed,
NotAlgebraic,
)
from sympy.polys.polyclasses import DMP
from sympy.polys.algebratools import QQ
from sympy.abc import x, y
Q = Rational
def test_minimal_polynomial():
assert minimal_polynomial(-7, x) == x + 7
assert minimal_polynomial(-1, x) == x + 1
assert minimal_polynomial( 0, x) == x
assert minimal_polynomial( 1, x) == x - 1
assert minimal_polynomial( 7, x) == x - 7
assert minimal_polynomial(sqrt(2), x) == x**2 - 2
assert minimal_polynomial(sqrt(5), x) == x**2 - 5
assert minimal_polynomial(sqrt(6), x) == x**2 - 6
assert minimal_polynomial(2*sqrt(2), x) == x**2 - 8
assert minimal_polynomial(3*sqrt(5), x) == x**2 - 45
assert minimal_polynomial(4*sqrt(6), x) == x**2 - 96
assert minimal_polynomial(2*sqrt(2) + 3, x) == x**2 - 6*x + 1
assert minimal_polynomial(3*sqrt(5) + 6, x) == x**2 - 12*x - 9
assert minimal_polynomial(4*sqrt(6) + 7, x) == x**2 - 14*x - 47
assert minimal_polynomial(2*sqrt(2) - 3, x) == x**2 + 6*x + 1
assert minimal_polynomial(3*sqrt(5) - 6, x) == x**2 + 12*x - 9
assert minimal_polynomial(4*sqrt(6) - 7, x) == x**2 + 14*x - 47
assert minimal_polynomial(sqrt(1 + sqrt(6)), x) == x**4 - 2*x**2 - 5
assert minimal_polynomial(sqrt(I + sqrt(6)), x) == x**8 - 10*x**4 + 49
assert minimal_polynomial(2*I + sqrt(2 + I), x) == x**4 + 4*x**2 + 8*x + 37
assert minimal_polynomial(sqrt(2) + sqrt(3), x) == x**4 - 10*x**2 + 1
assert minimal_polynomial(sqrt(2) + sqrt(3) + sqrt(6), x) == x**4 - 22*x**2 - 48*x - 23
a = 1 - 9*sqrt(2) + 7*sqrt(3)
assert minimal_polynomial(1/a, x) == 392*x**4 - 1232*x**3 + 612*x**2 + 4*x - 1
assert minimal_polynomial(1/sqrt(a), x) == 392*x**8 - 1232*x**6 + 612*x**4 + 4*x**2 - 1
raises(NotAlgebraic, "minimal_polynomial(y, x)")
raises(NotAlgebraic, "minimal_polynomial(oo, x)")
raises(NotAlgebraic, "minimal_polynomial(2**y, x)")
raises(NotAlgebraic, "minimal_polynomial(sin(1), x)")
assert minimal_polynomial(sqrt(2), polys=True).is_Poly == True
assert minimal_polynomial(sqrt(2), x, polys=True) == Poly(x**2 - 2)
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
assert minimal_polynomial(a, x) == x**2 - 2
assert minimal_polynomial(b, x) == x**2 - 3
assert minimal_polynomial(a, x, polys=True) == Poly(x**2 - 2)
assert minimal_polynomial(b, x, polys=True) == Poly(x**2 - 3)
assert minimal_polynomial(sqrt(a/2 + 17), x) == 2*x**4 - 68*x**2 + 577
assert minimal_polynomial(sqrt(b/2 + 17), x) == 4*x**4 - 136*x**2 + 1153
a, b = sqrt(2)/3 + 7, AlgebraicNumber(sqrt(2)/3 + 7)
f = 81*x**8 - 2268*x**6 - 4536*x**5 + 22644*x**4 + 63216*x**3 - 31608*x**2 - 189648*x + 141358
assert minimal_polynomial(sqrt(a) + sqrt(sqrt(a)), x) == f
assert minimal_polynomial(sqrt(b) + sqrt(sqrt(b)), x) == f
assert minimal_polynomial(a**Rational(3, 2), x) == 729*x**4 - 506898*x**2 + 84604519
def test_primitive_element():
assert primitive_element([sqrt(2)], x) == (x**2 - 2, [1])
assert primitive_element([sqrt(2), sqrt(3)], x) == (x**4 - 10*x**2 + 1, [1, 1])
assert primitive_element([sqrt(2)], x, polys=True) == (Poly(x**2 - 2), [1])
assert primitive_element([sqrt(2), sqrt(3)], x, polys=True) == (Poly(x**4 - 10*x**2 + 1), [1, 1])
assert primitive_element([sqrt(2)], x, ex=True) == (x**2 - 2, [1], [[1, 0]])
assert primitive_element([sqrt(2), sqrt(3)], x, ex=True) == \
(x**4 - 10*x**2 + 1, [1, 1], [[Q(1,2), 0, -Q(9,2), 0], [-Q(1,2), 0, Q(11,2), 0]])
assert primitive_element([sqrt(2)], x, ex=True, polys=True) == (Poly(x**2 - 2), [1], [[1, 0]])
assert primitive_element([sqrt(2), sqrt(3)], x, ex=True, polys=True) == \
(Poly(x**4 - 10*x**2 + 1), [1, 1], [[Q(1,2), 0, -Q(9,2), 0], [-Q(1,2), 0, Q(11,2), 0]])
raises(ValueError, "primitive_element([], x, ex=False)")
raises(ValueError, "primitive_element([], x, ex=True)")
def test_field_isomorphism_pslq():
a = AlgebraicNumber(I)
b = AlgebraicNumber(I*sqrt(3))
raises(NotImplementedError, "field_isomorphism_pslq(a, b)")
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
c = AlgebraicNumber(sqrt(7))
d = AlgebraicNumber(sqrt(2)+sqrt(3))
e = AlgebraicNumber(sqrt(2)+sqrt(3)+sqrt(7))
assert field_isomorphism_pslq(a, a) == [1, 0]
assert field_isomorphism_pslq(a, b) == None
assert field_isomorphism_pslq(a, c) == None
assert field_isomorphism_pslq(a, d) == [Q(1,2), 0, -Q(9,2), 0]
assert field_isomorphism_pslq(a, e) == [Q(1,80), 0, -Q(1,2), 0, Q(59,20), 0]
assert field_isomorphism_pslq(b, a) == None
assert field_isomorphism_pslq(b, b) == [1, 0]
assert field_isomorphism_pslq(b, c) == None
assert field_isomorphism_pslq(b, d) == [-Q(1,2), 0, Q(11,2), 0]
assert field_isomorphism_pslq(b, e) == [-Q(3,640), 0, Q(67,320), 0, -Q(297,160), 0, Q(313,80), 0]
assert field_isomorphism_pslq(c, a) == None
assert field_isomorphism_pslq(c, b) == None
assert field_isomorphism_pslq(c, c) == [1, 0]
assert field_isomorphism_pslq(c, d) == None
assert field_isomorphism_pslq(c, e) == [Q(3,640), 0, -Q(71,320), 0, Q(377,160), 0, -Q(469,80), 0]
assert field_isomorphism_pslq(d, a) == None
assert field_isomorphism_pslq(d, b) == None
assert field_isomorphism_pslq(d, c) == None
assert field_isomorphism_pslq(d, d) == [1, 0]
assert field_isomorphism_pslq(d, e) == [-Q(3,640), 0, Q(71,320), 0, -Q(377,160), 0, Q(549,80), 0]
assert field_isomorphism_pslq(e, a) == None
assert field_isomorphism_pslq(e, b) == None
assert field_isomorphism_pslq(e, c) == None
assert field_isomorphism_pslq(e, d) == None
assert field_isomorphism_pslq(e, e) == [1, 0]
f = AlgebraicNumber(3*sqrt(2)+8*sqrt(7)-5)
assert field_isomorphism_pslq(f, e) == [Q(3,80), 0, -Q(139,80), 0, Q(347,20), 0, -Q(761,20), -5]
def test_field_isomorphism():
assert field_isomorphism(3, sqrt(2)) == [3]
assert field_isomorphism( I*sqrt(3), I*sqrt(3)/2) == [ 2, 0]
assert field_isomorphism(-I*sqrt(3), I*sqrt(3)/2) == [-2, 0]
assert field_isomorphism( I*sqrt(3),-I*sqrt(3)/2) == [-2, 0]
assert field_isomorphism(-I*sqrt(3),-I*sqrt(3)/2) == [ 2, 0]
assert field_isomorphism( 2*I*sqrt(3)/7, 5*I*sqrt(3)/3) == [ S(6)/35, 0]
assert field_isomorphism(-2*I*sqrt(3)/7, 5*I*sqrt(3)/3) == [-S(6)/35, 0]
assert field_isomorphism( 2*I*sqrt(3)/7,-5*I*sqrt(3)/3) == [-S(6)/35, 0]
assert field_isomorphism(-2*I*sqrt(3)/7,-5*I*sqrt(3)/3) == [ S(6)/35, 0]
assert field_isomorphism( 2*I*sqrt(3)/7+27, 5*I*sqrt(3)/3) == [ S(6)/35, 27]
assert field_isomorphism(-2*I*sqrt(3)/7+27, 5*I*sqrt(3)/3) == [-S(6)/35, 27]
assert field_isomorphism( 2*I*sqrt(3)/7+27,-5*I*sqrt(3)/3) == [-S(6)/35, 27]
assert field_isomorphism(-2*I*sqrt(3)/7+27,-5*I*sqrt(3)/3) == [ S(6)/35, 27]
p = AlgebraicNumber( sqrt(2) + sqrt(3))
q = AlgebraicNumber(-sqrt(2) + sqrt(3))
r = AlgebraicNumber( sqrt(2) - sqrt(3))
s = AlgebraicNumber(-sqrt(2) - sqrt(3))
pos_coeffs = [ S(1)/2, S(0), -S(9)/2, S(0)]
neg_coeffs = [-S(1)/2, S(0), S(9)/2, S(0)]
a = AlgebraicNumber(sqrt(2))
assert is_isomorphism_possible(a, p) == True
assert is_isomorphism_possible(a, q) == True
assert is_isomorphism_possible(a, r) == True
assert is_isomorphism_possible(a, s) == True
assert field_isomorphism(a, p, fast=True) == pos_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == neg_coeffs
assert field_isomorphism(a, p, fast=False) == pos_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == neg_coeffs
a = AlgebraicNumber(-sqrt(2))
assert is_isomorphism_possible(a, p) == True
assert is_isomorphism_possible(a, q) == True
assert is_isomorphism_possible(a, r) == True
assert is_isomorphism_possible(a, s) == True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == pos_coeffs
assert field_isomorphism(a, r, fast=True) == neg_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == pos_coeffs
assert field_isomorphism(a, r, fast=False) == neg_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
pos_coeffs = [ S(1)/2, S(0), -S(11)/2, S(0)]
neg_coeffs = [-S(1)/2, S(0), S(11)/2, S(0)]
a = AlgebraicNumber(sqrt(3))
assert is_isomorphism_possible(a, p) == True
assert is_isomorphism_possible(a, q) == True
assert is_isomorphism_possible(a, r) == True
assert is_isomorphism_possible(a, s) == True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
a = AlgebraicNumber(-sqrt(3))
assert is_isomorphism_possible(a, p) == True
assert is_isomorphism_possible(a, q) == True
assert is_isomorphism_possible(a, r) == True
assert is_isomorphism_possible(a, s) == True
assert field_isomorphism(a, p, fast=True) == pos_coeffs
assert field_isomorphism(a, q, fast=True) == pos_coeffs
assert field_isomorphism(a, r, fast=True) == neg_coeffs
assert field_isomorphism(a, s, fast=True) == neg_coeffs
assert field_isomorphism(a, p, fast=False) == pos_coeffs
assert field_isomorphism(a, q, fast=False) == pos_coeffs
assert field_isomorphism(a, r, fast=False) == neg_coeffs
assert field_isomorphism(a, s, fast=False) == neg_coeffs
pos_coeffs = [ S(3)/2, S(0), -S(33)/2, -S(8)]
neg_coeffs = [-S(3)/2, S(0), S(33)/2, -S(8)]
a = AlgebraicNumber(3*sqrt(3)-8)
assert is_isomorphism_possible(a, p) == True
assert is_isomorphism_possible(a, q) == True
assert is_isomorphism_possible(a, r) == True
assert is_isomorphism_possible(a, s) == True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
a = AlgebraicNumber(3*sqrt(2)+2*sqrt(3)+1)
pos_1_coeffs = [ S(1)/2, S(0), -S(5)/2, S(1)]
neg_5_coeffs = [-S(5)/2, S(0), S(49)/2, S(1)]
pos_5_coeffs = [ S(5)/2, S(0), -S(49)/2, S(1)]
neg_1_coeffs = [-S(1)/2, S(0), S(5)/2, S(1)]
assert is_isomorphism_possible(a, p) == True
assert is_isomorphism_possible(a, q) == True
assert is_isomorphism_possible(a, r) == True
assert is_isomorphism_possible(a, s) == True
assert field_isomorphism(a, p, fast=True) == pos_1_coeffs
assert field_isomorphism(a, q, fast=True) == neg_5_coeffs
assert field_isomorphism(a, r, fast=True) == pos_5_coeffs
assert field_isomorphism(a, s, fast=True) == neg_1_coeffs
assert field_isomorphism(a, p, fast=False) == pos_1_coeffs
assert field_isomorphism(a, q, fast=False) == neg_5_coeffs
assert field_isomorphism(a, r, fast=False) == pos_5_coeffs
assert field_isomorphism(a, s, fast=False) == neg_1_coeffs
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
c = AlgebraicNumber(sqrt(7))
assert is_isomorphism_possible(a, b) == True
assert is_isomorphism_possible(b, a) == True
assert is_isomorphism_possible(c, p) == False
assert field_isomorphism(sqrt(2), sqrt(3), fast=True) is None
assert field_isomorphism(sqrt(3), sqrt(2), fast=True) is None
assert field_isomorphism(sqrt(2), sqrt(3), fast=False) is None
assert field_isomorphism(sqrt(3), sqrt(2), fast=False) is None
def test_to_number_field():
assert to_number_field(sqrt(2)) == AlgebraicNumber(sqrt(2))
assert to_number_field([sqrt(2), sqrt(3)]) == AlgebraicNumber(sqrt(2)+sqrt(3))
a = AlgebraicNumber(sqrt(2)+sqrt(3), [S(1)/2, S(0), -S(9)/2, S(0)])
assert to_number_field(sqrt(2), sqrt(2)+sqrt(3)) == a
assert to_number_field(sqrt(2), AlgebraicNumber(sqrt(2)+sqrt(3))) == a
raises(IsomorphismFailed, "to_number_field(sqrt(2), sqrt(3))")
def test_AlgebraicNumber():
minpoly, root = x**2 - 2, sqrt(2)
a = AlgebraicNumber(root, gen=x)
assert a.rep == DMP([QQ(1),QQ(0)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_aliased == False
assert a.coeffs() == [S(1), S(0)]
assert a.native_coeffs() == [QQ(1), QQ(0)]
a = AlgebraicNumber(root, gen=x, alias='y')
assert a.rep == DMP([QQ(1),QQ(0)], QQ)
assert a.root == root
assert a.alias == Symbol('y')
assert a.minpoly == minpoly
assert a.is_aliased == True
a = AlgebraicNumber(root, gen=x, alias=Symbol('y'))
assert a.rep == DMP([QQ(1),QQ(0)], QQ)
assert a.root == root
assert a.alias == Symbol('y')
assert a.minpoly == minpoly
assert a.is_aliased == True
assert AlgebraicNumber(sqrt(2), []).rep == DMP([], QQ)
assert AlgebraicNumber(sqrt(2), [8]).rep == DMP([QQ(8)], QQ)
assert AlgebraicNumber(sqrt(2), [S(8)/3]).rep == DMP([QQ(8,3)], QQ)
assert AlgebraicNumber(sqrt(2), [7, 3]).rep == DMP([QQ(7),QQ(3)], QQ)
assert AlgebraicNumber(sqrt(2), [S(7)/9, S(3)/2]).rep == DMP([QQ(7,9),QQ(3,2)], QQ)
assert AlgebraicNumber(sqrt(2), [1, 2, 3]).rep == DMP([QQ(2),QQ(5)], QQ)
a = AlgebraicNumber(AlgebraicNumber(root, gen=x), [1,2])
assert a.rep == DMP([QQ(1),QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_aliased == False
assert a.coeffs() == [S(1), S(2)]
assert a.native_coeffs() == [QQ(1), QQ(2)]
a = AlgebraicNumber((minpoly, root), [1,2])
assert a.rep == DMP([QQ(1),QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_aliased == False
a = AlgebraicNumber((Poly(minpoly), root), [1,2])
assert a.rep == DMP([QQ(1),QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_aliased == False
assert AlgebraicNumber( sqrt(3)).rep == DMP([ QQ(1),QQ(0)], QQ)
assert AlgebraicNumber(-sqrt(3)).rep == DMP([-QQ(1),QQ(0)], QQ)
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(2))
assert a == b and a == sqrt(2)
a = AlgebraicNumber(sqrt(2), gen=x)
b = AlgebraicNumber(sqrt(2), gen=x)
assert a == b and a == sqrt(2)
a = AlgebraicNumber(sqrt(2), [1,2])
b = AlgebraicNumber(sqrt(2), [1,3])
assert a != b and a != sqrt(2)+3
assert (a == x) == False and (a != x) == True
a = AlgebraicNumber(sqrt(2), [1,0])
b = AlgebraicNumber(sqrt(2), [1,0], alias=y)
assert a.as_poly(x) == Poly(x)
assert b.as_poly() == Poly(y)
assert a.as_basic() == sqrt(2)
assert a.as_basic(x) == x
assert b.as_basic() == sqrt(2)
assert b.as_basic(x) == x
a = AlgebraicNumber(sqrt(2), [2,3])
b = AlgebraicNumber(sqrt(2), [2,3], alias=y)
p = a.as_poly()
assert p == Poly(2*p.gen+3)
assert a.as_poly(x) == Poly(2*x+3)
assert b.as_poly() == Poly(2*y+3)
assert a.as_basic() == 2*sqrt(2)+3
assert a.as_basic(x) == 2*x+3
assert b.as_basic() == 2*sqrt(2)+3
assert b.as_basic(x) == 2*x+3
def test_to_algebraic_integer():
a = AlgebraicNumber(sqrt(3), gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 3
assert a.root == sqrt(3)
assert a.rep == DMP([QQ(1),QQ(0)], QQ)
a = AlgebraicNumber(2*sqrt(3), gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(1),QQ(0)], QQ)
a = AlgebraicNumber(sqrt(3)/2, gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(1),QQ(0)], QQ)
a = AlgebraicNumber(sqrt(3)/2, [S(7)/19, 3], gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(7,19),QQ(3)], QQ)
|
tarballs-are-good/sympy
|
sympy/polys/tests/test_numberfields.py
|
Python
|
bsd-3-clause
| 17,237
|
from __future__ import print_function, division
from abc import ABCMeta, abstractmethod
import numpy as np
from ciabatta.meta import make_repr_str
from ahoy import measurers
class CMeasurer(measurers.Measurer):
__metaclass__ = ABCMeta
@abstractmethod
def get_cs(self):
return
class FieldCMeasurer(CMeasurer):
def __init__(self, c_field, positions):
self.c_field = c_field
self.positions = positions
def get_cs(self):
return self.c_field.get_val_i(self.positions)
def __repr__(self):
fs = [('c_field', self.c_field)]
return make_repr_str(self, fs)
class LinearCMeasurer(CMeasurer):
def __init__(self, positions):
self.positions = positions
def get_cs(self):
return self.positions.dr[:, 0]
def __repr__(self):
fs = []
return make_repr_str(self, fs)
class GradCMeasurer(measurers.Measurer):
__metaclass__ = ABCMeta
@abstractmethod
def get_grad_cs(self):
return
class FieldGradCMeasurer(GradCMeasurer):
def __init__(self, c_field, positions):
self.c_field = c_field
self.positions = positions
def get_grad_cs(self):
return self.c_field.get_grad_i(self.positions)
def __repr__(self):
fs = [('c_field', self.c_field)]
return make_repr_str(self, fs)
class ConstantGradCMeasurer(GradCMeasurer):
def __init__(self, n, dim):
self.grad_c = np.zeros([n, dim])
self.grad_c[:, 0] = 1.0
@property
def n(self):
return self.grad_c.shape[0]
@property
def dim(self):
return self.grad_c.shape[1]
def get_grad_cs(self):
return self.grad_c
def __repr__(self):
fs = []
return make_repr_str(self, fs)
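# Illustrative usage sketch -- an addition, not part of the original module.
# ConstantGradCMeasurer reports a fixed unit gradient along the first axis for
# every agent, so no concentration field or positions object is needed.
if __name__ == '__main__':
    measurer = ConstantGradCMeasurer(n=3, dim=2)
    grad_cs = measurer.get_grad_cs()
    # grad_cs has shape (3, 2); the first column is ones, the second zeros.
    print(measurer.n, measurer.dim, grad_cs.sum())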
|
eddiejessup/ahoy
|
ahoy/c_measurers.py
|
Python
|
bsd-3-clause
| 1,777
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['LinearTrend'] , ['Seasonal_Second'] , ['SVR'] );
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_LinearTrend_Seasonal_Second_SVR.py
|
Python
|
bsd-3-clause
| 163
|
import unittest
from . database import engine, init_db
from . items import (UserItem,
AddressItem,
NewFieldItemUser,
OverrideFieldItemUser)
class BaseTestCase(unittest.TestCase):
def assertSortedEqual(self, first, second):
return self.assertEqual(sorted(first), sorted(second))
class SqlAlchemyItemTestCase(BaseTestCase):
def test_user(self):
""" test that makes a user item with right fields"""
u = UserItem()
self.assertSortedEqual(u.fields.keys(), ['id', 'name', 'full_name'])
def test_new_fields(self):
""" test that the item makes a field for items not declared in the db model """
u = NewFieldItemUser()
self.assertSortedEqual(u.fields.keys(), ['id', 'name', 'full_name', 'first_joined'])
def test_override_fields(self):
""" test can override a field when defining a SqlAlchemyItem """
u = OverrideFieldItemUser()
self.assertSortedEqual(u.fields.keys(), ['id', 'name', 'full_name'])
def test_has_keys(self):
""" test make sure Has primary keys works, has not nullable columns works
Test see if attributes that contain list of pks and not nullable columns"""
u = UserItem()
a = AddressItem()
self.assertEqual(['id'], u.primary_keys)
self.assertEqual(['id', 'email_address', 'time'], a.required_keys)
self.assertEqual({'id', 'email_address'}, a.null_primary_key_fields)
self.assertEqual({'id', 'email_address', 'time'}, a.null_required_fields)
self.assertTrue(u.null_primary_key_fields)
self.assertTrue(u.null_required_fields)
a['id'] = 100
a['email_address'] = "bigtime@thebigtime.come"
self.assertFalse(a.null_primary_key_fields)
self.assertEqual({'time'}, a.null_required_fields)
a['time'] = 'one o clock'
self.assertFalse(a.null_required_fields)
class SqlAlchemyItemDBTestCase(BaseTestCase):
def setUp(self):
init_db()
engine.execute(""" CREATE TABLE IF NOT EXISTS user2
(id INTEGER PRIMARY KEY,
name VARCHAR,
full_name VARCHAR)""")
engine.execute("""INSERT INTO user2
(id,name,full_name)
VALUES ('1','ryan','ryan the rhino');""")
engine.execute("""INSERT INTO user2
(id,name)
VALUES ('2','joe');""")
def tearDown(self):
engine.execute("delete from user2")
def test_db_setup(self):
"""test database was setup properly"""
self.assertSortedEqual((1, 'ryan', 'ryan the rhino'),
engine.execute('select * from user2').fetchone())
def test_commit_item(self):
"""test save fields into database """
u = UserItem()
u['id'] = 3
u['name'] = 'bob'
u['full_name'] = 'bob the bat'
u.commit_item(engine=engine)
self.assertSortedEqual([3, 'bob', 'bob the bat'],
engine.execute("Select * from user2 where user2.id = 3").fetchone())
def test_matching_dbrow_raises_nometadata(self):
u = UserItem()
u.table.metadata.bind = None
with self.assertRaises(AttributeError):
u.get_matching_dbrow()
def test_matching_dbrow_raises_null_primary_key(self):
u = UserItem()
u.table.metadata.bind = 'sqlite:///'
with self.assertRaises(ValueError):
u.get_matching_dbrow()
def test_matching_dbrow_pulls_matching_data(self):
u = UserItem()
u.table.metadata.bind = engine
u['id'] = 2
self.assertEqual((2, 'joe', None), u.get_matching_dbrow())
def test_matching_dbrow_uses_cache(self):
u = UserItem()
u.table.metadata.bind = engine
u['id'] = 2
u.get_matching_dbrow()
engine.execute("delete from user2")
self.assertEqual((2, 'joe', None), u.get_matching_dbrow())
|
ryancerf/scrapy-sqlitem
|
tests/test_sqlitem.py
|
Python
|
bsd-3-clause
| 3,895
|
default_app_config = 'decisions.subscriptions.apps.SubscriptionsConfig'
|
okffi/decisions
|
web/decisions/subscriptions/__init__.py
|
Python
|
bsd-3-clause
| 72
|
from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as __
from wagtail.wagtailadmin.views.generic import CreateView, DeleteView, EditView, IndexView
from wagtail.wagtailcore.models import Site
from wagtail.wagtailcore.permissions import site_permission_policy
from wagtail.wagtailsites.forms import SiteForm
class Index(IndexView):
permission_policy = site_permission_policy
model = Site
context_object_name = 'sites'
template_name = 'wagtailsites/index.html'
add_url_name = 'wagtailsites:add'
page_title = __("Sites")
add_item_label = __("Add a site")
header_icon = 'site'
class Create(CreateView):
permission_policy = site_permission_policy
form_class = SiteForm
page_title = __("Add site")
success_message = __("Site '{0}' created.")
add_url_name = 'wagtailsites:add'
edit_url_name = 'wagtailsites:edit'
index_url_name = 'wagtailsites:index'
template_name = 'wagtailsites/create.html'
header_icon = 'site'
class Edit(EditView):
permission_policy = site_permission_policy
model = Site
form_class = SiteForm
success_message = __("Site '{0}' updated.")
error_message = __("The site could not be saved due to errors.")
delete_item_label = __("Delete site")
edit_url_name = 'wagtailsites:edit'
index_url_name = 'wagtailsites:index'
delete_url_name = 'wagtailsites:delete'
context_object_name = 'site'
template_name = 'wagtailsites/edit.html'
header_icon = 'site'
class Delete(DeleteView):
permission_policy = site_permission_policy
model = Site
success_message = __("Site '{0}' deleted.")
index_url_name = 'wagtailsites:index'
delete_url_name = 'wagtailsites:delete'
page_title = __("Delete site")
confirmation_message = __("Are you sure you want to delete this site?")
header_icon = 'site'
|
hamsterbacke23/wagtail
|
wagtail/wagtailsites/views.py
|
Python
|
bsd-3-clause
| 1,903
|
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.contrib.contenttypes import generic
from django.core.exceptions import ValidationError
from django.utils.functional import cached_property
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import get_language, ugettext_lazy as _
from shortuuid import uuid
from treebeard.mp_tree import MP_Node
from shortuuidfield import ShortUUIDField
from model_utils.managers import InheritanceManager
from . import mixins
from . import defaults
from .utils import unicode_slugify as slugify
from .managers import PageManager, ContainerManager
from .utils import get_container_names_from_template
class AbstractPageNode(MP_Node):
"""
Define the tree structure properties of the fancy page. This is a
separate abstract class to make sure that it can be easily replaced
by another tree handling library or none if needed.
"""
name = models.CharField(_("Name"), max_length=255, db_index=True)
slug = models.SlugField(_("Slug"), max_length=255, db_index=True)
image = models.ImageField(_('Image'), upload_to='fancypages/pages',
blank=True, null=True)
description = models.TextField(_("Description"), blank=True)
_slug_separator = u'/'
@models.permalink
def get_absolute_url(self):
return ('fancypages:page-detail', (), {'slug': self.slug})
def save(self, update_slugs=True, *args, **kwargs):
if update_slugs:
parent = self.get_parent()
slug = slugify(self.name)
# If the page has a parent, include the parent's slug in this one
if parent:
self.slug = '%s%s%s' % (
parent.slug, self._slug_separator, slug)
else:
self.slug = slug
# Enforce slug uniqueness here as MySQL can't handle a unique index on
# the slug field
try:
match = self.__class__.objects.get(slug=self.slug)
except self.__class__.DoesNotExist:
pass
else:
if match.id != self.id:
raise ValidationError(
_("A page with slug '%(slug)s' already exists") % {
'slug': self.slug})
super(AbstractPageNode, self).save(*args, **kwargs)
def move(self, target, pos=None):
"""
Moves the current node and all its descendants to a new position
relative to another node.
See https://tabo.pe/projects/django-treebeard/docs/1.61/api.html
"""
super(AbstractPageNode, self).move(target, pos)
# Update the slugs and full names of all nodes in the new subtree.
# We need to reload self as 'move' doesn't update the current instance,
# then we iterate over the subtree and call save which automatically
# updates slugs.
reloaded_self = self.__class__.objects.get(pk=self.pk)
subtree = self.__class__.get_tree(parent=reloaded_self)
for node in subtree:
node.save()
move.alters_data = True
def __unicode__(self):
return "{0} ({1})".format(self.name, self.slug)
class Meta:
app_label = 'fancypages'
abstract = True
class AbstractPageType(models.Model):
uuid = ShortUUIDField(verbose_name=_("Unique ID"), db_index=True)
name = models.CharField(_("Name"), max_length=128)
slug = models.SlugField(_("Slug"), max_length=128)
template_name = models.CharField(_("Template name"), max_length=255)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
return super(AbstractPageType, self).save(*args, **kwargs)
def __unicode__(self):
return self.name
class Meta:
abstract = True
app_label = 'fancypages'
class AbstractPageGroup(models.Model):
"""
A page group provides a way to group fancy pages and retrieve only
pages within a specific group.
"""
uuid = ShortUUIDField(verbose_name=_("Unique ID"), db_index=True)
name = models.CharField(_("Name"), max_length=128)
slug = models.SlugField(_("Slug"), max_length=128, null=True, blank=True)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
return super(AbstractPageGroup, self).save(*args, **kwargs)
def __unicode__(self):
return self.name
class Meta:
abstract = True
app_label = 'fancypages'
class AbstractFancyPage(models.Model):
uuid = ShortUUIDField(verbose_name=_("Unique ID"), db_index=True)
# this field has to be NULLABLE for backwards compatibility but should
# never be left blank (hence, blank=False). We might be able to remove this
# at some point but migrations make it impossible to change without a
# default value. There's no sensible default, so we leave it null.
node = models.OneToOneField(
settings.FP_NODE_MODEL, verbose_name=_("Tree node"),
related_name='page', null=True)
page_type = models.ForeignKey(
'fancypages.PageType', verbose_name=_("Page type"),
related_name="pages", null=True, blank=True)
keywords = models.CharField(_("Keywords"), max_length=255, blank=True)
containers = generic.GenericRelation('fancypages.Container')
PUBLISHED, DRAFT, ARCHIVED = (u'published', u'draft', u'archived')
STATUS_CHOICES = (
(PUBLISHED, _("Published")),
(DRAFT, _("Draft")),
(ARCHIVED, _("Archived")),
)
status = models.CharField(
_(u"Status"), max_length=15, choices=STATUS_CHOICES, blank=True)
date_visible_start = models.DateTimeField(
_("Visible from"), null=True, blank=True)
date_visible_end = models.DateTimeField(
_("Visible until"), null=True, blank=True)
groups = models.ManyToManyField(
'fancypages.PageGroup', verbose_name=_("Groups"), related_name="pages")
# this is the default manager that is
# passed into subclasses when inheriting
objects = PageManager()
@property
def is_visible(self):
if self.status != AbstractFancyPage.PUBLISHED:
return False
now = timezone.now()
if self.date_visible_start and self.date_visible_start > now:
return False
if self.date_visible_end and self.date_visible_end < now:
return False
return True
@cached_property
def toplevel_parent(self):
"""
Get the top-level parent (root) of the current page. This will return
the current page if the page is a top-level page or ``None`` if the
no top-level page can be determined (faulty data). The result is cached
on first lookup.
:return: A top-level page object or ``None``.
"""
if self.depth == 1:
return self
try:
page = self.node.get_root().page
except (AttributeError, ObjectDoesNotExist):
page = None
return page
@models.permalink
def get_edit_page_url(self):
""" Get the dashboard URL for updating the page. """
return ("fp-dashboard:page-update", (), {'pk': self.pk})
@models.permalink
def get_add_child_url(self):
""" Get the dashboard URL for adding a child page. """
return ("fp-dashboard:child-page-create", (), {'parent_pk': self.pk})
@models.permalink
def get_delete_page_url(self):
""" Get the dashboard URL fo deleting this page. """
return ("fp-dashboard:page-delete", (), {'pk': self.pk})
@classmethod
def _split_kwargs(cls, dct, prefix="node__"):
prefixed = {}
cleaned = {}
for key, value in dct.iteritems():
if key.startswith(prefix):
prefixed[key.replace(prefix, '')] = value
else:
cleaned[key] = value
return prefixed, cleaned
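# Added explanatory note (not in the original source): _split_kwargs separates
# node-level keyword arguments (prefixed with 'node__') from page-level ones, e.g.
#   _split_kwargs({'node__name': 'Home', 'status': 'draft'})
#   -> ({'name': 'Home'}, {'status': 'draft'})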
def add_child(self, **kwargs):
node_kwargs, page_kwargs = self._split_kwargs(kwargs)
page_kwargs['node'] = self.node.add_child(**node_kwargs)
return self.__class__.objects.create(**page_kwargs)
add_child.alters_data = True
@classmethod
def add_root(cls, **kwargs):
from .utils import get_node_model
node_kwargs, page_kwargs = cls._split_kwargs(kwargs)
page_kwargs['node'] = get_node_model().add_root(**node_kwargs)
return cls.objects.create(**page_kwargs)
def get_children(self):
"""
Get all child pages as a queryset. It uses the related node's
``get_children`` method from ``treebeard`` but returning a queryset of
<FancyPage fancypages.models.FancyPage> objects instead of their nodes.
:return: Queryset of <FancyPage fancypages.models.FancyPage> objects.
"""
nodes = self.node.get_children()
return self.__class__.objects.filter(node__in=nodes)
def delete(self, using=None):
"""
Deletes the instance of ``FancyPage`` and makes sure that the related
``PageNode`` is deleted as well. This should usually be handled by the
``on_delete`` argument on the ``ForeignKey`` field but in this instance
it doesn't take effect. For the time being, the node object is deleted
after the page has been removed.
"""
node = self.node
super(AbstractFancyPage, self).delete(using)
node.delete()
delete.alters_data = True
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError
try:
return getattr(self.node, name)
except AttributeError:
pass
raise AttributeError(
"neither '{}' nor '{}' have an attribute '{}".format(
self.__class__, self.node.__class__, name))
def get_container_from_name(self, name):
try:
return self.containers.get(name=name)
except models.get_model('fancypages', 'Container').DoesNotExist:
return None
def create_container(self, name):
if self.containers.filter(name=name).count():
return
self.containers.create(name=name)
create_container.alters_data = True
def __unicode__(self):
return u"FancyPage '{0}'".format(self.name)
def save(self, update_slugs=True, *args, **kwargs):
"""
Saving this page has several additional responsibilities to ensure
the consistency of the data. Before actually saving the model to the
database, it is ensured that *slug* and *status* are set on the page
if either of them is not defined. If not set, the slug is generated
from the page name. If the status is not set, the default status
defined in ``FP_DEFAULT_PAGE_STATUS`` is used.
After saving, all containers specified in the template for this page
that don't exist yet are created, using the currently active language code.
"""
if not self.slug:
self.slug = slugify(self.name)
if not self.status:
self.status = getattr(
settings, 'FP_DEFAULT_PAGE_STATUS', self.DRAFT)
super(AbstractFancyPage, self).save(*args, **kwargs)
language_code = get_language()
for cname in self._get_missing_containers(language_code=language_code):
self.containers.create(
page_object=self, name=cname, language_code=language_code)
def _get_missing_containers(self, language_code=None):
language_code = language_code or get_language()
try:
template_name = self.page_type.template_name
except AttributeError:
template_name = getattr(
settings, 'FP_DEFAULT_TEMPLATE',
defaults.FP_DEFAULT_TEMPLATE)
cnames = self.containers.filter(
language_code=language_code).values_list('name')
existing_containers = [i[0] for i in cnames]
for cname in get_container_names_from_template(template_name):
if cname not in existing_containers:
yield cname
class Meta:
app_label = 'fancypages'
abstract = True
class AbstractContainer(mixins.TemplateNamesModelMixin, models.Model):
template_name = 'fancypages/container.html'
uuid = ShortUUIDField(verbose_name=_("Unique ID"), db_index=True)
# this is the name of the variable used in the template tag
# e.g. {% fancypages-container var-name %}
name = models.SlugField(_("Variable name"), max_length=50, blank=True)
title = models.CharField(_("Title"), max_length=100, blank=True)
language_code = models.CharField(
_("Language"), max_length=7, default=get_language())
# this allows for assigning a container to any type of model. This
# field is nullable as a container does not have to be assigned to a
# model. In that case, it can be placed in any template by simply passing
# the name into the template tag.
content_type = models.ForeignKey(ContentType, null=True)
object_id = models.PositiveIntegerField(null=True)
page_object = generic.GenericForeignKey('content_type', 'object_id')
objects = ContainerManager()
def clean(self):
if self.object_id and self.content_type:
return
# A container not attached to an object must have a unique name.
container_exists = self.__class__.objects.filter(
name=self.name, object_id=None, content_type=None).exists()
if container_exists:
raise ValidationError(
"a container with name '{0}' already exists".format(self.name))
@classmethod
def get_container_by_name(cls, name, obj=None, language_code=u''):
"""
Get container of *obj* with the specified variable *name*. It
assumes that *obj* has a ``containers`` attribute and returns
the container with *name* or ``None`` if it cannot be found.
"""
filters = {
'name': name, 'language_code': language_code or get_language()}
if not obj:
container, __ = cls.objects.get_or_create(**filters)
return container
object_type = ContentType.objects.get_for_model(obj)
if object_type is None:
return None
filters['content_type'] = object_type
filters['object_id'] = obj.id
ctn, __ = cls.objects.get_or_create(**filters)
return ctn
def save(self, *args, **kwargs):
self.clean()
# make sure that we have a UUID set, we might need it before it will
# be automatically generated by the ShortUUIDField
if not self.uuid:
self.uuid = unicode(uuid())
# Check if we have a name, if not we generate a name from the model
# name and the UUID of this block. This avoids collision when
# auto-generating new models without explicit name
if not self.name:
self.name = "{}-{}".format(self._meta.module_name, self.uuid)
return super(AbstractContainer, self).save(*args, **kwargs)
def __unicode__(self):
return u"Container '{}' in '{}' [{}]".format(
self.name, self.content_type, self.language_code)
class Meta:
abstract = True
app_label = 'fancypages'
class AbstractContentBlock(mixins.TemplateNamesModelMixin, models.Model):
name = None
code = None
group = None
template_name = None
renderer_class = None
form_class = None
default_template_names = [
"fancypages/blocks/{module_name}.html", "blocks/{module_name}.html"]
uuid = ShortUUIDField(verbose_name=_("Unique ID"), db_index=True)
# we ignore the related names for each content block model
# to prevent cluttering the container model. Also the look up has
# to be done more efficient than through these attributes.
container = models.ForeignKey(
'fancypages.Container', verbose_name=_("Container"),
related_name="blocks")
display_order = models.PositiveIntegerField()
objects = InheritanceManager()
@property
def language_code(self):
try:
return self.container.language_code
except AbstractContainer.DoesNotExist:
pass
return u''
@classmethod
def get_form_class(cls):
return cls.form_class
def get_renderer_class(self):
from fancypages.renderers import BlockRenderer
return self.renderer_class or BlockRenderer
def save(self, **kwargs):
if self.display_order is None:
self.display_order = self.container.blocks.count()
try:
db_block = self.__class__.objects.get(pk=self.pk)
except ObjectDoesNotExist:
db_block = self
db_container = db_block.container
db_display_order = db_block.display_order
super(AbstractContentBlock, self).save(**kwargs)
if db_display_order != self.display_order \
or self.container != db_container:
self.fix_block_positions(db_display_order, db_container)
def fix_block_positions(self, old_position, old_container):
if self.container != old_container:
for idx, block in enumerate(old_container.blocks.all()):
block.display_order = idx
block.save()
if self.display_order > old_position:
blocks = self.container.blocks.filter(
~models.Q(id=self.id) &
models.Q(display_order__lte=self.display_order)
)
for idx, block in enumerate(blocks):
block.display_order = idx
block.save()
else:
blocks = self.container.blocks.filter(
~models.Q(id=self.id) &
models.Q(display_order__gte=self.display_order)
)
for idx, block in enumerate(blocks):
block.display_order = self.display_order + idx + 1
block.save()
fix_block_positions.alters_data = True
def __unicode__(self):
return "Block #%s" % self.id
class Meta:
abstract = True
|
tangentlabs/django-fancypages
|
fancypages/abstract_models.py
|
Python
|
bsd-3-clause
| 18,345
|
"""
In scikit-learn 0.18, sklearn.grid_search was deprecated. Since
skutil handles the deprecation issues in skutil.utils.fixes, the
skutil.model_selection module merely provides the same import
functionality as sklearn 0.18, so sklearn users can seamlessly
migrate to skutil for grid_search imports.
"""
from skutil.grid_search import *
__all__ = [
'GridSearchCV',
'RandomizedSearchCV'
]
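# Illustrative usage sketch -- an addition, not part of the original module. After the
# star import above, code written against sklearn's grid search keeps working with a
# one-line change of import path:
#
# from skutil.model_selection import GridSearchCV, RandomizedSearchCV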
|
tgsmith61591/skutil
|
skutil/model_selection/__init__.py
|
Python
|
bsd-3-clause
| 399
|
from django.conf import settings
RELATIVE_FOR_YEAR = getattr(settings, 'RELATIVE_FOR_YEAR', 1)
RELATIVE_FOR_MONTH = getattr(settings, 'RELATIVE_FOR_MONTH', 3)
RELATIVE_FOR_WEEK = getattr(settings, 'RELATIVE_FOR_WEEK', 2)
|
unk2k/django-statistic
|
statistic/settings.py
|
Python
|
bsd-3-clause
| 223
|
from panels.models import Page, Note, Tag, TagType, StaticPage
from django.contrib import admin
from django.conf import settings
class NoteInline(admin.StackedInline):
model = Note
extra = 1
class Media:
js = (
settings.MEDIA_URL + 'general/js/tiny_mce/tiny_mce.js',
settings.MEDIA_URL + 'panels/js/admin/textareas.js',
)
class PageAdmin(admin.ModelAdmin):
filter_horizontal = ('tags',)
fieldsets = [
(None, {'fields': ['title', 'slug', 'image']}),
('Date Information', {'fields': ['pub_date', 'list_date']}),
('Extra Information', {'fields': ['tags']}),
]
inlines = [NoteInline,]
ordering = ('-list_date',)
prepopulated_fields = {"slug": ("title",)}
class StaticPageAdmin(admin.ModelAdmin):
fields = ('title', 'slug', 'content')
class Media:
js = (
settings.MEDIA_URL + 'general/js/tiny_mce/tiny_mce.js',
settings.MEDIA_URL + 'panels/js/admin/textareas.js',
)
prepopulated_fields = {"slug": ("title",)}
admin.site.register(Page, PageAdmin)
admin.site.register(Tag)
admin.site.register(TagType)
admin.site.register(StaticPage, StaticPageAdmin)
|
jwadden/django-panels
|
panels/admin.py
|
Python
|
bsd-3-clause
| 1,209
|
"""
Utilities for conversion to writer-agnostic Excel representation.
"""
from __future__ import annotations
from functools import reduce
import itertools
import re
from typing import (
Any,
Callable,
Hashable,
Iterable,
Mapping,
Sequence,
cast,
)
import warnings
import numpy as np
from pandas._libs.lib import is_list_like
from pandas._typing import (
IndexLabel,
StorageOptions,
)
from pandas.util._decorators import doc
from pandas.core.dtypes import missing
from pandas.core.dtypes.common import (
is_float,
is_scalar,
)
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
)
import pandas.core.common as com
from pandas.core.shared_docs import _shared_docs
from pandas.io.formats._color_data import CSS4_COLORS
from pandas.io.formats.css import (
CSSResolver,
CSSWarning,
)
from pandas.io.formats.format import get_level_lengths
from pandas.io.formats.printing import pprint_thing
class ExcelCell:
__fields__ = ("row", "col", "val", "style", "mergestart", "mergeend")
__slots__ = __fields__
def __init__(
self,
row: int,
col: int,
val,
style=None,
mergestart: int | None = None,
mergeend: int | None = None,
):
self.row = row
self.col = col
self.val = val
self.style = style
self.mergestart = mergestart
self.mergeend = mergeend
class CssExcelCell(ExcelCell):
def __init__(
self,
row: int,
col: int,
val,
style: dict | None,
css_styles: dict[tuple[int, int], list[tuple[str, Any]]] | None,
css_row: int,
css_col: int,
css_converter: Callable | None,
**kwargs,
):
if css_styles and css_converter:
css = ";".join(
[a + ":" + str(v) for (a, v) in css_styles[css_row, css_col]]
)
style = css_converter(css)
return super().__init__(row=row, col=col, val=val, style=style, **kwargs)
class CSSToExcelConverter:
"""
A callable for converting CSS declarations to ExcelWriter styles
Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow),
focusing on font styling, backgrounds, borders and alignment.
Operates by first computing CSS styles in a fairly generic
way (see :meth:`compute_css`) then determining Excel style
properties from CSS properties (see :meth:`build_xlstyle`).
Parameters
----------
inherited : str, optional
CSS declarations understood to be the containing scope for the
CSS processed by :meth:`__call__`.
"""
NAMED_COLORS = CSS4_COLORS
VERTICAL_MAP = {
"top": "top",
"text-top": "top",
"middle": "center",
"baseline": "bottom",
"bottom": "bottom",
"text-bottom": "bottom",
# OpenXML also has 'justify', 'distributed'
}
BOLD_MAP = {
"bold": True,
"bolder": True,
"600": True,
"700": True,
"800": True,
"900": True,
"normal": False,
"lighter": False,
"100": False,
"200": False,
"300": False,
"400": False,
"500": False,
}
ITALIC_MAP = {
"normal": False,
"italic": True,
"oblique": True,
}
FAMILY_MAP = {
"serif": 1, # roman
"sans-serif": 2, # swiss
"cursive": 4, # script
"fantasy": 5, # decorative
}
# NB: Most of the methods here could be classmethods, as only __init__
# and __call__ make use of instance attributes. We leave them as
# instancemethods so that users can easily experiment with extensions
# without monkey-patching.
inherited: dict[str, str] | None
def __init__(self, inherited: str | None = None):
if inherited is not None:
self.inherited = self.compute_css(inherited)
else:
self.inherited = None
compute_css = CSSResolver()
def __call__(self, declarations_str: str) -> dict[str, dict[str, str]]:
"""
Convert CSS declarations to ExcelWriter style.
Parameters
----------
declarations_str : str
List of CSS declarations.
e.g. "font-weight: bold; background: blue"
Returns
-------
xlstyle : dict
A style as interpreted by ExcelWriter when found in
ExcelCell.style.
"""
# TODO: memoize?
properties = self.compute_css(declarations_str, self.inherited)
return self.build_xlstyle(properties)
def build_xlstyle(self, props: Mapping[str, str]) -> dict[str, dict[str, str]]:
out = {
"alignment": self.build_alignment(props),
"border": self.build_border(props),
"fill": self.build_fill(props),
"font": self.build_font(props),
"number_format": self.build_number_format(props),
}
# TODO: handle cell width and height: needs support in pandas.io.excel
def remove_none(d: dict[str, str]) -> None:
"""Remove key where value is None, through nested dicts"""
for k, v in list(d.items()):
if v is None:
del d[k]
elif isinstance(v, dict):
remove_none(v)
if not v:
del d[k]
remove_none(out)
return out
def build_alignment(self, props: Mapping[str, str]) -> dict[str, bool | str | None]:
# TODO: text-indent, padding-left -> alignment.indent
return {
"horizontal": props.get("text-align"),
"vertical": self._get_vertical_alignment(props),
"wrap_text": self._get_is_wrap_text(props),
}
def _get_vertical_alignment(self, props: Mapping[str, str]) -> str | None:
vertical_align = props.get("vertical-align")
if vertical_align:
return self.VERTICAL_MAP.get(vertical_align)
return None
def _get_is_wrap_text(self, props: Mapping[str, str]) -> bool | None:
if props.get("white-space") is None:
return None
return bool(props["white-space"] not in ("nowrap", "pre", "pre-line"))
def build_border(
self, props: Mapping[str, str]
) -> dict[str, dict[str, str | None]]:
return {
side: {
"style": self._border_style(
props.get(f"border-{side}-style"),
props.get(f"border-{side}-width"),
self.color_to_excel(props.get(f"border-{side}-color")),
),
"color": self.color_to_excel(props.get(f"border-{side}-color")),
}
for side in ["top", "right", "bottom", "left"]
}
def _border_style(self, style: str | None, width: str | None, color: str | None):
# convert styles and widths to openxml, one of:
# 'dashDot'
# 'dashDotDot'
# 'dashed'
# 'dotted'
# 'double'
# 'hair'
# 'medium'
# 'mediumDashDot'
# 'mediumDashDotDot'
# 'mediumDashed'
# 'slantDashDot'
# 'thick'
# 'thin'
if width is None and style is None and color is None:
# Return None will remove "border" from style dictionary
return None
if width is None and style is None:
# Return "none" will keep "border" in style dictionary
return "none"
if style == "none" or style == "hidden":
return "none"
width_name = self._get_width_name(width)
if width_name is None:
return "none"
if style in (None, "groove", "ridge", "inset", "outset", "solid"):
# not handled
return width_name
if style == "double":
return "double"
if style == "dotted":
if width_name in ("hair", "thin"):
return "dotted"
return "mediumDashDotDot"
if style == "dashed":
if width_name in ("hair", "thin"):
return "dashed"
return "mediumDashed"
def _get_width_name(self, width_input: str | None) -> str | None:
width = self._width_to_float(width_input)
if width < 1e-5:
return None
elif width < 1.3:
return "thin"
elif width < 2.8:
return "medium"
return "thick"
def _width_to_float(self, width: str | None) -> float:
if width is None:
width = "2pt"
return self._pt_to_float(width)
def _pt_to_float(self, pt_string: str) -> float:
assert pt_string.endswith("pt")
return float(pt_string.rstrip("pt"))
def build_fill(self, props: Mapping[str, str]):
# TODO: perhaps allow for special properties
# -excel-pattern-bgcolor and -excel-pattern-type
fill_color = props.get("background-color")
if fill_color not in (None, "transparent", "none"):
return {"fgColor": self.color_to_excel(fill_color), "patternType": "solid"}
def build_number_format(self, props: Mapping[str, str]) -> dict[str, str | None]:
fc = props.get("number-format")
fc = fc.replace("§", ";") if isinstance(fc, str) else fc
return {"format_code": fc}
def build_font(
self, props: Mapping[str, str]
) -> dict[str, bool | int | float | str | None]:
font_names = self._get_font_names(props)
decoration = self._get_decoration(props)
return {
"name": font_names[0] if font_names else None,
"family": self._select_font_family(font_names),
"size": self._get_font_size(props),
"bold": self._get_is_bold(props),
"italic": self._get_is_italic(props),
"underline": ("single" if "underline" in decoration else None),
"strike": ("line-through" in decoration) or None,
"color": self.color_to_excel(props.get("color")),
# shadow if nonzero digit before shadow color
"shadow": self._get_shadow(props),
}
def _get_is_bold(self, props: Mapping[str, str]) -> bool | None:
weight = props.get("font-weight")
if weight:
return self.BOLD_MAP.get(weight)
return None
def _get_is_italic(self, props: Mapping[str, str]) -> bool | None:
font_style = props.get("font-style")
if font_style:
return self.ITALIC_MAP.get(font_style)
return None
def _get_decoration(self, props: Mapping[str, str]) -> Sequence[str]:
decoration = props.get("text-decoration")
if decoration is not None:
return decoration.split()
else:
return ()
def _get_underline(self, decoration: Sequence[str]) -> str | None:
if "underline" in decoration:
return "single"
return None
def _get_shadow(self, props: Mapping[str, str]) -> bool | None:
if "text-shadow" in props:
return bool(re.search("^[^#(]*[1-9]", props["text-shadow"]))
return None
def _get_font_names(self, props: Mapping[str, str]) -> Sequence[str]:
font_names_tmp = re.findall(
r"""(?x)
(
"(?:[^"]|\\")+"
|
'(?:[^']|\\')+'
|
[^'",]+
)(?=,|\s*$)
""",
props.get("font-family", ""),
)
font_names = []
for name in font_names_tmp:
if name[:1] == '"':
name = name[1:-1].replace('\\"', '"')
elif name[:1] == "'":
name = name[1:-1].replace("\\'", "'")
else:
name = name.strip()
if name:
font_names.append(name)
return font_names
def _get_font_size(self, props: Mapping[str, str]) -> float | None:
size = props.get("font-size")
if size is None:
return size
return self._pt_to_float(size)
def _select_font_family(self, font_names) -> int | None:
family = None
for name in font_names:
family = self.FAMILY_MAP.get(name)
if family:
break
return family
def color_to_excel(self, val: str | None) -> str | None:
if val is None:
return None
if self._is_hex_color(val):
return self._convert_hex_to_excel(val)
try:
return self.NAMED_COLORS[val]
except KeyError:
warnings.warn(f"Unhandled color format: {repr(val)}", CSSWarning)
return None
def _is_hex_color(self, color_string: str) -> bool:
return bool(color_string.startswith("#"))
def _convert_hex_to_excel(self, color_string: str) -> str:
code = color_string.lstrip("#")
if self._is_shorthand_color(color_string):
return (code[0] * 2 + code[1] * 2 + code[2] * 2).upper()
else:
return code.upper()
def _is_shorthand_color(self, color_string: str) -> bool:
"""Check if color code is shorthand.
#FFF is a shorthand as opposed to full #FFFFFF.
"""
code = color_string.lstrip("#")
if len(code) == 3:
return True
elif len(code) == 6:
return False
else:
raise ValueError(f"Unexpected color {color_string}")
class ExcelFormatter:
"""
Class for formatting a DataFrame to a list of ExcelCells,
Parameters
----------
df : DataFrame or Styler
na_rep: na representation
float_format : str, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : bool or sequence of str, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : bool, default True
output row names (index)
index_label : str or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
merge_cells : bool, default False
Format MultiIndex and Hierarchical Rows as merged cells.
inf_rep : str, default `'inf'`
representation for np.inf values (which aren't representable in Excel)
A `'-'` sign will be added in front of -inf.
style_converter : callable, optional
This translates Styler styles (CSS) into ExcelWriter styles.
Defaults to ``CSSToExcelConverter()``.
It should have signature css_declarations string -> excel style.
This is only called for body cells.
"""
max_rows = 2**20
max_cols = 2**14
def __init__(
self,
df,
na_rep: str = "",
float_format: str | None = None,
cols: Sequence[Hashable] | None = None,
header: Sequence[Hashable] | bool = True,
index: bool = True,
index_label: IndexLabel | None = None,
merge_cells: bool = False,
inf_rep: str = "inf",
style_converter: Callable | None = None,
):
self.rowcounter = 0
self.na_rep = na_rep
if not isinstance(df, DataFrame):
self.styler = df
self.styler._compute() # calculate applied styles
df = df.data
if style_converter is None:
style_converter = CSSToExcelConverter()
self.style_converter: Callable | None = style_converter
else:
self.styler = None
self.style_converter = None
self.df = df
if cols is not None:
# all missing, raise
if not len(Index(cols).intersection(df.columns)):
raise KeyError("passes columns are not ALL present dataframe")
if len(Index(cols).intersection(df.columns)) != len(set(cols)):
# Deprecated in GH#17295, enforced in 1.0.0
raise KeyError("Not all names specified in 'columns' are found")
self.df = df.reindex(columns=cols)
self.columns = self.df.columns
self.float_format = float_format
self.index = index
self.index_label = index_label
self.header = header
self.merge_cells = merge_cells
self.inf_rep = inf_rep
@property
def header_style(self):
return {
"font": {"bold": True},
"borders": {
"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin",
},
"alignment": {"horizontal": "center", "vertical": "top"},
}
def _format_value(self, val):
if is_scalar(val) and missing.isna(val):
val = self.na_rep
elif is_float(val):
if missing.isposinf_scalar(val):
val = self.inf_rep
elif missing.isneginf_scalar(val):
val = f"-{self.inf_rep}"
elif self.float_format is not None:
val = float(self.float_format % val)
if getattr(val, "tzinfo", None) is not None:
raise ValueError(
"Excel does not support datetimes with "
"timezones. Please ensure that datetimes "
"are timezone unaware before writing to Excel."
)
return val
def _format_header_mi(self) -> Iterable[ExcelCell]:
if self.columns.nlevels > 1:
if not self.index:
raise NotImplementedError(
"Writing to Excel with MultiIndex columns and no "
"index ('index'=False) is not yet implemented."
)
if not (self._has_aliases or self.header):
return
columns = self.columns
level_strs = columns.format(
sparsify=self.merge_cells, adjoin=False, names=False
)
level_lengths = get_level_lengths(level_strs)
coloffset = 0
lnum = 0
if self.index and isinstance(self.df.index, MultiIndex):
coloffset = len(self.df.index[0]) - 1
if self.merge_cells:
# Format multi-index as a merged cells.
for lnum, name in enumerate(columns.names):
yield ExcelCell(
row=lnum,
col=coloffset,
val=name,
style=self.header_style,
)
for lnum, (spans, levels, level_codes) in enumerate(
zip(level_lengths, columns.levels, columns.codes)
):
values = levels.take(level_codes)
for i, span_val in spans.items():
mergestart, mergeend = None, None
if span_val > 1:
mergestart, mergeend = lnum, coloffset + i + span_val
yield CssExcelCell(
row=lnum,
col=coloffset + i + 1,
val=values[i],
style=self.header_style,
css_styles=getattr(self.styler, "ctx_columns", None),
css_row=lnum,
css_col=i,
css_converter=self.style_converter,
mergestart=mergestart,
mergeend=mergeend,
)
else:
# Format in legacy format with dots to indicate levels.
for i, values in enumerate(zip(*level_strs)):
v = ".".join(map(pprint_thing, values))
yield CssExcelCell(
row=lnum,
col=coloffset + i + 1,
val=v,
style=self.header_style,
css_styles=getattr(self.styler, "ctx_columns", None),
css_row=lnum,
css_col=i,
css_converter=self.style_converter,
)
self.rowcounter = lnum
def _format_header_regular(self) -> Iterable[ExcelCell]:
if self._has_aliases or self.header:
coloffset = 0
if self.index:
coloffset = 1
if isinstance(self.df.index, MultiIndex):
coloffset = len(self.df.index[0])
colnames = self.columns
if self._has_aliases:
self.header = cast(Sequence, self.header)
if len(self.header) != len(self.columns):
raise ValueError(
f"Writing {len(self.columns)} cols "
f"but got {len(self.header)} aliases"
)
else:
colnames = self.header
for colindex, colname in enumerate(colnames):
yield CssExcelCell(
row=self.rowcounter,
col=colindex + coloffset,
val=colname,
style=self.header_style,
css_styles=getattr(self.styler, "ctx_columns", None),
css_row=0,
css_col=colindex,
css_converter=self.style_converter,
)
def _format_header(self) -> Iterable[ExcelCell]:
gen: Iterable[ExcelCell]
if isinstance(self.columns, MultiIndex):
gen = self._format_header_mi()
else:
gen = self._format_header_regular()
gen2: Iterable[ExcelCell] = ()
if self.df.index.names:
row = [x if x is not None else "" for x in self.df.index.names] + [
""
] * len(self.columns)
if reduce(lambda x, y: x and y, map(lambda x: x != "", row)):
gen2 = (
ExcelCell(self.rowcounter, colindex, val, self.header_style)
for colindex, val in enumerate(row)
)
self.rowcounter += 1
return itertools.chain(gen, gen2)
def _format_body(self) -> Iterable[ExcelCell]:
if isinstance(self.df.index, MultiIndex):
return self._format_hierarchical_rows()
else:
return self._format_regular_rows()
def _format_regular_rows(self) -> Iterable[ExcelCell]:
if self._has_aliases or self.header:
self.rowcounter += 1
# output index and index_label?
if self.index:
# check aliases
# if list only take first as this is not a MultiIndex
if self.index_label and isinstance(
self.index_label, (list, tuple, np.ndarray, Index)
):
index_label = self.index_label[0]
# if string good to go
elif self.index_label and isinstance(self.index_label, str):
index_label = self.index_label
else:
index_label = self.df.index.names[0]
if isinstance(self.columns, MultiIndex):
self.rowcounter += 1
if index_label and self.header is not False:
yield ExcelCell(self.rowcounter - 1, 0, index_label, self.header_style)
# write index_values
index_values = self.df.index
if isinstance(self.df.index, PeriodIndex):
index_values = self.df.index.to_timestamp()
for idx, idxval in enumerate(index_values):
yield CssExcelCell(
row=self.rowcounter + idx,
col=0,
val=idxval,
style=self.header_style,
css_styles=getattr(self.styler, "ctx_index", None),
css_row=idx,
css_col=0,
css_converter=self.style_converter,
)
coloffset = 1
else:
coloffset = 0
yield from self._generate_body(coloffset)
def _format_hierarchical_rows(self) -> Iterable[ExcelCell]:
if self._has_aliases or self.header:
self.rowcounter += 1
gcolidx = 0
if self.index:
index_labels = self.df.index.names
# check for aliases
if self.index_label and isinstance(
self.index_label, (list, tuple, np.ndarray, Index)
):
index_labels = self.index_label
# MultiIndex columns require an extra row
# with index names (blank if None) for
# unambiguous round-trip, unless not merging,
# in which case the names all go on one row Issue #11328
if isinstance(self.columns, MultiIndex) and self.merge_cells:
self.rowcounter += 1
# if index labels are not empty go ahead and dump
if com.any_not_none(*index_labels) and self.header is not False:
for cidx, name in enumerate(index_labels):
yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style)
if self.merge_cells:
# Format hierarchical rows as merged cells.
level_strs = self.df.index.format(
sparsify=True, adjoin=False, names=False
)
level_lengths = get_level_lengths(level_strs)
for spans, levels, level_codes in zip(
level_lengths, self.df.index.levels, self.df.index.codes
):
values = levels.take(
level_codes,
allow_fill=levels._can_hold_na,
fill_value=levels._na_value,
)
for i, span_val in spans.items():
mergestart, mergeend = None, None
if span_val > 1:
mergestart = self.rowcounter + i + span_val - 1
mergeend = gcolidx
yield CssExcelCell(
row=self.rowcounter + i,
col=gcolidx,
val=values[i],
style=self.header_style,
css_styles=getattr(self.styler, "ctx_index", None),
css_row=i,
css_col=gcolidx,
css_converter=self.style_converter,
mergestart=mergestart,
mergeend=mergeend,
)
gcolidx += 1
else:
# Format hierarchical rows with non-merged values.
for indexcolvals in zip(*self.df.index):
for idx, indexcolval in enumerate(indexcolvals):
yield CssExcelCell(
row=self.rowcounter + idx,
col=gcolidx,
val=indexcolval,
style=self.header_style,
css_styles=getattr(self.styler, "ctx_index", None),
css_row=idx,
css_col=gcolidx,
css_converter=self.style_converter,
)
gcolidx += 1
yield from self._generate_body(gcolidx)
@property
def _has_aliases(self) -> bool:
"""Whether the aliases for column names are present."""
return is_list_like(self.header)
def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]:
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
series = self.df.iloc[:, colidx]
for i, val in enumerate(series):
yield CssExcelCell(
row=self.rowcounter + i,
col=colidx + coloffset,
val=val,
style=None,
css_styles=getattr(self.styler, "ctx", None),
css_row=i,
css_col=colidx,
css_converter=self.style_converter,
)
def get_formatted_cells(self) -> Iterable[ExcelCell]:
for cell in itertools.chain(self._format_header(), self._format_body()):
cell.val = self._format_value(cell.val)
yield cell
@doc(storage_options=_shared_docs["storage_options"])
def write(
self,
writer,
sheet_name="Sheet1",
startrow=0,
startcol=0,
freeze_panes=None,
engine=None,
storage_options: StorageOptions = None,
):
"""
writer : path-like, file-like, or ExcelWriter object
File path or existing ExcelWriter
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame
startrow :
upper left cell row to dump data frame
startcol :
upper left cell column to dump data frame
freeze_panes : tuple of integer (length 2), default None
Specifies the one-based bottommost row and rightmost column that
is to be frozen
engine : string, default None
write engine to use if writer is a path - you can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``,
and ``io.excel.xlsm.writer``.
.. deprecated:: 1.2.0
As the `xlwt <https://pypi.org/project/xlwt/>`__ package is no longer
maintained, the ``xlwt`` engine will be removed in a future
version of pandas.
{storage_options}
.. versionadded:: 1.2.0
"""
from pandas.io.excel import ExcelWriter
num_rows, num_cols = self.df.shape
if num_rows > self.max_rows or num_cols > self.max_cols:
raise ValueError(
f"This sheet is too large! Your sheet size is: {num_rows}, {num_cols} "
f"Max sheet size is: {self.max_rows}, {self.max_cols}"
)
formatted_cells = self.get_formatted_cells()
if isinstance(writer, ExcelWriter):
need_save = False
else:
# error: Cannot instantiate abstract class 'ExcelWriter' with abstract
# attributes 'engine', 'save', 'supported_extensions' and 'write_cells'
writer = ExcelWriter( # type: ignore[abstract]
writer, engine=engine, storage_options=storage_options
)
need_save = True
try:
writer._write_cells(
formatted_cells,
sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
)
finally:
# make sure to close opened file handles
if need_save:
writer.close()
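# Illustrative usage sketch -- an addition, not part of the original module. Writing
# to disk would additionally require an installed engine such as openpyxl.
if __name__ == "__main__":
    _df = DataFrame({"a": [1.0, np.inf], "b": ["x", None]})
    _fmt = ExcelFormatter(_df, na_rep="N/A", inf_rep="INF", index=False)
    for _cell in _fmt.get_formatted_cells():
        # header cells come first, then body cells; inf/NaN are already replaced
        print(_cell.row, _cell.col, repr(_cell.val))
    # _fmt.write("demo.xlsx")  # would route the cells through an ExcelWriter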
|
pandas-dev/pandas
|
pandas/io/formats/excel.py
|
Python
|
bsd-3-clause
| 31,258
|
"""
Optimal dimension for noisy linear equations to get accurate concentrations
"""
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import lstsq
from scipy.optimize import nnls
from functools import partial
########################################################
#
# Parameters
#
########################################################
#np.random.seed(121)
# number of unknowns
N_min, N_max = 2, 6
# number of equations
M_min, M_max = 2, 100
# multiplicative noise
MulNoiseGen = partial(np.random.normal, scale=0.005)
# Linear equations solver
LinEqsSolver = nnls
########################################################
#
#
#
########################################################
# Generate unknowns
x_exact = np.abs(np.random.rand(N_max))
x_exact /= x_exact.sum()
# Generate matrix
A_exact = np.abs( np.random.rand(M_max, N_max) )
# Find the rhs of the linear equations
b_exact = A_exact.dot( x_exact )
########################################################
#
# Analysis
#
########################################################
errors = np.zeros( (N_max-N_min+1, M_max-M_min+1), dtype=np.float )
for n in np.arange(N_min, N_max+1) :
for m in np.arange(M_min, M_max+1) :
# Selecting subsystem with
# `n` unknowns and `m` equations
A = np.copy( A_exact[:m, :n] )
b = np.copy( b_exact[:m] )
# Contaminating system with the multiplicative noise
A *= (1. + MulNoiseGen(size=A.size).reshape(A.shape) )
b *= (1. + MulNoiseGen(size=b.size).reshape(b.shape) )
#A += MulNoiseGen(size=A.size).reshape(A.shape)
#b += MulNoiseGen(size=b.size).reshape(b.shape)
# Enforce positivity
A = np.abs(A)
b = np.abs(b)
# Solve noisy linear equation
x = LinEqsSolver(A, b)[0]
errors[n-N_min, m-M_min] = np.max( np.abs( x - x_exact[:n] ) / x_exact[:n] )
print((np.abs(LinEqsSolver(A_exact, b_exact)[0] - x_exact) / x_exact).max())
########################################################
#
# Plotting
#
########################################################
plt.subplot(211)
plt.imshow( np.log(errors), extent=(M_min, M_max, N_min, N_max), interpolation ='nearest', origin='lower' )
#plt.imshow( np.log10(errors), )
plt.xlabel("Number of equations")
plt.ylabel("Number of unknowns")
plt.title("Log of maximum relative error")
plt.colorbar()
plt.subplot(212)
plt.plot( np.arange(M_min, M_max+1), np.log10(errors[-1,:]) )
plt.title ("Log of maximum relative error for *** unknowns")
print(errors.mean())
plt.show()
|
dibondar/PyPhotonicReagents
|
projetcs/ODD_pulse_shaper/theoretical analysis/opt_size_nosy_linear_eqs.py
|
Python
|
bsd-3-clause
| 2,494
|
from __future__ import print_function
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import textwrap
def downloadFromURL(uris=None, fileNames=None, nodeNames=None, loadFiles=None,
customDownloader=None, loadFileTypes=None, loadFileProperties={}):
"""Download and optionally load data into the application.
:param uris: Download URL(s).
:param fileNames: File name(s) that will be downloaded (and loaded).
:param nodeNames: Node name(s) in the scene.
:param loadFiles: Boolean indicating if file(s) should be loaded. By default, the function decides.
:param customDownloader: Custom function for downloading.
:param loadFileTypes: file format name(s) ('VolumeFile' by default).
:param loadFileProperties: custom properties passed to the IO plugin.
If the given ``fileNames`` are not found in the application cache directory, they
are downloaded using the associated URIs.
See ``slicer.mrmlScene.GetCacheManager().GetRemoteCacheDirectory()``
If not explicitly provided or if set to ``None``, the ``loadFileTypes`` are
guessed based on the corresponding filename extensions.
If a given fileName has the ``.mrb`` or ``.mrml`` extension, it will **not** be loaded
by default. To ensure the file is loaded, ``loadFiles`` must be set.
The ``loadFileProperties`` are common for all files. If different properties
need to be associated with files of different types, downloadFromURL must
be called for each.
"""
return SampleDataLogic().downloadFromURL(
uris, fileNames, nodeNames, loadFiles, customDownloader, loadFileTypes, loadFileProperties)
def downloadSample(sampleName):
"""For a given sample name this will search the available sources
and load it if it is available. Returns the first loaded node."""
return SampleDataLogic().downloadSamples(sampleName)[0]
def downloadSamples(sampleName):
"""For a given sample name this will search the available sources
and load it if it is available. Returns the loaded nodes."""
return SampleDataLogic().downloadSamples(sampleName)
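# Illustrative usage sketch -- an addition, not part of the original module. Both
# helpers need a running Slicer session and network access, so they are typically
# called from Slicer's Python console; the URL and file name below are hypothetical,
# while 'WEIN069' is one of the sample names acknowledged further down.
#
#   volumeNode = downloadSample('WEIN069')
#   nodes = downloadFromURL(uris='https://example.org/cube.fits',
#                           fileNames='cube.fits', nodeNames='cube')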
#
# AstroSampleData
#
class AstroSampleData(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Astro Sample Data"
self.parent.categories = ["Astronomy"]
self.parent.dependencies = ["AstroVolume"]
self.parent.contributors = ["""
Davide Punzo (Kapteyn Astronomical Institute),
Thijs van der Hulst (Kapteyn Astronomical Institute) and
Jos Roerdink (Johann Bernoulli Institute)."""]
self.parent.helpText = """
The AstroSampleData module can be used to download data for working with in SlicerAstro.
Use of this module requires an active network connection."""
self.parent.acknowledgementText = """
This module was developed by Davide Punzo. <br>
This work was supported by ERC grant nr. 291531 and the Slicer Community. <br><br>
Data acknowledgement: <br>
WEIN069: Mpati Ramatsoku and Marc Verheijen (Kapteyn Astronomical Institute); <br>
WEIN069_MASK: mask generated using SoFiA (https://github.com/SoFiA-Admin/SoFiA); <br>
NGC2403: THING survey; <br>
NGC2403_DSS: optical image from DSS; <br>
NGC3379 and NGC4111: ATLAS3D survey. <br>
This file was originally written by Steve Pieper.
"""
self.parent.icon = qt.QIcon(':Icons/XLarge/NGC2841.png')
self.parent = parent
if slicer.mrmlScene.GetTagByClassName( "vtkMRMLScriptedModuleNode" ) != 'ScriptedModule':
slicer.mrmlScene.RegisterNodeClass(vtkMRMLScriptedModuleNode())
# Trigger the menu to be added when application has started up
if not slicer.app.commandOptions().noMainWindow :
slicer.app.connect("startupCompleted()", self.addMenu)
# allow other modules to register sample data sources by appending
# AstroSampleDataSource instances (or subclasses) to the per-category
# lists in this dictionary
try:
slicer.modules.sampleDataSources
except AttributeError:
slicer.modules.sampleDataSources = {}
def addMenu(self):
actionIcon = self.parent.icon
a = qt.QAction(actionIcon, 'Download Sample Data', slicer.util.mainWindow())
a.setToolTip('Go to the SampleData module to download data from the network')
a.connect('triggered()', self.select)
fileMenu = slicer.util.lookupTopLevelWidget('FileMenu')
if fileMenu:
for action in fileMenu.actions():
if action.text == 'Save':
fileMenu.insertAction(action,a)
def select(self):
m = slicer.util.mainWindow()
m.moduleSelector().selectModule('AstroSampleData')
#
# AstroSampleDataSource
#
class AstroSampleDataSource:
"""
Describe a set of sample data associated with one or multiple URIs and filenames.
"""
def __init__(self, sampleName=None, sampleDescription=None, uris=None, fileNames=None, nodeNames=None, loadFiles=None,
customDownloader=None, thumbnailFileName=None,
loadFileType=None, loadFileProperties={}):
"""
:param sampleName: Name identifying the data set.
:param sampleDescription: Displayed name of data set in SampleData module GUI. (default is ``sampleName``)
:param thumbnailFileName: Displayed thumbnail of data set in SampleData module GUI.
:param uris: Download URL(s).
:param fileNames: File name(s) that will be downloaded (and loaded).
:param nodeNames: Node name(s) in the scene.
:param loadFiles: Boolean indicating if file(s) should be loaded.
:param customDownloader: Custom function for downloading.
:param loadFileType: file format name(s) ('VolumeFile' by default if node name is specified).
:param loadFileProperties: custom properties passed to the IO plugin.
"""
self.sampleName = sampleName
if sampleDescription is None:
sampleDescription = sampleName
self.sampleDescription = sampleDescription
if (isinstance(uris, list) or isinstance(uris, tuple)):
if isinstance(loadFileType, str) or loadFileType is None:
loadFileType = [loadFileType] * len(uris)
if nodeNames is None:
nodeNames = [None] * len(uris)
if loadFiles is None:
loadFiles = [None] * len(uris)
elif isinstance(uris, str):
uris = [uris,]
fileNames = [fileNames,]
nodeNames = [nodeNames,]
loadFiles = [loadFiles,]
loadFileType = [loadFileType,]
updatedFileType = []
for fileName, nodeName, fileType in zip(fileNames, nodeNames, loadFileType):
# If not explicitly specified, attempt to guess fileType
if fileType is None:
if nodeName is not None:
# TODO: Use method from Slicer IO logic ?
fileType = "VolumeFile"
else:
ext = os.path.splitext(fileName.lower())[1]
if ext in [".mrml", ".mrb"]:
fileType = "SceneFile"
elif ext in [".zip"]:
fileType = "ZipFile"
updatedFileType.append(fileType)
self.uris = uris
self.fileNames = fileNames
self.nodeNames = nodeNames
self.loadFiles = loadFiles
self.customDownloader = customDownloader
self.thumbnailFileName = thumbnailFileName
self.loadFileType = updatedFileType
self.loadFileProperties = loadFileProperties
if not len(uris) == len(fileNames) == len(nodeNames) == len(loadFiles) == len(updatedFileType):
raise Exception("All fields of sample data source must have the same length")
def __str__(self):
output = [
"sampleName : %s" % self.sampleName,
"sampleDescription : %s" % self.sampleDescription,
"thumbnailFileName : %s" % self.thumbnailFileName,
"loadFileProperties: %s" % self.loadFileProperties,
"customDownloader : %s" % self.customDownloader,
""
]
for fileName, uri, nodeName, loadFile, fileType in zip(self.fileNames, self.uris, self.nodeNames, self.loadFiles, self.loadFileType):
output.extend([
"fileName : %s" % fileName,
"uri : %s" % uri,
"nodeName : %s" % nodeName,
"loadFile : %s" % loadFile,
"loadFileType: %s" % fileType,
""
])
return "\n".join(output)
#
# SampleData widget
#
class AstroSampleDataWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# This module is often used in developer mode, therefore
# collapse reload & test section by default.
if hasattr(self, "reloadCollapsibleButton"):
self.reloadCollapsibleButton.collapsed = True
self.observerTags = []
self.logic = AstroSampleDataLogic(self.logMessage)
numberOfColumns = 3
iconPath = os.path.join(os.path.dirname(__file__).replace('\\','/'), 'Resources','Icons')
desktop = qt.QDesktopWidget()
mainScreenSize = desktop.availableGeometry(desktop.primaryScreen)
iconSize = qt.QSize(mainScreenSize.width()/15,mainScreenSize.height()/10)
categories = sorted(slicer.modules.sampleDataSources.keys())
if self.logic.builtInCategoryName in categories:
categories.remove(self.logic.builtInCategoryName)
if 'AstronomicalData' in categories:
categories.remove('AstronomicalData')
categories.insert(0, 'AstronomicalData')
for category in categories:
if category == self.logic.developmentCategoryName and self.developerMode is False:
continue
frame = ctk.ctkCollapsibleGroupBox(self.parent)
self.layout.addWidget(frame)
frame.title = category
frame.name = '%sCollapsibleGroupBox' % category
layout = qt.QGridLayout(frame)
columnIndex = 0
rowIndex = 0
for source in slicer.modules.sampleDataSources[category]:
name = source.sampleDescription
if not name:
name = source.nodeNames[0]
b = qt.QToolButton()
b.setText(name)
# Set thumbnail
if source.thumbnailFileName:
# Thumbnail provided
thumbnailImage = source.thumbnailFileName
else:
# Look for a thumbnail image named after one of the node names, with a .png extension
thumbnailImage = None
for nodeName in source.nodeNames:
if not nodeName:
continue
thumbnailImageAttempt = os.path.join(iconPath, nodeName+'.png')
if os.path.exists(thumbnailImageAttempt):
thumbnailImage = thumbnailImageAttempt
break
if thumbnailImage and os.path.exists(thumbnailImage):
b.setIcon(qt.QIcon(thumbnailImage))
b.setIconSize(iconSize)
b.setToolButtonStyle(qt.Qt.ToolButtonTextUnderIcon)
qSize = qt.QSizePolicy()
qSize.setHorizontalPolicy(qt.QSizePolicy.Expanding)
b.setSizePolicy(qSize)
b.name = '%sPushButton' % name
layout.addWidget(b, rowIndex, columnIndex)
columnIndex += 1
if columnIndex==numberOfColumns:
rowIndex += 1
columnIndex = 0
if source.customDownloader:
b.connect('clicked()', source.customDownloader)
else:
b.connect('clicked()', lambda s=source: self.logic.downloadFromSource(s))
self.log = qt.QTextEdit()
self.log.readOnly = True
self.layout.addWidget(self.log)
self.logMessage('<p>Status: <i>Idle</i>')
# Add spacer to layout
self.layout.addStretch(1)
def logMessage(self, message, logLevel=logging.INFO):
# Set text color based on log level
if logLevel >= logging.ERROR:
message = '<font color="red">' + message + '</font>'
elif logLevel >= logging.WARNING:
message = '<font color="orange">' + message + '</font>'
# Show message in status bar
doc = qt.QTextDocument()
doc.setHtml(message)
slicer.util.showStatusMessage(doc.toPlainText(),3000)
# Show message in log window at the bottom of the module widget
self.log.insertHtml(message)
self.log.insertPlainText('\n')
self.log.ensureCursorVisible()
self.log.repaint()
logging.log(logLevel, message)
slicer.app.processEvents(qt.QEventLoop.ExcludeUserInputEvents)
#
# SampleData logic
#
class AstroSampleDataLogic:
"""Manage the slicer.modules.sampleDataSources dictionary.
The dictionary keys are categories of sample data sources.
The BuiltIn category is managed here. Modules or extensions can
register their own sample data by creating instances of the
AstroSampleDataSource class. These instances should be stored in a
list that is assigned to a category following the model
used in registerBuiltInAstroSampleDataSources below.
"""
@staticmethod
def registerCustomSampleDataSource(category='Custom',
sampleName=None, uris=None, fileNames=None, nodeNames=None,
customDownloader=None, thumbnailFileName=None,
loadFileType='VolumeFile', loadFiles=None, loadFileProperties={}):
"""Adds custom data sets to SampleData.
:param category: Section title of data set in SampleData module GUI.
:param sampleName: Displayed name of data set in SampleData module GUI.
:param thumbnailFileName: Displayed thumbnail of data set in SampleData module GUI.
:param uris: Download URL(s).
:param fileNames: File name(s) that will be loaded.
:param nodeNames: Node name(s) in the scene.
:param customDownloader: Custom function for downloading.
:param loadFileType: file format name(s) ('VolumeFile' by default).
:param loadFiles: Boolean indicating if file(s) should be loaded. By default, the function decides.
:param loadFileProperties: custom properties passed to the IO plugin.
"""
try:
slicer.modules.sampleDataSources
except AttributeError:
slicer.modules.sampleDataSources = {}
if category not in slicer.modules.sampleDataSources:
slicer.modules.sampleDataSources[category] = []
slicer.modules.sampleDataSources[category].append(AstroSampleDataSource(
sampleName=sampleName,
uris=uris,
fileNames=fileNames,
nodeNames=nodeNames,
thumbnailFileName=thumbnailFileName,
loadFileType=loadFileType,
loadFiles=loadFiles,
loadFileProperties=loadFileProperties
))
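# Sketch of how another module could register its own sample data
# (category, name and URL below are hypothetical):
#
#   AstroSampleDataLogic.registerCustomSampleDataSource(
#       category='MyExtension',
#       sampleName='MyCube',
#       uris='http://example.org/MyCube.fits',
#       fileNames='MyCube.fits',
#       nodeNames='MyCube')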
def __init__(self, logMessage=None):
if logMessage:
self.logMessage = logMessage
self.builtInCategoryName = 'BuiltIn'
self.developmentCategoryName = 'Development'
self.registerBuiltInAstroSampleDataSources()
def registerBuiltInAstroSampleDataSources(self):
"""Fills in the pre-define sample data sources"""
sourceArguments = (
('WEIN069', None, 'http://slicer.kitware.com/midas3/download/item/337752/WEIN069.fits', 'WEIN069.fits', 'WEIN069'),
('WEIN069_MASK', None, 'http://slicer.kitware.com/midas3/download/item/266403/WEIN069_mask.fits', 'WEIN069_mask.fits', 'WEIN069_mask'),
('NGC2403_DSS', None, 'http://slicer.kitware.com/midas3/download/item/365486/NGC2403_DSS.fits', 'NGC2403_DSS.fits', 'NGC2403_DSS'),
('NGC2403', None, 'http://slicer.kitware.com/midas3/download/item/359776/NGC2403.fits+%281%29', 'NGC2403.fits', 'NGC2403'),
('NGC4111', None, 'http://slicer.kitware.com/midas3/download/item/242880/NGC4111.fits', 'NGC4111.fits', 'NGC4111'),
('NGC3379', None, 'http://slicer.kitware.com/midas3/download/item/242866/NGC3379.fits', 'NGC3379.fits', 'NGC3379'),
)
if 'AstronomicalData' not in slicer.modules.sampleDataSources:
slicer.modules.sampleDataSources['AstronomicalData'] = []
for sourceArgument in sourceArguments:
slicer.modules.sampleDataSources['AstronomicalData'].append(AstroSampleDataSource(*sourceArgument))
def downloadFileIntoCache(self, uri, name):
"""Given a uri and and a filename, download the data into
a file of the given name in the scene's cache"""
destFolderPath = slicer.mrmlScene.GetCacheManager().GetRemoteCacheDirectory()
if not os.access(destFolderPath, os.W_OK):
try:
os.mkdir(destFolderPath)
except:
self.logMessage('<b>Failed to create cache folder %s</b>' % destFolderPath, logging.ERROR)
if not os.access(destFolderPath, os.W_OK):
self.logMessage('<b>Cache folder %s is not writable</b>' % destFolderPath, logging.ERROR)
return self.downloadFile(uri, destFolderPath, name)
def downloadSourceIntoCache(self, source):
"""Download all files for the given source and return a
list of file paths for the results"""
filePaths = []
for uri,fileName in zip(source.uris,source.fileNames):
filePaths.append(self.downloadFileIntoCache(uri, fileName))
return filePaths
def downloadFromSource(self,source,attemptCount=0):
"""Given an instance of AstroSampleDataSource, downloads the associated data and
loads it into Slicer if applicable.
The function always returns a list.
Based on the fileType(s), nodeName(s) and loadFile(s) associated with
the source, different values may be appended to the returned list:
- if nodeName is specified, appends loaded nodes but if ``loadFile`` is False appends downloaded filepath
- if fileType is ``SceneFile``, appends downloaded filepath
- if fileType is ``ZipFile``, appends directory of extracted archive but if ``loadFile`` is False appends downloaded filepath
If no ``nodeNames`` and no ``fileTypes`` are specified or if ``loadFiles`` are all False,
returns the list of all downloaded filepaths.
"""
nodes = []
filePaths = []
for uri,fileName,nodeName,loadFile,loadFileType in zip(source.uris,source.fileNames,source.nodeNames,source.loadFiles,source.loadFileType):
current_source = AstroSampleDataSource(uris=uri, fileNames=fileName, nodeNames=nodeName, loadFiles=loadFile, loadFileType=loadFileType, loadFileProperties=source.loadFileProperties)
filePath = self.downloadFileIntoCache(uri, fileName)
filePaths.append(filePath)
if loadFileType == 'ZipFile':
if loadFile == False:
nodes.append(filePath)
continue
outputDir = slicer.mrmlScene.GetCacheManager().GetRemoteCacheDirectory() + "/" + os.path.splitext(os.path.basename(filePath))[0]
qt.QDir().mkpath(outputDir)
success = slicer.util.extractArchive(filePath, outputDir)
if not success and attemptCount < 5:
file = qt.QFile(filePath)
if not file.remove():
self.logMessage('<b>Load failed! Unable to delete and try again loading %s!</b>' % filePath, logging.ERROR)
nodes.append(None)
break
attemptCount += 1
self.logMessage('<b>Load failed! Trying to download again (%d of 5 attempts)...</b>' % (attemptCount), logging.ERROR)
outputDir = self.downloadFromSource(current_source,attemptCount)[0]
nodes.append(outputDir)
elif loadFileType == 'SceneFile':
if not loadFile:
nodes.append(filePath)
continue
success = self.loadScene(filePath, source.loadFileProperties)
if not success and attemptCount < 5:
file = qt.QFile(filePath)
if not file.remove():
self.logMessage('<b>Load failed! Unable to delete and try again loading %s!</b>' % filePath, logging.ERROR)
nodes.append(None)
break
attemptCount += 1
self.logMessage('<b>Load failed! Trying to download again (%d of 5 attempts)...</b>' % (attemptCount), logging.ERROR)
filePath = self.downloadFromSource(current_source,attemptCount)[0]
nodes.append(filePath)
elif nodeName:
if loadFile == False:
nodes.append(filePath)
continue
loadedNode = self.loadNode(filePath, nodeName, loadFileType, source.loadFileProperties)
if loadedNode is None and attemptCount < 5:
file = qt.QFile(filePath)
if not file.remove():
self.logMessage('<b>Load failed! Unable to delete and try again loading %s!</b>' % filePath, logging.ERROR)
nodes.append(None)
break
attemptCount += 1
self.logMessage('<b>Load failed! Trying to download again (%d of 5 attempts)...</b>' % (attemptCount), logging.ERROR)
loadedNode = self.downloadFromSource(current_source,attemptCount)[0]
nodes.append(loadedNode)
if nodes:
return nodes
else:
return filePaths
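# Sketch of the return-value convention documented above (here ``logic`` is an
# AstroSampleDataLogic instance and ``source`` a registered source, both hypothetical):
#
#   results = logic.downloadFromSource(source)
#   # results is a list with one entry per uri: a loaded node when a nodeName was
#   # given and loading succeeded, a file path when loadFile is False or only a
#   # scene file was downloaded, the extraction directory for ZipFile entries,
#   # and None when loading ultimately failed.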
def sourceForSampleName(self,sampleName):
"""For a given sample name this will search the available sources.
Returns SampleDataSource instance."""
for category in slicer.modules.sampleDataSources.keys():
for source in slicer.modules.sampleDataSources[category]:
if sampleName == source.sampleName:
return source
return None
def downloadFromURL(self, uris=None, fileNames=None, nodeNames=None, loadFiles=None,
customDownloader=None, loadFileTypes=None, loadFileProperties={}):
"""Download and optionally load data into the application.
:param uris: Download URL(s).
:param fileNames: File name(s) that will be downloaded (and loaded).
:param nodeNames: Node name(s) in the scene.
:param loadFiles: Boolean indicating if file(s) should be loaded. By default, the function decides.
:param customDownloader: Custom function for downloading.
:param loadFileTypes: file format name(s) ('VolumeFile' by default).
:param loadFileProperties: custom properties passed to the IO plugin.
If the given ``fileNames`` are not found in the application cache directory, they
are downloaded using the associated URIs.
See ``slicer.mrmlScene.GetCacheManager().GetRemoteCacheDirectory()``
If not explicitly provided or if set to ``None``, the ``loadFileTypes`` are
guessed based on the corresponding filename extensions.
If a given fileName has the ``.mrb`` or ``.mrml`` extension, it will **not** be loaded
by default. To ensure the file is loaded, ``loadFiles`` must be set.
The ``loadFileProperties`` are common for all files. If different properties
need to be associated with files of different types, downloadFromURL must
be called for each.
"""
return self.downloadFromSource(AstroSampleDataSource(
uris=uris, fileNames=fileNames, nodeNames=nodeNames, loadFiles=loadFiles,
loadFileType=loadFileTypes, loadFileProperties=loadFileProperties
))
def downloadSample(self,sampleName):
"""For a given sample name this will search the available sources
and load it if it is available. Returns the first loaded node."""
return self.downloadSamples(sampleName)[0]
def downloadSamples(self,sampleName):
"""For a given sample name this will search the available sources
and load it if it is available. Returns the loaded nodes."""
source = self.sourceForSampleName(sampleName)
nodes = []
if source:
nodes = self.downloadFromSource(source)
return nodes
def logMessage(self,message):
print(message)
def humanFormatSize(self,size):
""" from http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size"""
for x in ['bytes','KB','MB','GB']:
if size < 1024.0 and size > -1024.0:
return "%3.1f %s" % (size, x)
size /= 1024.0
return "%3.1f %s" % (size, 'TB')
def reportHook(self,blocksSoFar,blockSize,totalSize):
# we clamp to 100% because the blockSize might be larger than the file itself
percent = min(int((100. * blocksSoFar * blockSize) / totalSize), 100)
if percent == 100 or (percent - self.downloadPercent >= 10):
# we clamp to totalSize when blockSize is larger than totalSize
humanSizeSoFar = self.humanFormatSize(min(blocksSoFar * blockSize, totalSize))
humanSizeTotal = self.humanFormatSize(totalSize)
self.logMessage('<i>Downloaded %s (%d%% of %s)...</i>' % (humanSizeSoFar, percent, humanSizeTotal))
self.downloadPercent = percent
def downloadFile(self, uri, destFolderPath, name):
filePath = destFolderPath + '/' + name
if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
import urllib.request, urllib.parse, urllib.error
self.logMessage('<b>Requesting download</b> <i>%s</i> from %s...' % (name, uri))
# add a progress bar
self.downloadPercent = 0
try:
urllib.request.urlretrieve(uri, filePath, self.reportHook)
self.logMessage('<b>Download finished</b>')
except IOError as e:
self.logMessage('<b>\tDownload failed: %s</b>' % e, logging.ERROR)
else:
self.logMessage('<b>File already exists in cache - reusing it.</b>')
return filePath
def loadScene(self, uri, fileProperties = {}):
self.logMessage('<b>Requesting load</b> %s...' % uri)
fileProperties['fileName'] = uri
success = slicer.app.coreIOManager().loadNodes('SceneFile', fileProperties)
if not success:
self.logMessage('<b>\tLoad failed!</b>', logging.ERROR)
return False
self.logMessage('<b>Load finished</b>')
return True
def loadNode(self, uri, name, fileType = 'VolumeFile', fileProperties = {}):
self.logMessage('<b>Requesting load</b> <i>%s</i> from %s...' % (name, uri))
fileProperties['fileName'] = uri
fileProperties['name'] = name
fileProperties['center'] = True
if "mask" in name:
fileProperties['labelmap'] = True
firstLoadedNode = None
loadedNodes = vtk.vtkCollection()
success = slicer.app.coreIOManager().loadNodes(fileType, fileProperties, loadedNodes)
if not success or loadedNodes.GetNumberOfItems()<1:
self.logMessage('<b>\tLoad failed!</b>', logging.ERROR)
return None
self.logMessage('<b>Load finished</b>')
# since nodes were read from a temp directory remove the storage nodes
for i in range(loadedNodes.GetNumberOfItems()):
loadedNode = loadedNodes.GetItemAsObject(i)
if not loadedNode.IsA("vtkMRMLStorableNode"):
continue
storageNode = loadedNode.GetStorageNode()
if not storageNode:
continue
slicer.mrmlScene.RemoveNode(storageNode)
loadedNode.SetAndObserveStorageNodeID(None)
return loadedNodes.GetItemAsObject(0)
|
Punzo/SlicerAstro
|
AstroSampleData/AstroSampleData.py
|
Python
|
bsd-3-clause
| 26,359
|
from flask import Flask
import db
# Global server variable
server = Flask(__name__)
#Load the file-backed Database of Shots
db = db.ShotDB("data/shots")
from app import views
from app import resources
|
stanford-gfx/Horus
|
Code/HorusApp/app/__init__.py
|
Python
|
bsd-3-clause
| 205
|
#encoding=gbk
import sys
import time
import pdb
sys.path.append('../src')
from uniq import get_simhash
from db import DBQuery
class IRecord(object):
'''
Base class for records backed by a database table.
'''
PRIMARY_KEY = 'id'
DB_TABLE = ''
KEYS = []
def __hash__(self):
return self.id
def __eq__(self, other):
return self.id == other.id
def __cmp__(self, other):
return cmp(self.id, other.id)
def get_sort_key(self):
'''
qiyi, youku, qq, sohu, tudou, fusion, pps, letv
'''
default = 100
dic = {u'baidu':5, u'douban':6, u'mtime':7,\
u'youku':11, u'qiyi':12, u'tudou':13, u'sohu':14, u'letv':15, u'56':16, u'sina':17, \
u'funshion':18, u'pps':19, u'kankan':20}
return dic.get(self.site, default)
@staticmethod
def get_site_rank(site):
default = 1.0
dic = {u'douban':default, u'mtime':default, u'baidu':1.2, \
u'youku':default, u'qiyi':default, u'tudou':default, u'sohu':default, u'letv':default, \
u'56':default, u'sina':default, \
u'funshion':default, u'pps':default, u'kankan':default}
return dic.get(site, default)
@staticmethod
def get_key_title_site_rank(site):
default = 1.0
dic = {u'douban':default, u'mtime':default, u'baidu':10, \
u'youku':default, u'qiyi':default, u'tudou':default, u'sohu':default, u'letv':default, \
u'56':default, u'sina':default, \
u'funshion':default, u'pps':default, u'kankan':default}
return dic.get(site, default)
@staticmethod
def get_episode_site_rank(site):
default = 0.5
dic = {u'baidu':0.9945, u'qq':0.8893, u'tvmao':0.7560, u'tudou':0.8571,\
u'douban':0.0023, u'qiyi':0.9306, u'56':0.8514, u'wasu':0.8407,\
u'pptv':0.9216, u'sohu':0.8848, u'youku':0.9117, u'cntv':0.7645,\
u'funshion':0.8590, u'letv':0.9141, u'pps':0.9009, u'sina':0.8758}
return dic.get(site, default)
class AlbumCompareRecord(IRecord):
'''
Record used for aggregation / clustering comparisons.
'''
DB_TABLE = ''
PRIMARY_KEY = 'id'
KEYS = []
def __init__(self):
self.raw_record = None
self.id = 0
self.title = ''
self.alias = ''
self.site = ''
# fields produced by the preprocessing / analysis step
############
self.segments = []
self.trunk = ''
self.trunk_season = ''
self.key_title = ''
self.sub_title = ''
self.season_num = 0
self.sub_season = ''
self.total_season_num = 0
self.season_type = ''
self.version = ''
self.vversion = ''
self.version_type = 1
self.title_language = ''
self.region_language = ''
self.video_type = 0
#verbose video type
self.vvt = ''
############
self.poster_md = ''
self.poster_url = ''
self.image_size = 0
self.album_language = ''
self.video_language = ''
self.category = ''
self.album_type = ''
self.directors = ''
self.actors = ''
self.intro = ''
self.site = ''
self.total_episode_num = 0
self.newest_episode_num = 0
self.region = ''
self.pub_year = 0
self.pub_time = 0
self.siteid_pair_list = ''
self.dead = False
self.ended = 0
self.simhash_set = set()
def to_repository_record(self):
rawrecord = self.raw_record
record = AlbumRepositoryRecord()
record.id = self.id
record.title = self.title
record.key_title = self.key_title
record.sub_title = self.sub_title
record.trunk = self.trunk
record.sub_season = self.sub_season
record.show_title = self.title
record.alias = self.alias
record.category = self.category
record.album_type = self.album_type
record.directors = self.directors
record.actors = self.actors
record.intro = self.intro
record.region = self.region
record.pub_year = '%04d' % self.pub_year
record.pub_time = self.pub_time
record.total_episode_num = self.total_episode_num
record.newest_episode_num = self.newest_episode_num
record.siteid_pair_list = self.siteid_pair_list
record.season_num = self.season_num or 0
record.total_season_num = self.total_season_num or 0
record.album_language = self.region_language
record.ended = self.ended
record.video_type = self.video_type
record.poster_md = self.poster_md
strtime = time.strftime('%F %T' , time.localtime(time.time()))
record.insert_time = strtime
record.update_time = strtime
record.status = 0
record.score = 0
record.manual_checked = 0
record.manual_deleted = 0
return record
class AlbumRepositoryRecord(IRecord):
'''
Knowledge base (album repository) record.
'''
PRIMARY_KEY = 'id'
DB_TABLE = 'album_repository'
KEYS = ['id', 'title', 'key_title', 'trunk', 'sub_title', 'show_title','sub_season', \
'alias', 'category', 'play_times', 'album_type', 'status',\
'album_language', 'directors', 'is_hd', 'actors', 'intro', 'region', 'pub_year', 'pub_time', 'video_type',\
'tag', 'poster_md', 'score', 'total_episode_num', 'newest_episode_num', \
'manual_deleted', 'major_version_rating',\
'siteid_pair_list', 'ended', 'manual_weights', 'manual_checked', 'manual_edited_fields', 'simhash',\
'season_num', 'total_season_num', 'sim_siteid_pair_list',\
'version', 'insert_time', 'update_time']
STATUS_NORMAL = 0
STATUS_UPDATE = 1
STATUS_NEW = 2
def __init__(self):
super(self.__class__, self).__init__()
for key in self.KEYS:
self.__setattr__(key, '')
self.compare_record = None
self.simhash_set = set()
self.site = 'repository'
# build the simhash set, to make intra-site deduplication easier
def build_simhash_set(self, album_records_id_dict):
for siteid_pair in self.siteid_pair_list.split('|'):
items = siteid_pair.split(':')
if len(items) < 2:
continue
site, id = items[:2]
if not id.isdigit():
continue
id = int(id)
if id not in album_records_id_dict:
continue
record = album_records_id_dict[id]
simhash = get_simhash(record)
self.simhash_set.add(simhash)
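# Sketch of the expected siteid_pair_list format (the ids are hypothetical):
# 'youku:123|qiyi:456' -> simhashes are added for the raw album records with
# ids 123 and 456, provided those ids are present in album_records_id_dict.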
def get_album_compare_record(self, nmlz_func=None, debug=False):
if not self.compare_record:
self.compare_record = self.to_album_compare_record(nmlz_func, debug)
return self.compare_record
def to_album_compare_record(self, nmlz_func=None, debug=False):
record = AlbumCompareRecord()
record.raw_record = self
record.id = self.id
record.title = self.title
record.alias = self.alias
# fields produced by the preprocessing / analysis step
############
record.season_num = self.season_num or 0
record.total_season_num = self.total_season_num or 0
record.album_language = self.album_language
record.version = self.version
record.trunk = self.trunk
record.sub_season = self.sub_season
############
record.poster_md = self.poster_md
record.category = self.category
record.album_type = self.album_type
record.directors = self.directors
record.actors = self.actors
record.intro = self.intro
record.site = 'repository'
record.total_episode_num = self.total_episode_num
record.newest_episode_num = self.newest_episode_num
record.region = self.region
record.video_type = self.video_type
try:
record.pub_year = int(self.pub_year)
except:
record.pub_year = 0
record.pub_time = self.pub_time
siteid_pair_list = ''
for spl in self.siteid_pair_list.split('|'):
if not spl or ':' not in spl or spl in siteid_pair_list:
continue
if not siteid_pair_list:
siteid_pair_list = spl
else:
siteid_pair_list += '|' + spl
record.siteid_pair_list = siteid_pair_list
record.simhash_set = self.simhash_set
if nmlz_func:
nmlz_func(record, debug)
return record
def merge_compare_record(self, compare_record):
if not self.siteid_pair_list:
self.siteid_pair_list = compare_record.siteid_pair_list
else:
for spl in compare_record.siteid_pair_list.split('|'):
spl = spl.strip()
if not spl or spl in self.siteid_pair_list:
continue
if len(self.siteid_pair_list) + len(spl) >= 2000:
print >> sys.stderr, 'repository_id:%d siteid_pair_list too long' % self.id
continue
self.siteid_pair_list += '|' + spl
self.simhash_set |= compare_record.simhash_set
# data already in the knowledge base is not updated
#for k in ['alias', 'directors', 'actors', 'album_type']:
# if k in self.manual_edited_fields:
# continue
# origin = self.__dict__[k]
# new = compare_record.__dict__[k]
# new_str = ';'.join([t for t in new.split(';') if t not in origin.split(';')])
# if new_str:
# self.__dict__[k] = ';'.join([origin, new_str])
#
#for k in ['season_num', 'album_language', 'version']:
# if k not in self.manual_edited_fields and not self.__dict__[k]:
# self.__dict__[k] = compare_record.__dict__[k]
# # use the title carrying the most information as show_title
# self.show_title = compare_record.title
for k in ['region', 'intro', 'total_episode_num', 'newest_episode_num']:
if k not in self.manual_edited_fields and not self.__dict__[k]:
self.__dict__[k] = compare_record.__dict__[k]
if not self.pub_year and compare_record.pub_year:
self.pub_year = '%04d' % compare_record.pub_year
if not self.season_num:
self.season_num = 0
if self.total_season_num < compare_record.total_season_num:
self.total_season_num = compare_record.total_season_num
if not self.ended:
self.ended = compare_record.ended
# after merging, does the repository_record need to regenerate its compare_record?
if self.compare_record:
if not self.compare_record.ended:
self.compare_record.ended = compare_record.ended
if not self.compare_record.sub_title:
self.compare_record.sub_title = compare_record.sub_title
if not self.compare_record.season_num:
self.compare_record.season_num = compare_record.season_num
if not self.compare_record.pub_year:
self.compare_record.pub_year = compare_record.pub_year
if not self.compare_record.version:
self.compare_record.version = compare_record.version
if not self.compare_record.album_language:
self.compare_record.album_language = compare_record.album_language
if self.compare_record.total_season_num < compare_record.total_season_num:
self.compare_record.total_season_num = compare_record.total_season_num
self.compare_record.simhash_set |= compare_record.simhash_set
class AlbumRecord(IRecord):
'''
raw album
'''
PRIMARY_KEY = 'id'
DB_TABLE = 'raw_album'
KEYS = ['id', 'A1', 'A10', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9', 'actors', \
'album_final_id', 'album_language', 'language', 'album_type', 'album_url', 'area', 'category', \
'change_signal', 'collection_count', 'comment_count', 'cover_image_download_status', \
'cover_image_md5', 'cover_image_url', 'cover_image_url1', 'cover_image_url1_download_status', \
'cover_image_url1_md5', 'description', 'directors', 'from_channel', 'hd', 'image_size', \
'insert_time', 'is_end', 'key_md5', 'key_md5_1', 'key_md5_2', 'last_chage_time', \
'last_update_time', 'manual_deleted', 'now_episode', 'other_title', 'play_count', 'pub_time', \
'pub_year', 'real_now_episode', 'scores', 'season', 'version', 'site', 'site_album_id', \
'status', 'tag', 'title', 'total_episode', 'update_level', 'update_time', 'dead_link', \
'protocol_deleted', 'video_type', 'manual_edited_fields', 'video_language', 'src_rank']
STATUS_NORMAL = 0
STATUS_UPDATE = 1
def __init__(self):
super(self.__class__, self).__init__()
for key in self.KEYS:
self.__setattr__(key, '')
self.compare_record = None
def dead(self):
return self.protocol_deleted or self.manual_deleted or self.dead_link or self.src_rank == -1
def get_album_compare_record(self, nmlz_func=None, debug=False):
if not self.compare_record:
self.compare_record = self.to_album_compare_record(nmlz_func, debug)
return self.compare_record
def to_album_compare_record(self, nmlz_func=None, debug=False):
record = AlbumCompareRecord()
record.dead = self.dead()
record.raw_record = self
record.id = self.id
record.title = self.title
record.alias = self.other_title
record.album_language = self.album_language
record.video_language = self.video_language
#Ô¤´¦Àí·ÖÎö³öÀ´µÄ
############
record.key_title = ''
record.sub_title = ''
record.season_num = 0
record.version = ''
record.title_language = ''
#############
record.category = self.from_channel
record.album_type = self.album_type
record.directors = self.directors
record.actors = self.actors
record.intro = self.description
record.site = self.site
record.poster_md = self.cover_image_md5
record.poster_url = self.cover_image_url
record.image_size = self.image_size
record.ended = self.is_end
record.video_type = self.video_type
try:
record.total_episode_num = int(self.total_episode)
except:
record.total_episode_num = 0
record.newest_episode_num = int(self.real_now_episode)
if self.now_episode.isdigit() and record.newest_episode_num < int(self.now_episode):
record.newest_episode_num = int(self.now_episode)
#try:
# record.newest_episode_num = int(record.newest_episode_num)
#except:
# record.newest_episode_num = 0
#if record.total_episode_num and record.total_episode_num < record.newest_episode_num:
# record.total_episode_num = record.newest_episode_num
record.region = self.area
try:
record.pub_year = int(self.pub_year[:4])
except:
record.pub_year = 0
record.pub_time = self.pub_time
record.simhash_set.add(get_simhash(self))
record.siteid_pair_list = '%s:%s' % (self.site, self.id)
if nmlz_func:
nmlz_func(record, debug)
return record
class VideoRecord(IRecord):
'''
Single video record.
'''
DB_TABLE = 'video_records'
PRIMARY_KEY = 'id'
KEYS = ['id', 'B1', 'B2', 'B3', 'B4', 'actors', 'album_final_id', 'change_signal', \
'collection_number', 'comment_number', 'complete_title', 'description', 'directors',\
'duration', 'episode_number', 'hd', 'image_download_status', 'image_md5', \
'image_size', 'image_url', 'insert_time', 'key_md5', 'key_md5_1', 'manual_deleted', \
'pub_time', 'raw_album_id', 'site', 'status', 'tag', 'title', 'update_time', 'url']
def __init__(self):
super(self.__class__, self).__init__()
for key in self.KEYS:
self.__setattr__(key, '')
class ClusterRelationRecord(IRecord):
'''
Manually checked cluster relation (table: checked_final_relation).
'''
DB_TABLE = 'checked_final_relation'
PRIMARY_KEY = 'key_id'
KEYS = ['relation', 'kid', 'pid', 'user', 'comment']
def __init__(self):
super(self.__class__, self).__init__()
for key in self.KEYS:
self.__setattr__(key, '')
|
lokicui/classifier
|
common/records.py
|
Python
|
bsd-3-clause
| 16,412
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
# TODO: Include hash generation to check whether or not the lock may be revalidated
class DeleteForm(forms.Form):
def __init__(self, id_obj=None, *args, **kwargs):
self._id = id_obj
super(DeleteForm, self).__init__(*args, **kwargs)
idd = forms.CharField(widget=forms.HiddenInput)
hashe = forms.CharField(widget=forms.HiddenInput)
def clean(self):
idd = self.cleaned_data.get('idd')
hashe = self.cleaned_data.get('hashe')
if not self._id == idd and not idd == hashe:
raise forms.ValidationError(
"Hash errado"
)
class ReValidateForm(forms.Form):
def __init__(self, id_obj=None, *args, **kwargs):
self._id = id_obj
super(ReValidateForm, self).__init__(*args, **kwargs)
idd = forms.CharField(widget=forms.HiddenInput)
hashe = forms.CharField(widget=forms.HiddenInput)
def clean(self):
idd = self.cleaned_data.get('idd')
hashe = self.cleaned_data.get('hashe')
if not self._id == idd and not idd == hashe:
raise forms.ValidationError(
"Hash errado"
)
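# Minimal usage sketch (hypothetical Django view code, not part of this module):
#
#   form = DeleteForm(id_obj=str(obj.pk), data=request.POST)
#   if form.is_valid():
#       ...  # the hidden 'idd'/'hashe' pair passed the check in clean()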
|
luzfcb/luzfcb_dj_simplelock
|
luzfcb_dj_simplelock/forms.py
|
Python
|
bsd-3-clause
| 1,240
|
#!/usr/bin/env python
import sys
import os
import code
import readline
import rlcompleter
sys.path.append('../src')
from Bybop_Discovery import *
import Bybop_Device
print 'Searching for devices'
discovery = Discovery(DeviceID.ALL)
discovery.wait_for_change()
devices = discovery.get_devices()
discovery.stop()
if not devices:
print 'Oops ...'
sys.exit(1)
device = devices.itervalues().next()
print 'Will connect to ' + get_name(device)
d2c_port = 54321
controller_type = "PC"
controller_name = "bybop shell"
drone = Bybop_Device.create_and_connect(device, d2c_port, controller_type, controller_name)
if drone is None:
print 'Unable to connect to a product'
sys.exit(1)
drone.dump_state()
vars = globals().copy()
vars.update(locals())
readline.set_completer(rlcompleter.Completer(vars).complete)
readline.parse_and_bind("tab: complete")
shell = code.InteractiveConsole(vars)
shell.interact()
drone.stop()
|
Parrot-Developers/bybop
|
samples/interactive.py
|
Python
|
bsd-3-clause
| 940
|
# Copyright (c) 2016, Nordic Semiconductor
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Setup script for nrfjprog.
USAGE:
python setup.py install or python setup.py bdist_egg (to create a Python egg)
"""
#import fnmatch
import os
from setuptools import setup, find_packages
#import subprocess
import sys
from nrfjprog import nrfjprog_version
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def read_requirements(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).readlines()
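# For example (sketch): if requirements.txt listed one package per line,
# read_requirements('requirements.txt') would return those lines (including
# trailing newlines), which are then passed to install_requires below.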
setup(
name='nRF5-universal-prog',
version=nrfjprog_version.NRFJPROG_VERSION,
description='The nRF5-universal-prog command line tool implemented in Python.',
long_description=read('README.md'),
url='https://github.com/NordicPlayground/nRF5-universal-prog',
author='Nordic Semiconductor ASA',
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Debuggers',
'Topic :: Software Development :: Embedded Systems',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
],
keywords='nRF5 nRF51 nRF52 nrfjprog pynrfjprog pyOCD Nordic Semiconductor SEGGER JLink',
install_requires=read_requirements('requirements.txt'),
packages=find_packages(exclude=["tests.*", "tests"]),
include_package_data=False
)
"""if __name__ == '__main__':
print('#### Auto formatting all Python code in nRFTools according to PEP 8...')
matches = []
for root, dirnames, filenames in os.walk(
os.path.dirname(os.path.realpath(__file__))):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for match in matches:
subprocess.check_call(
["autopep8", "--in-place", "--aggressive", "--aggressive", match])"""
|
mjdietzx/nrfjprog
|
setup.py
|
Python
|
bsd-3-clause
| 3,700
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import (DatetimeIndex, TimedeltaIndex, Float64Index, Int64Index,
to_timedelta, timedelta_range, date_range,
Series,
Timestamp, Timedelta)
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
class TestTimedeltaIndexArithmetic(object):
_holder = TimedeltaIndex
@pytest.mark.xfail(reason='GH#18824 ufunc add cannot use operands...')
def test_tdi_with_offset_array(self):
# GH#18849
tdi = pd.TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
offs = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = pd.TimedeltaIndex(['1 days 01:00:00', '3 days 04:02:00'])
res = tdi + offs
tm.assert_index_equal(res, expected)
res2 = offs + tdi
tm.assert_index_equal(res2, expected)
anchored = np.array([pd.offsets.QuarterEnd(),
pd.offsets.Week(weekday=2)])
with pytest.raises(TypeError):
tdi + anchored
# TODO: Split by ops, better name
def test_numeric_compat(self):
idx = self._holder(np.arange(5, dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5, dtype='int64')
tm.assert_index_equal(result,
self._holder(np.arange(5, dtype='int64') * 5))
result = idx * np.arange(5, dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='float64') + 0.1)
tm.assert_index_equal(result, self._holder(np.arange(
5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))
# invalid
pytest.raises(TypeError, lambda: idx * idx)
pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3)))
pytest.raises(ValueError, lambda: idx * np.array([1, 2]))
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and integer
def test_tdi_add_int(self, one):
# Variants of `one` for #19012
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + one
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_iadd_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
rng += one
tm.assert_index_equal(rng, expected)
def test_tdi_sub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - one
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_isub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
def test_tdi_add_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_iadd_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng += delta
tm.assert_index_equal(rng, expected)
def test_tdi_sub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
def test_tdi_isub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and datetime-like
def test_tdi_sub_timestamp_raises(self):
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
def test_tdi_add_timestamp(self):
idx = TimedeltaIndex(['1 day', '2 day'])
result = idx + Timestamp('2011-01-01')
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_tdi_radd_timestamp(self):
idx = TimedeltaIndex(['1 day', '2 day'])
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# TODO: Split by operation, better name
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
pytest.raises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# floor divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng // offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could allow it in the future)
pytest.raises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
pytest.raises(TypeError, lambda: tdi - dt)
pytest.raises(TypeError, lambda: tdi - dti)
pytest.raises(TypeError, lambda: td - dt)
pytest.raises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
pytest.raises(TypeError, lambda: dt_tz - ts)
pytest.raises(TypeError, lambda: dt_tz - dt)
pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
pytest.raises(TypeError, lambda: dt - dt_tz)
pytest.raises(TypeError, lambda: ts - dt_tz)
pytest.raises(TypeError, lambda: ts_tz2 - ts)
pytest.raises(TypeError, lambda: ts_tz2 - dt)
pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
pytest.raises(TypeError, lambda: dti - ts_tz)
pytest.raises(TypeError, lambda: dti_tz - ts)
pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
pytest.raises(ValueError, lambda: tdi + dti[0:1])
pytest.raises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
pytest.raises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
pytest.raises(TypeError, lambda: td + np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
tm.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(-other + td, expected)
pytest.raises(TypeError, lambda: td - np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
pytest.raises(TypeError, lambda: td * other)
pytest.raises(TypeError, lambda: other * td)
tm.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
tm.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
assert s.dtype == object
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
assert s2.dtype == object
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
assert s.dtype == object
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 + NA
tm.assert_series_equal(actual, sn)
actual = NA + s1
tm.assert_series_equal(actual, sn)
actual = s1 - NA
tm.assert_series_equal(actual, sn)
actual = -NA + s1
tm.assert_series_equal(actual, sn)
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 + NA
tm.assert_frame_equal(actual, dfn)
actual = df1 - NA
tm.assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
def test_add_overflow(self):
# see gh-14068
msg = "too (big|large) to convert"
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assert_raises_regex(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assert_raises_regex(OverflowError, msg):
Timestamp('2000') + to_timedelta([106580], 'D')
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta([_NaT]) - Timedelta('1 days')
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with tm.assert_raises_regex(OverflowError, msg):
(to_timedelta([_NaT, '5 days', '1 hours']) -
to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (to_timedelta([pd.NaT, '5 days', '1 hours']) +
to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
def test_timedeltaindex_add_timestamp_nat_masking(self):
# GH17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4D'
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2D'
rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
result = abs(rng)
exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
'2 days'], name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# TODO: Needs more informative name, probably split up into
# more targeted tests
def test_timedelta(self, freq):
index = date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH4134, buggy with timedeltas
rng = date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
|
zfrenchee/pandas
|
pandas/tests/indexes/timedeltas/test_arithmetic.py
|
Python
|
bsd-3-clause
| 27,803
|
import os
from rosdistro import get_index, get_distribution_cache
FILES_DIR = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'files'))
def test_get_release_cache():
url = 'file://' + FILES_DIR + '/index_v2.yaml'
i = get_index(url)
get_distribution_cache(i, 'foo')
|
mintar/ros-infrastructure-rosdistro
|
test/test_cache.py
|
Python
|
bsd-3-clause
| 307
|
# coding: utf-8
"""
DisGeNET Interface
~~~~~~~~~~~~~~~~~~
"""
import os
import requests
import pandas as pd
# Tool to create required caches
from biovida.support_tools._cache_management import package_cache_creator
# BioVida Support Tools
from biovida.support_tools.support_tools import header, camel_to_snake_case, list_to_bulletpoints
# BioVida Printing Tools
from biovida.support_tools.printing import dict_pprint
# ---------------------------------------------------------------------------------------------
# DisGeNET Reference Data
# ---------------------------------------------------------------------------------------------
_disgenet_delimited_databases = {
# Source: http://www.disgenet.org/web/DisGeNET/menu/downloads#curated
# Structure: {database_short_name: {full_name: ..., url: ..., description: ..., number_of_rows_in_header: ...}}
'all': {
'full_name': 'All Gene-Disease Associations',
'url': 'http://www.disgenet.org/ds/DisGeNET/results/all_gene_disease_associations.tsv.gz',
'description': 'The file contains all gene-disease associations in DisGeNET.',
'header': 21
},
'curated': {
'full_name': 'Curated Gene-Disease Associations',
'url': 'http://www.disgenet.org/ds/DisGeNET/results/curated_gene_disease_associations.tsv.gz',
'description': 'The file contains gene-disease associations from UNIPROT, CTD (human subset), ClinVar, Orphanet,'
' and the GWAS Catalog.',
'header': 21
},
'snp_disgenet': {
'full_name': 'All SNP-Gene-Disease Associations',
'url': 'http://www.disgenet.org/ds/DisGeNET/results/all_snps_sentences_pubmeds.tsv.gz',
'description': 'All SNP-gene-disease associations.',
'header': 20
},
}
# ---------------------------------------------------------------------------------------------
# Tools for Harvesting DisGeNET Data
# ---------------------------------------------------------------------------------------------
class DisgenetInterface(object):
"""
Python Interface for Harvesting Databases from `DisGeNET <http://www.disgenet.org/>`_.
    :param cache_path: location of the BioVida cache. If one does not exist in this location, one will be created.
                       Defaults to ``None`` (which will generate a cache in the home folder).
:type cache_path: ``str`` or ``None``
:param verbose: If ``True``, print notice when downloading database. Defaults to ``True``.
:type verbose: ``bool``
"""
@staticmethod
def _disgenet_readme(created_gene_dirs):
"""
Writes the DisGeNET README to disk.
        :param created_gene_dirs: the dictionary of directories returned by ``package_cache_creator()``
:type created_gene_dirs: ``dict``
"""
save_address = os.path.join(created_gene_dirs['disgenet'], 'DisGeNET_README.txt')
if not os.path.isfile(save_address):
readme_url = 'http://www.disgenet.org/ds/DisGeNET/results/readme.txt'
r = requests.get(readme_url, stream=True)
with open(save_address, 'wb') as f:
f.write(r.content)
header("The DisGeNET README has been downloaded to:\n\n {0}\n\n"
"Please take the time to review this document.".format(save_address),
flank=False)
def __init__(self, cache_path=None, verbose=True):
"""
Initialize the ``DisgenetInterface()`` Class.
"""
self._verbose = verbose
# Cache Creation
ppc = package_cache_creator(sub_dir='genomics',
cache_path=cache_path,
to_create=['disgenet'],
verbose=verbose)
self.root_path, self._created_gene_dirs = ppc
# Check if a readme exists.
self._disgenet_readme(self._created_gene_dirs)
# Containers for the most recently requested database.
self.current_database = None
self.current_database_name = None
self.current_database_full_name = None
self.current_database_description = None
@staticmethod
def _disgenet_delimited_databases_key_error(database):
"""
        Raises an error when a reference is made to a database not in `_disgenet_delimited_databases.keys()`.
:param database: `erroneous` database reference.
:type database: ``str``
"""
if database not in _disgenet_delimited_databases:
raise ValueError(
"'{0}' is an invalid value for `database`.\n`database` must be one of:\n{1}".format(
str(database), list_to_bulletpoints(_disgenet_delimited_databases.keys())))
def options(self, database=None, pretty_print=True):
"""
        Lists the DisGeNET databases which can be downloaded,
        as well as additional information about the databases.
:param database: A database to review. Must be one of: 'all', 'curated', 'snp_disgenet' or ``None``.
If a specific database is given, the database's full name and description will be provided.
If ``None``, a list of databases which can be downloaded will be returned (or printed).
Defaults to ``None``.
:type database: ``str``
:param pretty_print: pretty print the information. Defaults to True.
:type pretty_print: ``bool``
:return: a ``list`` if `database` is ``None``, else a ``dict`` with the database's full name and description.
:rtype: ``list`` or ``dict``
"""
if database is None:
info = list(_disgenet_delimited_databases.keys())
elif database in _disgenet_delimited_databases:
info = {k: v for k, v in _disgenet_delimited_databases[database].items()
if k in ['full_name', 'description']}
else:
self._disgenet_delimited_databases_key_error(database)
if pretty_print:
if database is None:
print("Available Databases:\n")
print(list_to_bulletpoints(info))
else:
dict_pprint(info)
else:
return info
@staticmethod
def _df_clean(data_frame):
"""
        Clean the DataFrame generated by ``pull()``.
:param data_frame:
:type data_frame: ``Pandas DataFrame``
:return: see description.
:rtype: ``Pandas DataFrame``
"""
# Lower to make easier to match in the future
data_frame['diseaseName'] = data_frame['diseaseName'].map(
lambda x: x.lower() if isinstance(x, str) else x, na_action='ignore')
data_frame.columns = list(map(camel_to_snake_case, data_frame.columns))
return data_frame
def pull(self, database, download_override=False):
"""
Pull (i.e., download) a DisGeNET Database.
        Note: if a database is already cached, it will be used instead of downloading
        (the `download_override` argument can be used to override this behaviour).
:param database: A database to download. Must be one of: 'all', 'curated', 'snp_disgenet' or ``None``.
See ``options()`` for more information.
:type database: ``str``
:param download_override: If ``True``, override any existing database currently cached and download a new one.
Defaults to ``False``.
:type download_override: ``bool``
:return: a DisGeNET database
:rtype: ``Pandas DataFrame``
"""
self._disgenet_delimited_databases_key_error(database)
db_url = _disgenet_delimited_databases[database]['url']
save_name = "{0}.p".format(db_url.split("/")[-1].split(".")[0])
save_address = os.path.join(self._created_gene_dirs['disgenet'], save_name)
if download_override or not os.path.isfile(save_address):
if self._verbose:
header("Downloading DisGeNET Database... ", flank=False)
data_frame = pd.read_csv(db_url,
sep='\t',
header=_disgenet_delimited_databases[database]['header'],
compression='gzip')
self._df_clean(data_frame).to_pickle(save_address)
else:
data_frame = pd.read_pickle(save_address)
# Cache the database
self.current_database = data_frame
self.current_database_name = database
self.current_database_full_name = _disgenet_delimited_databases[database]['full_name']
self.current_database_description = _disgenet_delimited_databases[database]['description']
return data_frame
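# --- Editor's addition: an illustrative usage sketch, not part of the original module. ---
def _disgenet_interface_example():  # pragma: no cover
    """A minimal, hedged example of driving ``DisgenetInterface``. The database
    name ('curated') and the default cache location are assumptions chosen for
    illustration; ``pull()`` downloads data unless a cached copy already exists."""
    dgi = DisgenetInterface(cache_path=None, verbose=True)
    dgi.options()              # list the downloadable databases
    dgi.options('curated')     # full name and description of one database
    df = dgi.pull('curated')   # download, or reuse the cached pickle
    return list(df.columns)    # snake_case columns produced by _df_clean()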
|
TariqAHassan/BioVida
|
biovida/genomics/disgenet_interface.py
|
Python
|
bsd-3-clause
| 8,839
|
# test explicit global within function within function with local of same name
x = 2
def f():
x = 3
def g():
global x
print x
x = 4
print x
g()
f()
print x
|
jplevyak/pyc
|
tests/scoping2.py
|
Python
|
bsd-3-clause
| 210
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Tools
http://en.wikipedia.org/wiki/Haversine_formula
ToDo: ToFix / ToTest
"""
import math
def waypoint_bearing(lat1, lon1, lat2, lon2):
"""
Calculates the bearing between 2 locations.
Method calculates the bearing between 2 locations.
@param lon1 First point longitude.
@param lat1 First point latitude.
@param lon2 Second point longitude.
@param lat2 Second point latitude.
@return The bearing between 2 locations.
"""
longitude1 = math.radians(lon1)
latitude1 = math.radians(lat1)
longitude2 = math.radians(lon2)
latitude2 = math.radians(lat2)
clat1 = math.cos(latitude1)
clat2 = math.cos(latitude2)
dlon = longitude2 - longitude1
y = math.sin(dlon) * clat2
x = clat1 * math.sin(latitude2) - math.sin(latitude1) * clat2 * math.cos(dlon)
if x==0 and y==0:
return(0.0)
else:
return((360 + math.degrees(math.atan2(y, x)) + 0.5) % 360.0)
def haversine_bearing(lat1, lon1, lat2, lon2):
"""
Calculate the bearing from 1 point to 1 other
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
dlon = lon2 - lon1
b = math.atan2(math.sin(dlon) * math.cos(lat2),
math.cos(lat1) * math.sin(lat2)
- math.sin(lat1) * math.cos(lat2) * math.cos(dlon)) # bearing calc
bd = math.degrees(b)
br, bn = divmod(bd + 360, 360) # the bearing remainder and final bearing
return bn
def haversine_distance(lat1, lon1, lat2, lon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
c = 2 * math.asin(math.sqrt(a))
r = 6371.0 # Radius of earth in kilometers. Use 3956 for miles
return(c * r)
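# Editor's addition (hedged illustration, not part of the original module): two
# points one degree of latitude apart on the same meridian are roughly 111 km
# apart (Earth circumference / 360), which gives a quick sanity check.
def _haversine_distance_sanity_check():
    d = haversine_distance(0.0, 0.0, 1.0, 0.0)
    assert 110.0 < d < 112.0, d  # ~111.2 km expected
    return d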
def main():
# Just some tests (should be removed)
(lon1, lat1, lon2, lat2) = (45.0, 1.0, 45.5, 2.0)
    bearing = waypoint_bearing(lat1, lon1, lat2, lon2)  # arguments follow the (lat, lon) signature order
    print(bearing)
    bearing = haversine_bearing(lat1, lon1, lat2, lon2)
print(bearing)
if __name__ == '__main__':
main()
|
scls19fr/pycondor
|
pycondor/tools.py
|
Python
|
bsd-3-clause
| 2,458
|
# -*- coding: utf-8 -*-
#
# cmapR documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 24 11:48:54 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cmapR'
copyright = u'2017, Connectivity Map at Broad Institute of MIT and Harvard, Inc.'
author = u'Ted Natoli'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'cmapR v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'cmapRdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cmapR.tex', u'cmapR Documentation',
u'Ted Natoli', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cmapr', u'cmapR Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cmapR', u'cmapR Documentation',
author, 'cmapR', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
|
cmap/cmapR
|
docs/source/conf.py
|
Python
|
bsd-3-clause
| 11,886
|
#!/usr/bin/env python
#encoding: utf8
import rospy, actionlib
from std_msgs.msg import UInt16
from pimouse_ros.msg import MusicAction, MusicResult, MusicFeedback # added line
def write_freq(hz=0):
bfile = "/dev/rtbuzzer0"
try:
with open(bfile,"w") as f:
f.write(str(hz) + "\n")
except IOError:
rospy.logerr("can't write to " + bfile)
def exec_music(goal): pass # added
def recv_buzzer(data):
write_freq(data.data)
if __name__ == '__main__':
rospy.init_node('buzzer')
rospy.Subscriber("buzzer", UInt16, recv_buzzer)
    music = actionlib.SimpleActionServer('music', MusicAction, exec_music, False) # added
    music.start() # added
    rospy.on_shutdown(write_freq) # added
rospy.spin()
# Copyright 2016 Ryuichi Ueda
# Released under the BSD License.
# To make line numbers be identical with the book, this statement is written here. Don't move it to the header.
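# Editor's note (hedged, not from the book): with this node running, the buzzer
# can be exercised from another shell, e.g. (assumed rostopic syntax):
#   rostopic pub buzzer std_msgs/UInt16 1000   # request a 1 kHz tone
#   rostopic pub buzzer std_msgs/UInt16 0      # silence the buzzer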
|
oguran/pimouse_ros
|
scripts/buzzer4.py
|
Python
|
bsd-3-clause
| 1,047
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.models import Page, Title
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.plugins.text.models import Text
from cms.sitemaps import CMSSitemap
from cms.test.testcases import CMSTestCase, URL_CMS_PAGE, URL_CMS_PAGE_ADD
from cms.test.util.context_managers import LanguageOverride, SettingsOverride
from cms.utils.page_resolver import get_page_from_request
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import HttpRequest
import os.path
class PagesTestCase(CMSTestCase):
def setUp(self):
u = User(username="test", is_staff = True, is_active = True, is_superuser = True)
u.set_password("test")
u.save()
self.login_user(u)
def test_01_add_page(self):
"""
Test that the add admin page could be displayed via the admin
"""
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 200)
def test_02_create_page(self):
"""
Test that a page can be created via the admin
"""
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
title = Title.objects.get(slug=page_data['slug'])
self.assertNotEqual(title, None)
page = title.page
page.published = True
page.save()
self.assertEqual(page.get_title(), page_data['title'])
self.assertEqual(page.get_slug(), page_data['slug'])
self.assertEqual(page.placeholders.all().count(), 2)
        # were public instances created?
title = Title.objects.drafts().get(slug=page_data['slug'])
def test_03_slug_collision(self):
"""
Test a slug collision
"""
page_data = self.get_new_page_data()
# create first page
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
#page1 = Title.objects.get(slug=page_data['slug']).page
# create page with the same page_data
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
if settings.i18n_installed:
self.assertEqual(response.status_code, 302)
            # did we get the right redirect?
self.assertEqual(response['Location'].endswith(URL_CMS_PAGE), True)
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Location'].endswith(URL_CMS_PAGE_ADD), True)
# TODO: check for slug collisions after move
# TODO: check for slug collisions with different settings
def test_04_details_view(self):
"""
Test the details view
"""
response = self.client.get(self.get_pages_root())
self.assertEqual(response.status_code, 404)
page = self.create_page(title='test page 1', published=False)
response = self.client.get(self.get_pages_root())
self.assertEqual(response.status_code, 404)
self.assertTrue(page.publish())
with_parent = self.create_page(parent_page=page, title='test page 2', published=True)
homepage = Page.objects.get_home()
self.assertTrue(homepage.get_slug(), 'test-page-1')
response = self.client.get(self.get_pages_root())
self.assertEqual(response.status_code, 200)
def test_05_edit_page(self):
"""
        Test that a page can be edited via the admin
"""
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.get(title_set__slug=page_data['slug'])
response = self.client.get('/admin/cms/page/%s/' %page.id)
self.assertEqual(response.status_code, 200)
page_data['title'] = 'changed title'
response = self.client.post('/admin/cms/page/%s/' %page.id, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
self.assertEqual(page.get_title(), 'changed title')
def test_06_meta_description_and_keywords_fields_from_admin(self):
"""
Test that description and keywords tags can be set via the admin
"""
page_data = self.get_new_page_data()
page_data["meta_description"] = "I am a page"
page_data["meta_keywords"] = "page,cms,stuff"
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.get(title_set__slug=page_data['slug'])
response = self.client.get('/admin/cms/page/%s/' %page.id)
self.assertEqual(response.status_code, 200)
page_data['meta_description'] = 'I am a duck'
response = self.client.post('/admin/cms/page/%s/' %page.id, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
page = Page.objects.get(title_set__slug=page_data["slug"])
self.assertEqual(page.get_meta_description(), 'I am a duck')
self.assertEqual(page.get_meta_keywords(), 'page,cms,stuff')
def test_07_meta_description_and_keywords_from_template_tags(self):
from django import template
page_data = self.get_new_page_data()
page_data["title"] = "Hello"
page_data["meta_description"] = "I am a page"
page_data["meta_keywords"] = "page,cms,stuff"
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.get(title_set__slug=page_data['slug'])
self.client.post('/admin/cms/page/%s/' %page.id, page_data)
t = template.Template("{% load cms_tags %}{% page_attribute title %} {% page_attribute meta_description %} {% page_attribute meta_keywords %}")
req = HttpRequest()
page.published = True
page.save()
req.current_page = page
req.REQUEST = {}
self.assertEqual(t.render(template.Context({"request": req})), "Hello I am a page page,cms,stuff")
def test_08_copy_page(self):
"""
Test that a page can be copied via the admin
"""
page_a = self.create_page()
page_a_a = self.create_page(page_a)
page_a_a_a = self.create_page(page_a_a)
page_b = self.create_page()
page_b_a = self.create_page(page_b)
count = Page.objects.drafts().count()
self.copy_page(page_a, page_b_a)
self.assertEqual(Page.objects.drafts().count() - count, 3)
def test_09_language_change(self):
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
pk = Page.objects.all()[0].pk
response = self.client.get("/admin/cms/page/%s/" % pk, {"language":"en" })
self.assertEqual(response.status_code, 200)
response = self.client.get("/admin/cms/page/%s/" % pk, {"language":"de" })
self.assertEqual(response.status_code, 200)
def test_10_move_page(self):
page_data1 = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data1)
page_data2 = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data2)
page_data3 = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data3)
page1 = Page.objects.all()[0]
page2 = Page.objects.all()[1]
page3 = Page.objects.all()[2]
# move pages
response = self.client.post("/admin/cms/page/%s/move-page/" % page3.pk, {"target":page2.pk, "position":"last-child" })
self.assertEqual(response.status_code, 200)
response = self.client.post("/admin/cms/page/%s/move-page/" % page2.pk, {"target":page1.pk, "position":"last-child" })
self.assertEqual(response.status_code, 200)
# check page2 path and url
page2 = Page.objects.get(pk=page2.pk)
self.assertEqual(page2.get_path(), page_data1['slug']+"/"+page_data2['slug'])
self.assertEqual(page2.get_absolute_url(), self.get_pages_root()+page_data1['slug']+"/"+page_data2['slug']+"/")
# check page3 path and url
page3 = Page.objects.get(pk=page3.pk)
self.assertEqual(page3.get_path(), page_data1['slug']+"/"+page_data2['slug']+"/"+page_data3['slug'])
self.assertEqual(page3.get_absolute_url(), self.get_pages_root()+page_data1['slug']+"/"+page_data2['slug']+"/"+page_data3['slug']+"/")
# publish page 1 (becomes home)
page1 = Page.objects.all()[0]
page1.published = True
page1.save()
# check that page2 and page3 url have changed
page2 = Page.objects.get(pk=page2.pk)
self.assertEqual(page2.get_absolute_url(), self.get_pages_root()+page_data2['slug']+"/")
page3 = Page.objects.get(pk=page3.pk)
self.assertEqual(page3.get_absolute_url(), self.get_pages_root()+page_data2['slug']+"/"+page_data3['slug']+"/")
# move page2 back to root and check path of 2 and 3
response = self.client.post("/admin/cms/page/%s/move-page/" % page2.pk, {"target":page1.pk, "position":"left" })
self.assertEqual(response.status_code, 200)
page2 = Page.objects.get(pk=page2.pk)
self.assertEqual(page2.get_path(), page_data2['slug'])
page3 = Page.objects.get(pk=page3.pk)
self.assertEqual(page3.get_path(), page_data2['slug']+"/"+page_data3['slug'])
def test_11_add_placeholder(self):
# create page
page = self.create_page(None, None, "last-child", "Add Placeholder", 1, True, True)
page.template = 'add_placeholder.html'
page.save()
url = page.get_absolute_url()
response = self.client.get(url)
self.assertEqual(200, response.status_code)
path = os.path.join(settings.PROJECT_DIR, 'templates', 'add_placeholder.html')
f = open(path, 'r')
old = f.read()
f.close()
new = old.replace(
'<!-- SECOND_PLACEHOLDER -->',
'{% placeholder second_placeholder %}'
)
f = open(path, 'w')
f.write(new)
f.close()
response = self.client.get(url)
self.assertEqual(200, response.status_code)
f = open(path, 'w')
f.write(old)
f.close()
def test_12_sitemap_login_required_pages(self):
"""
        Test that CMSSitemap object contains only published, public (login_required=False) pages
"""
self.create_page(parent_page=None, published=True, in_navigation=True)
page1 = Page.objects.all()[0]
page1.login_required = True
page1.save()
self.assertEqual(CMSSitemap().items().count(),0)
def test_13_edit_page_other_site_and_language(self):
"""
        Test that a page can be edited via the admin when your current site is
different from the site you are editing and the language isn't available
for the current site.
"""
site = Site.objects.create(domain='otherlang', name='otherlang')
# Change site for this session
page_data = self.get_new_page_data()
page_data['site'] = site.pk
page_data['title'] = 'changed title'
TESTLANG = settings.CMS_SITE_LANGUAGES[site.pk][0]
page_data['language'] = TESTLANG
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
page = Page.objects.get(title_set__slug=page_data['slug'])
with LanguageOverride(TESTLANG):
self.assertEqual(page.get_title(), 'changed title')
def test_14_flat_urls(self):
with SettingsOverride(CMS_FLAT_URLS=True):
home_slug = "home"
child_slug = "child"
grandchild_slug = "grandchild"
home = self.create_page(
title=home_slug,
published=True,
in_navigation=True
)
home.publish()
child = self.create_page(
parent_page=home,
title=child_slug,
published=True,
in_navigation=True
)
child.publish()
grandchild = self.create_page(
parent_page=child,
title=grandchild_slug,
published=True,
in_navigation=True
)
grandchild.publish()
response = self.client.get(home.get_absolute_url())
self.assertEqual(response.status_code, 200)
response = self.client.get(child.get_absolute_url())
self.assertEqual(response.status_code, 200)
response = self.client.get(grandchild.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertFalse(child.get_absolute_url() in grandchild.get_absolute_url())
def test_15_templates(self):
"""
Test the inheritance magic for templates
"""
parent = self.create_page()
child = self.create_page(parent)
child.template = settings.CMS_TEMPLATE_INHERITANCE_MAGIC
child.save()
self.assertEqual(child.template, settings.CMS_TEMPLATE_INHERITANCE_MAGIC)
self.assertEqual(parent.get_template_name(), child.get_template_name())
parent.template = settings.CMS_TEMPLATE_INHERITANCE_MAGIC
parent.save()
self.assertEqual(parent.template, settings.CMS_TEMPLATE_INHERITANCE_MAGIC)
self.assertEqual(parent.get_template(), settings.CMS_TEMPLATES[0][0])
self.assertEqual(parent.get_template_name(), settings.CMS_TEMPLATES[0][1])
def test_16_delete_with_plugins(self):
"""
Check that plugins and placeholders get correctly deleted when we delete
a page!
"""
page = self.create_page()
page.rescan_placeholders() # create placeholders
placeholder = page.placeholders.all()[0]
plugin_base = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=settings.LANGUAGES[0][0]
)
plugin_base.insert_at(None, position='last-child', save=False)
plugin = Text(body='')
plugin_base.set_base_attr(plugin)
plugin.save()
self.assertEqual(CMSPlugin.objects.count(), 1)
self.assertEqual(Text.objects.count(), 1)
self.assertTrue(Placeholder.objects.count() > 0)
page.delete()
self.assertEqual(CMSPlugin.objects.count(), 0)
self.assertEqual(Text.objects.count(), 0)
self.assertEqual(Placeholder.objects.count(), 0)
def test_17_get_page_from_request_on_non_cms_admin(self):
request = self.get_request(
reverse('admin:sampleapp_category_change', args=(1,))
)
page = get_page_from_request(request)
self.assertEqual(page, None)
def test_18_get_page_from_request_on_cms_admin(self):
page = self.create_page()
request = self.get_request(
reverse('admin:cms_page_change', args=(page.pk,))
)
found_page = get_page_from_request(request)
self.assertTrue(found_page)
self.assertEqual(found_page.pk, page.pk)
def test_19_get_page_from_request_on_cms_admin_nopage(self):
request = self.get_request(
reverse('admin:cms_page_change', args=(1,))
)
page = get_page_from_request(request)
self.assertEqual(page, None)
def test_20_get_page_from_request_cached(self):
mock_page = 'hello world'
request = self.get_request(
reverse('admin:sampleapp_category_change', args=(1,))
)
request._current_page_cache = mock_page
page = get_page_from_request(request)
self.assertEqual(page, mock_page)
def test_21_get_page_from_request_nopage(self):
request = self.get_request('/')
page = get_page_from_request(request)
self.assertEqual(page, None)
def test_22_get_page_from_request_with_page_404(self):
page = self.create_page(published=True)
page.publish()
request = self.get_request('/does-not-exist/')
found_page = get_page_from_request(request)
self.assertEqual(found_page, None)
def test_23_get_page_from_request_with_page_preview(self):
page = self.create_page()
request = self.get_request('%s?preview' % page.get_absolute_url())
found_page = get_page_from_request(request)
self.assertEqual(found_page, None)
superuser = self.get_superuser()
with self.login_user_context(superuser):
request = self.get_request('%s?preview&draft' % page.get_absolute_url())
found_page = get_page_from_request(request)
self.assertTrue(found_page)
self.assertEqual(found_page.pk, page.pk)
class NoAdminPageTests(CMSTestCase):
urls = 'testapp.noadmin_urls'
def setUp(self):
admin = 'django.contrib.admin'
noadmin_apps = [app for app in settings.INSTALLED_APPS if not app == admin]
self._ctx = SettingsOverride(INSTALLED_APPS=noadmin_apps)
self._ctx.__enter__()
def tearDown(self):
self._ctx.__exit__(None, None, None)
def test_01_get_page_from_request_fakeadmin_nopage(self):
request = self.get_request('/admin/')
page = get_page_from_request(request)
self.assertEqual(page, None)
|
jalaziz/django-cms-grappelli-old
|
cms/tests/page.py
|
Python
|
bsd-3-clause
| 17,666
|
# -*- coding: utf-8 -*-
from flask_wtf import Form
from wtforms.fields import TextField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from purchasing.users.models import Department
class DepartmentForm(Form):
'''Allows user to update profile information
Attributes:
department: sets user department based on choice of available
departments or none value
first_name: sets first_name value based on user input
last_name: sets last_name value based on user input
'''
department = QuerySelectField(
query_factory=Department.query_factory,
get_pk=lambda i: i.id,
get_label=lambda i: i.name,
allow_blank=True, blank_text='-----'
)
first_name = TextField()
last_name = TextField()
|
codeforamerica/pittsburgh-purchasing-suite
|
purchasing/users/forms.py
|
Python
|
bsd-3-clause
| 786
|
# c: 07.05.2007, r: 25.06.2008
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
dim = 2
field_1 = {
'name' : 'a_harmonic_field',
'dtype' : 'real',
'shape' : 'scalar',
'region' : 'Omega',
'approx_order' : 1,
}
variables = {
't': ('unknown field', 'a_harmonic_field', 0),
's': ('test field', 'a_harmonic_field', 't'),
}
regions = {
'Omega' : 'all',
'Gamma' : ('vertices of surface', 'facet'),
}
ebcs = {
't_left' : ('Gamma', {'t.0' : 'ebc'}),
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
coef = 2.0
materials = {
'coef' : ({'val' : coef},),
'rhs' : 'rhs',
}
equations = {
'Temperature' :
"""dw_laplace.i.Omega( coef.val, s, t )
= - dw_volume_lvf.i.Omega( rhs.val, s )""",
}
solutions = {
'sincos' : ('t', 'sin( 3.0 * x ) * cos( 4.0 * y )',
'-25.0 * %s * sin( 3.0 * x ) * cos( 4.0 * y )' % coef),
'poly' : ('t', '(x**2) + (y**2)', '4.0 * %s' % coef),
'polysin' : ('t', '((x - 0.5)**3) * sin( 5.0 * y )',
'%s * (6.0 * (x - 0.5) * sin( 5.0 * y ) - 25.0 * ((x - 0.5)**3) * sin( 5.0 * y ))' % coef),
}
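# Editor's note (hedged): each entry above follows the pattern
# (variable, analytic solution, rhs) with rhs = coef * laplacian(solution),
# so a further manufactured solution could look like, e.g.:
#   'cos' : ('t', 'cos( 2.0 * x )', '-4.0 * %s * cos( 2.0 * x )' % coef),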
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
import numpy as nm
from sfepy.base.testing import TestCommon
from sfepy.base.base import debug, pause, assert_
output_name = 'test_msm_laplace_%s.vtk'
##
# c: 07.05.2007, r: 09.05.2008
solution = ['']
def ebc(ts, coor, **kwargs):
expression = solution[0]
val = TestCommon.eval_coor_expression( expression, coor )
return nm.atleast_1d( val )
def rhs(ts, coor, mode=None, expression=None, **kwargs):
if mode == 'qp':
if expression is None:
expression = '0.0 * x'
val = TestCommon.eval_coor_expression( expression, coor )
val.shape = (val.shape[0], 1, 1)
return {'val' : val}
functions = {
'ebc' : (ebc,),
'rhs' : (rhs,),
}
##
# c: 07.05.2008
class Test( TestCommon ):
##
# c: 07.05.2007, r: 07.05.2008
def from_conf( conf, options ):
from sfepy.discrete import Problem
problem = Problem.from_conf(conf)
test = Test( problem = problem,
conf = conf, options = options )
return test
from_conf = staticmethod( from_conf )
##
# c: 09.05.2007, r: 25.06.2008
def _build_rhs( self, sols ):
for sol in sols.itervalues():
assert_( len( sol ) == 3 )
return sols
##
# c: 07.05.2007, r: 09.05.2008
def test_msm_laplace( self ):
import os.path as op
problem = self.problem
variables = problem.get_variables()
materials = problem.get_materials()
sols = self._build_rhs( self.conf.solutions )
ok = True
for sol_name, sol in sols.iteritems():
self.report( 'testing', sol_name )
var_name, sol_expr, rhs_expr = sol
self.report( 'sol:', sol_expr )
self.report( 'rhs:', rhs_expr )
globals()['solution'][0] = sol_expr
materials['rhs'].function.set_extra_args(expression=rhs_expr)
problem.time_update()
state = problem.solve()
coor = variables[var_name].field.get_coor()
ana_sol = self.eval_coor_expression( sol_expr, coor )
num_sol = state(var_name)
ana_norm = nm.linalg.norm( ana_sol, nm.inf )
ret = self.compare_vectors( ana_sol, num_sol,
allowed_error = ana_norm * 1e-2,
label1 = 'analytical %s' % var_name,
label2 = 'numerical %s' % var_name,
norm = nm.inf )
if not ret:
self.report( 'variable %s: failed' % var_name )
fname = op.join( self.options.out_dir, self.conf.output_name )
out = {}
astate = state.copy()
astate.set_full(ana_sol)
aux = astate.create_output_dict()
out['ana_t'] = aux['t']
aux = state.create_output_dict()
out['num_t'] = aux['t']
problem.domain.mesh.write( fname % sol_name, io = 'auto', out = out )
ok = ok and ret
return ok
|
RexFuzzle/sfepy
|
tests/test_msm_laplace.py
|
Python
|
bsd-3-clause
| 4,418
|
from bokeh.plotting import figure, show
p = figure(width=400, height=400)
p.block(x=[1, 2, 3], y=[1, 2, 3], width=[0.2, 0.5, 0.1], height=1.5)
show(p)
|
bokeh/bokeh
|
sphinx/source/docs/user_guide/examples/plotting_rectangles_block.py
|
Python
|
bsd-3-clause
| 153
|
#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory method to retrieve the appropriate port implementation."""
import fnmatch
import optparse
import re
from webkitpy.layout_tests.port import builders
def platform_options(use_globs=False):
return [
optparse.make_option('--platform', action='store',
help=('Glob-style list of platform/ports to use (e.g., "mac*")' if use_globs else 'Platform to use (e.g., "mac-lion")')),
optparse.make_option('--chromium', action='store_const', dest='platform',
const=('chromium*' if use_globs else 'chromium'),
help=('Alias for --platform=chromium*' if use_globs else 'Alias for --platform=chromium')),
optparse.make_option('--chromium-android', action='store_const', dest='platform',
const=('chromium-android*' if use_globs else 'chromium-android'),
                           help=('Alias for --platform=chromium-android*' if use_globs else 'Alias for --platform=chromium-android')),
optparse.make_option('--efl', action='store_const', dest='platform',
const=('efl*' if use_globs else 'efl'),
help=('Alias for --platform=efl*' if use_globs else 'Alias for --platform=efl')),
optparse.make_option('--gtk', action='store_const', dest='platform',
const=('gtk*' if use_globs else 'gtk'),
help=('Alias for --platform=gtk*' if use_globs else 'Alias for --platform=gtk')),
optparse.make_option('--qt', action='store_const', dest="platform",
const=('qt*' if use_globs else 'qt'),
                           help=('Alias for --platform=qt*' if use_globs else 'Alias for --platform=qt')),
]
def configuration_options():
return [
optparse.make_option("-t", "--target", dest="configuration", help="(DEPRECATED)"),
# FIXME: --help should display which configuration is default.
optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
help='Set the configuration to Debug'),
optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
help='Set the configuration to Release'),
optparse.make_option('--32-bit', action='store_const', const='x86', default=None, dest="architecture",
help='use 32-bit binaries by default (x86 instead of x86_64)'),
]
def _builder_options(builder_name):
configuration = "Debug" if re.search(r"[d|D](ebu|b)g", builder_name) else "Release"
is_webkit2 = builder_name.find("WK2") != -1
builder_name = builder_name
return optparse.Values({'builder_name': builder_name, 'configuration': configuration, 'webkit_test_runner': is_webkit2})
class PortFactory(object):
PORT_CLASSES = (
'chromium_android.ChromiumAndroidPort',
'chromium_linux.ChromiumLinuxPort',
'chromium_mac.ChromiumMacPort',
'chromium_win.ChromiumWinPort',
'efl.EflPort',
'gtk.GtkPort',
'mac.MacPort',
'mock_drt.MockDRTPort',
'qt.QtPort',
'test.TestPort',
'win.WinPort',
)
def __init__(self, host):
self._host = host
def _default_port(self, options):
platform = self._host.platform
if platform.is_linux() or platform.is_freebsd():
return 'chromium-linux'
elif platform.is_mac():
return 'mac'
elif platform.is_win():
return 'win'
raise NotImplementedError('unknown platform: %s' % platform)
def get(self, port_name=None, options=None, **kwargs):
"""Returns an object implementing the Port interface. If
port_name is None, this routine attempts to guess at the most
appropriate port on this platform."""
port_name = port_name or self._default_port(options)
# FIXME(dpranke): We special-case '--platform chromium' so that it can co-exist
# with '--platform chromium-mac' and '--platform chromium-linux' properly (we
# can't look at the port_name prefix in this case).
if port_name == 'chromium':
port_name = 'chromium-' + self._host.platform.os_name
for port_class in self.PORT_CLASSES:
module_name, class_name = port_class.rsplit('.', 1)
module = __import__(module_name, globals(), locals(), [], -1)
cls = module.__dict__[class_name]
if port_name.startswith(cls.port_name):
port_name = cls.determine_full_port_name(self._host, options, port_name)
return cls(self._host, port_name, options=options, **kwargs)
raise NotImplementedError('unsupported platform: "%s"' % port_name)
def all_port_names(self, platform=None):
"""Return a list of all valid, fully-specified, "real" port names.
This is the list of directories that are used as actual baseline_paths()
by real ports. This does not include any "fake" names like "test"
or "mock-mac", and it does not include any directories that are not.
If platform is not specified, we will glob-match all ports"""
platform = platform or '*'
return fnmatch.filter(builders.all_port_names(), platform)
def get_from_builder_name(self, builder_name):
port_name = builders.port_name_for_builder_name(builder_name)
assert port_name, "unrecognized builder name '%s'" % builder_name
return self.get(port_name, _builder_options(builder_name))
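# Editor's addition: a hedged usage sketch, not part of the original module.
def _port_factory_example(host, options=None):
    """Illustrative only: 'chromium-linux' is an assumed port name; `host` and
    `options` come from the surrounding webkitpy tooling."""
    factory = PortFactory(host)
    names = factory.all_port_names('chromium-*')    # glob-match real port names
    port = factory.get('chromium-linux', options)   # instantiate a Port implementation
    return names, port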
|
leighpauls/k2cro4
|
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/factory.py
|
Python
|
bsd-3-clause
| 6,966
|
from pytest import fixture, mark
from ..generic import GenericOAuthenticator
from .mocks import setup_oauth_mock
def user_model(username):
"""Return a user model"""
return {
'username': username,
'scope': 'basic',
}
def Authenticator():
return GenericOAuthenticator(
token_url='https://generic.horse/oauth/access_token',
userdata_url='https://generic.horse/oauth/userinfo'
)
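# Editor's note (hedged illustration, not part of the test suite): in a real
# deployment the same traits are typically set in jupyterhub_config.py, e.g.:
#   c.JupyterHub.authenticator_class = GenericOAuthenticator
#   c.GenericOAuthenticator.token_url = 'https://generic.horse/oauth/access_token'
#   c.GenericOAuthenticator.userdata_url = 'https://generic.horse/oauth/userinfo'
# (client_id / client_secret would also be required; they are omitted here.)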
@fixture
def generic_client(client):
setup_oauth_mock(client,
host='generic.horse',
access_token_path='/oauth/access_token',
user_path='/oauth/userinfo',
)
return client
@mark.gen_test
def test_generic(generic_client):
authenticator = Authenticator()
handler = generic_client.handler_for_user(user_model('wash'))
user_info = yield authenticator.authenticate(handler)
assert sorted(user_info) == ['auth_state', 'name']
name = user_info['name']
assert name == 'wash'
auth_state = user_info['auth_state']
assert 'access_token' in auth_state
assert 'oauth_user' in auth_state
assert 'refresh_token' in auth_state
assert 'scope' in auth_state
|
enolfc/oauthenticator
|
oauthenticator/tests/test_generic.py
|
Python
|
bsd-3-clause
| 1,152
|
# -*- coding: utf-8 -*-
__author__ = "Daniel Roy Greenfeld"
__email__ = "pydanny@gmail.com"
__version__ = "0.6.1"
import os
import sys
try: # Forced testing
from shutil import which
except ImportError: # Forced testing
# Versions prior to Python 3.3 don't have shutil.which
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
Note: This function was backported from the Python 3 source code.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path
# extensions. This will allow us to short circuit when given
# "python.exe". If it does match, only test that one, otherwise we
# have to try others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
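# Editor's addition: a hedged usage sketch, not part of the original module.
def _which_example():
    """Illustrative only: 'python' is an assumed command name; the result is a
    path string if the command is found on PATH (honouring PATHEXT on Windows),
    otherwise None."""
    return which("python")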
|
pydanny/whichcraft
|
whichcraft.py
|
Python
|
bsd-3-clause
| 2,881
|
import sys, os
import json
import re
import yarp
from collections import OrderedDict
# Global variables for the names of the input and output ports
local_in_port_name = ""
local_out_port_name = ""
local_GPS_port_name = ""
local_Status_port_name = ""
local_Dest_port_name = ""
local_Heal_port_name = ""
local_Proximity_in_port_name = ""
local_Proximity_out_port_name = ""
# Global variables for the ors names of the ports
ors_in_port_name = ""
ors_out_port_name = ""
ors_GPS_port_name = ""
ors_Status_port_name = ""
ors_Dest_port_name = ""
ors_Heal_port_name = ""
ors_Proximity_in_port_name = ""
ors_Proximity_out_port_name = ""
# Global variables for the yarp ports
local_in_port = 0
local_out_port = 0
local_GPS_port = 0
local_Status_port = 0
local_Dest_port = 0
local_Heal_port = 0
def read_waypoints():
""" Read a list of waypoints from a file.
Format the data as a list of dictionaries,
in the same format as the java objects from ROSACE."""
filename = "rosace-waypoints.txt"
file = open(filename, "r")
wp_list = []
for line in file:
# Get the individual elements, splitting by whitespace
data_list = line.split()
print (data_list)
coordinate = OrderedDict( [ ('x', data_list[0]), ('y', data_list[1]), ('z', data_list[2]) ] )
waypoint = OrderedDict( [ ('point', coordinate), ('radius', data_list[3]) ] )
#waypoint = OrderedDict( [ ('x', data_list[0]), ('y', data_list[1]), ('z', data_list[2]), ('speed', data_list[3]) ] )
wp_list.append (waypoint)
return wp_list
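# Illustrative "rosace-waypoints.txt" line (hypothetical values): whitespace-separated
# "x y z radius", e.g. "10.0 5.0 0.0 1.5", which read_waypoints() turns into
#     OrderedDict([('point', OrderedDict([('x', '10.0'), ('y', '5.0'), ('z', '0.0')])),
#                  ('radius', '1.5')])
# Note that the values remain strings, exactly as returned by split().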
def command_robot():
waiting = True
wp_list = read_waypoints ()
while waiting:
"""
yarp_data = local_in_port.read(False)
if yarp_data != None:
# Read a string from the bottle
json_data = yarp_data.toString()
data = decode_message(json_data)
print ("Current robot status:")
for key, value in data.items():
print ("\t{0}:\t{1}".format(key, value))
"""
#raw_coords = raw_input("Input new coordinates: ")
#print ("The coordinates read are: {0}, of type ({1})".format(raw_coords, type(raw_coords)))
command = input("Enter command: [ (g)ps / (n)eighbours / s(t)atus / (w)aypoint / (h)eal / e(x)it ] ")
#command = input("Enter command: [ (m)ove / (s)top / s(t)atus / (w)aypoint / (n)eighbours / (h)eal / e(x)it ] ")
if command == "move" or command == "m":
command = {"command":"move"}
send_command(command)
elif command == "stop" or command == "s":
command = {"command":"stop"}
send_command(command)
elif command == "waypoint" or command == "w":
wp = wp_list.pop(0)
message = json.dumps (wp)
print ("Next waypoint: {0}".format(message))
# Send the json string through a yarp port
bottle = local_Dest_port.prepare()
bottle.clear()
bottle.addString(message)
local_Dest_port.write()
elif command == "heal" or command == "h":
command = {'heal': 1}
message = json.dumps (command)
# Send the json string through a yarp port
bottle = local_Heal_port.prepare()
bottle.clear()
bottle.addString(message)
local_Heal_port.write()
elif command == "neighbours" or command == "n" or \
command == "radius" or command == "r":
# Separate commands for the radio sensor
#if command == "neighbours" or command == "n":
#command = {"request": "Neighbours"}
#elif command == "radius" or command == "r":
#command = {"range": "10"}
#message = json.dumps (command)
# Send the json string through a yarp port
#bottle = local_Proximity_out_port.prepare()
#bottle.clear()
#bottle.addString(message)
#local_Proximity_out_port.write()
# Read the response
yarp_data = local_Proximity_in_port.read(False)
if yarp_data != None:
# Read a string from the bottle
json_data = yarp_data.toString()
data = decode_message(json_data)
print ("Current robot neighbours:")
for key, value in data.items():
print ("\t{0}:\t{1}".format(key, value))
elif command == "gps" or command == "g":
# Read the response
yarp_data = local_GPS_port.read(False)
if yarp_data != None:
# Read a string from the bottle
json_data = yarp_data.toString()
data = decode_message(json_data)
print ("Current robot location:")
for key, value in data.items():
print ("\t{0}:\t{1}".format(key, value))
elif command == "status" or command == "t":
# Read the response
yarp_data = local_Status_port.read(False)
if yarp_data != None:
# Read a string from the bottle
json_data = yarp_data.toString()
data = decode_message(json_data)
print ("Current robot status:")
for key, value in data.items():
print ("\t{0}:\t{1}".format(key, value))
elif command == "exit" or command == "x":
print ("Exiting the function")
sys.exit()
def send_command(command):
""" Send a message through the yarp output port."""
message = json.dumps (command)
bottle = local_out_port.prepare()
bottle.clear()
bottle.addString(message)
local_out_port.write(False)
def decode_message(json_data):
""" Decode a data structure using JSON.
The data is initially a string.
Returns a Python object,
either an int, double, string, list or dictionary."""
# Remove the quotations at the start and end of the string
json_data = re.sub(r'"(.*)"', r'\1', json_data)
# Unescape all other quotation marks
json_data = re.sub(r'\\(.)', r'\1', json_data)
clean_data = json.loads(json_data)
return clean_data
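# Illustrative round trip for decode_message (hypothetical payload): the string
# '"{\"speed\": 1.0}"' first loses its outer quotation marks, is then unescaped
# to '{"speed": 1.0}', and json.loads() finally returns {'speed': 1.0}.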
def port_setup(robot_name):
""" Open the input and output ports."""
global local_in_port
global local_out_port
global local_GPS_port
global local_Status_port
global local_Dest_port
global local_Heal_port
global local_in_port_name
global local_out_port_name
global local_GPS_port_name
global local_Status_port_name
global local_Dest_port_name
global local_Heal_port_name
global local_Proximity_in_port
global local_Proximity_out_port
global ors_in_port_name
global ors_out_port_name
global ors_GPS_port_name
global ors_Status_port_name
global ors_Dest_port_name
global ors_Heal_port_name
global ors_Proximity_in_port_name
global ors_Proximity_out_port_name
# Define the names for all the ports
port_prefix = "/ors/robots/" + robot_name + "/"
local_port_prefix = "/atrv_client/" + robot_name + "/"
#ors_in_port_name = port_prefix + "in"
#ors_out_port_name = port_prefix + "out"
ors_Dest_port_name = port_prefix + "Motion_Controller/in"
ors_Heal_port_name = port_prefix + "Healer_Beam/in"
ors_GPS_port_name = port_prefix + "GPS/out"
ors_Status_port_name = port_prefix + "Status_Sensor/out"
ors_Proximity_out_port_name = port_prefix + "Proximity_Sensor/out"
#ors_Proximity_in_port_name = port_prefix + "Proximity_Sensor/in"
#local_in_port_name = local_port_prefix + "in/"
#local_out_port_name = local_port_prefix + "out/"
local_GPS_port_name = local_port_prefix + "GPS/in/"
local_Status_port_name = local_port_prefix + "Status_Sensor/in/"
local_Dest_port_name = local_port_prefix + "Motion_Controller/out/"
local_Heal_port_name = local_port_prefix + "Healer_Beam/out/"
local_Proximity_in_port_name = local_port_prefix + "Proximity_Sensor/in"
#local_Proximity_out_port_name = local_port_prefix + "Proximity_Sensor/out"
# Start the yarp network connection
yarp.Network.init()
# Open the client ports
#local_in_port = yarp.BufferedPortBottle()
#local_in_port.open(local_in_port_name)
#local_out_port = yarp.BufferedPortBottle()
#local_out_port.open(local_out_port_name)
local_GPS_port = yarp.BufferedPortBottle()
local_GPS_port.open(local_GPS_port_name)
local_Status_port = yarp.BufferedPortBottle()
local_Status_port.open(local_Status_port_name)
local_Dest_port = yarp.BufferedPortBottle()
local_Dest_port.open(local_Dest_port_name)
local_Heal_port = yarp.BufferedPortBottle()
local_Heal_port.open(local_Heal_port_name)
#local_Proximity_out_port = yarp.BufferedPortBottle()
#local_Proximity_out_port.open(local_Proximity_out_port_name)
local_Proximity_in_port = yarp.BufferedPortBottle()
local_Proximity_in_port.open(local_Proximity_in_port_name)
# Connect the client ports to the simulator ports
#yarp.Network.connect (local_out_port_name, ors_in_port_name)
#yarp.Network.connect (ors_out_port_name, local_in_port_name)
yarp.Network.connect (ors_GPS_port_name, local_GPS_port_name)
yarp.Network.connect (ors_Status_port_name, local_Status_port_name)
yarp.Network.connect (local_Dest_port_name, ors_Dest_port_name)
yarp.Network.connect (local_Heal_port_name, ors_Heal_port_name)
#yarp.Network.connect (local_Proximity_out_port_name, ors_Proximity_in_port_name)
yarp.Network.connect (ors_Proximity_out_port_name, local_Proximity_in_port_name)
def usage(program_name):
print ("Usage: {0} [robot_name]\n", program_name)
def main():
print ("********* ATRV client *********")
robot_name = "ATRV"
argc = len(sys.argv)
if argc == 2:
robot_name = sys.argv[1]
elif argc > 3:
usage(sys.argv[0])
sys.exit()
port_setup(robot_name)
print (" * Writing commands to " + ors_in_port_name)
print (" * Listening status on " + ors_out_port_name)
print (" * Writing heal command to " + ors_Heal_port_name)
print (" * Writing destination to " + ors_Dest_port_name)
print (" * Listening to GPS on " + ors_GPS_port_name)
print (" * Listening to robot status on " + ors_Status_port_name)
print (" * Writing commands to " + ors_Proximity_in_port_name)
print (" * Listening status on " + ors_Proximity_out_port_name)
print (" * Enter command:")
command_robot()
if __name__ == "__main__":
main()
|
Arkapravo/morse-0.6
|
examples/clients/atrv/Rosace_Client.py
|
Python
|
bsd-3-clause
| 10,663
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Anscombe", sigma = 0.0, exog_count = 0, ar_order = 12);
|
antoinecarme/pyaf
|
tests/artificial/transf_Anscombe/trend_MovingAverage/cycle_12/ar_12/test_artificial_1024_Anscombe_MovingAverage_12_12_0.py
|
Python
|
bsd-3-clause
| 269
|
#!/usr/bin/env python
import unittest
import os, sys, commands
import comm
class TestPackertoolsFunctions(unittest.TestCase):
def test_path(self):
comm.setUp()
chmodstatus = commands.getstatusoutput("chmod +x " + comm.Pck_Tools + "make_apk.py")
cmd = "make_apk.py --package=org.hello.world --name=world --arch=%s --mode=%s --app-url=https://crosswalk-project.org/" % \
(comm.ARCH, comm.MODE)
packstatus = commands.getstatusoutput(cmd)
if packstatus[0] == 0:
print "Generate APK ----------------> OK!"
result = commands.getstatusoutput("ls")
self.assertIn(comm.AppName, result[1])
inststatus = commands.getstatusoutput("adb install " + comm.AppName)
if inststatus[0] == 0:
print "Install APK ----------------> OK"
print "Find Package in device ---------------->Start"
pmstatus = commands.getstatusoutput("adb shell pm list packages |grep org.hello.world")
if pmstatus[0] == 0:
print "Find Package in device ---------------->O.K"
print "Launch APK ---------------->Start"
launchstatus = commands.getstatusoutput("adb shell am start -n org.hello.world/.TestActivity")
if launchstatus[0] !=0:
print "Launch APK ---------------->Error"
else:
print "Launch APK ---------------->OK"
print "Stop APK ---------------->Start"
stopstatus = commands.getstatusoutput("adb shell am force-stop org.hello.world")
if stopstatus[0] == 0:
print "Stop APK ---------------->O.K"
print "Uninstall APK ---------------->Start"
unistatus = commands.getstatusoutput("adb uninstall org.hello.world")
if unistatus[0] == 0:
print "Uninstall APK ---------------->O.K"
else:
print "Uninstall APK ---------------->Error"
else:
print "Stop APK ---------------->Error"
os.system("adb uninstall org.hello.world")
else:
print "Find Package in device ---------------->Error"
os.system("adb uninstall org.hello.world")
else:
print "Install APK ----------------> Error"
else:
print "Generate APK ----------------> Error!"
result = commands.getstatusoutput("ls")
self.assertNotIn(comm.AppName, result[1])
os.remove(comm.AppName)
os.chdir("../packertools2")
if __name__ == '__main__':
unittest.main()
|
yugang/crosswalk-test-suite
|
wrt/wrt-packertool2-android-tests/packertool2/pathtest.py
|
Python
|
bsd-3-clause
| 2,898
|
# coding=utf-8
from django import forms
from parler.forms import TranslatableModelForm
from allink_apps.members.models import Members
class MembersAdminForm(TranslatableModelForm):
class Meta:
model = Members
fields = ('member_nr', 'first_name', 'last_name', 'email', 'language')
class MembersProfileEditForm(forms.ModelForm):
class Meta:
model = Members
fields = ('email', )
|
allink/allink-apps
|
members/forms.py
|
Python
|
bsd-3-clause
| 422
|
import datetime
from djpcms.test import TestCase
from djpcms.models import SiteContent
class CalendarViewTest(TestCase):
fixtures = ["sitecontent.json"]
appurls = 'regression.apparchive.appurls'
def callView(self, url):
today = datetime.date.today()
response = self.client.get(today.strftime(url).lower())
if isinstance(response.context, list):
context = response.context[0]
else:
context = response.context
return today, response, context
def testYearView(self):
today, response, context = self.callView("/content/%Y/")
pa = context["paginator"]
self.assertEqual(int(context["year"]), today.year)
self.assertEqual(pa.total,
SiteContent.objects.filter(last_modified__year = today.year).count())
def testMonthView(self):
today, response, context = self.callView("/content/%Y/%b/")
pa = context["paginator"]
self.assertEqual(int(context["year"]), today.year)
#self.assertEqual(context["month"], today.month)
self.assertEqual(pa.total,
SiteContent.objects.filter(last_modified__year = today.year,
last_modified__month = today.month).count())
def testDayView(self):
today, response, context = self.callView("/content/%Y/%b/%d/")
pa = context["paginator"]
self.assertEqual(int(context["year"]), today.year)
#self.assertEqual(context["month"], today.month)
self.assertEqual(int(context["day"]), today.day)
self.assertEqual(pa.total,
SiteContent.objects.filter(last_modified__year = today.year,
last_modified__month = today.month,
last_modified__day = today.day).count())
|
strogo/djpcms
|
tests/regression/apparchive/tests.py
|
Python
|
bsd-3-clause
| 1,991
|
from .regex import REGEX
from .base import BasicParser
from ..models import DailySkuPerformanceReportItem
class DailySkuPerformanceReportParser(BasicParser):
TYPE = "daily-sku-performance-report"
FIRST_REGEX = REGEX.join([
"Start Date",
"End Date",
"Merchant Name",
"SKU",
"Clicks",
"Impressions",
"CTR",
"Currency",
"Total Spend",
"Avg\. CPC"
], '\t')
LINE_REGEX = REGEX.join([
REGEX.datetime,
REGEX.datetime,
REGEX.string_with_space,
REGEX.sku,
REGEX.integer,
REGEX.integer,
REGEX.float,
REGEX.currency,
REGEX.float,
REGEX.string
], '\t')
#LINE_REGEX = r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [A-Z]{3})\t(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [A-Z]{3})\t([_a-zA-Z 0-9\-]*)\t([A-Z0-9]*)\t(\d+)\t(\d+)\t(\d+\.\d{2,})\t([A-Z]{3})\t(\d+\.\d{2})\t(\S+)"
def process_validated_line(self, line, match):
"""
Create a DailySkuPerformanceReportItem
"""
item = {
"report_id": self.report.id,
"start_date": match.group(1),
"end_date": match.group(2),
"merchant_name": match.group(3),
"sku": match.group(4),
"clicks": int(match.group(5)),
"impressions": int(match.group(6)),
"ctr": match.group(7),
"currency": match.group(8),
"total_spend": match.group(9),
"avg_cpc": match.group(10),
}
self.insert_dict(item)
return
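        # The ORM-based code below is never reached because of the early return
        # above; rows are inserted through insert_dict() instead.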
item = DailySkuPerformanceReportItem()
item.report_id = self.report.id
item.start_date = match.group(1)
item.end_date = match.group(2)
item.merchant_name = match.group(3)
item.sku = match.group(4)
item.clicks = int(match.group(5))
item.impressions = int(match.group(6))
item.ctr = match.group(7)
item.currency = match.group(8)
item.total_spend = match.group(9)
item.avg_cpc = match.group(10)
item.save(self.session)
|
Kellel/reports
|
report/parsers/daily_sku_performance_report.py
|
Python
|
bsd-3-clause
| 2,148
|
#!/usr/bin/env python
#
# parse_pdb_header.py
# parses header of PDB files into a python dictionary.
# emerged from the Columba database project www.columba-db.de.
#
# author: Kristian Rother
#
# license: same as BioPython, read LICENSE.TXT from current BioPython release.
#
# last modified: 9.2.2004
#
# Added some small changes: the whole PDB file is not read in anymore, but just
# until the first ATOM record (faster). I also split parse_pdb_header into
# parse_pdb_header and parse_pdb_header_list, because parse_pdb_header_list
# can be more easily reused in PDBParser.
#
# Thomas, 19/03/04
#
# Renamed some clearly private functions to _something (ie. parse_pdb_header_list
# is now _parse_pdb_header_list)
# Thomas 9/05/04
"""Parse the header of a PDB file."""
import re
def _get_journal(inl):
# JRNL AUTH L.CHEN,M.DOI,F.S.MATHEWS,A.Y.CHISTOSERDOV, 2BBK 7
journal=""
for l in inl:
if re.search("\AJRNL",l):
journal+=l[19:72].lower()
journal=re.sub("\s\s+"," ",journal)
return journal
def _get_references(inl):
# REMARK 1 REFERENCE 1 1CSE 11
# REMARK 1 AUTH W.BODE,E.PAPAMOKOS,D.MUSIL 1CSE 12
references=[]
actref=""
for l in inl:
if re.search("\AREMARK 1",l):
if re.search("\AREMARK 1 REFERENCE",l):
if actref!="":
actref=re.sub("\s\s+"," ",actref)
if actref!=" ":
references.append(actref)
actref=""
else:
actref+=l[19:72].lower()
if actref!="":
actref=re.sub("\s\s+"," ",actref)
if actref!=" ":
references.append(actref)
return references
# bring dates to format: 1909-01-08
def _format_date(pdb_date):
"""Converts dates from DD-Mon-YY to YYYY-MM-DD format."""
date=""
year=int(pdb_date[7:])
if year<50:
century=2000
else:
century=1900
date=str(century+year)+"-"
all_months=['xxx','Jan','Feb','Mar','Apr','May','Jun','Jul',\
'Aug','Sep','Oct','Nov','Dec']
month=str(all_months.index(pdb_date[3:6]))
if len(month)==1:
month = '0'+month
date = date+month+'-'+pdb_date[:2]
return date
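# Illustrative conversion (hypothetical date): _format_date("09-Jan-04") -> "2004-01-09".
# Callers run the raw PDB date through _nice_case() first, so the month is in mixed case.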
def _chop_end_codes(line):
"""Chops lines ending with ' 1CSA 14' and the like."""
return re.sub("\s\s\s\s+[\w]{4}.\s+\d*\Z","",line)
def _chop_end_misc(line):
"""Chops lines ending with ' 14-JUL-97 1CSA' and the like."""
return re.sub("\s\s\s\s+.*\Z","",line)
def _nice_case(line):
"""Makes A Lowercase String With Capitals."""
l=line.lower()
s=""
i=0
nextCap=1
while i<len(l):
c=l[i]
if c>='a' and c<='z' and nextCap:
c=c.upper()
nextCap=0
elif c==' ' or c=='.' or c==',' or c==';' or c==':' or c=='\t' or\
c=='-' or c=='_':
nextCap=1
s+=c
i+=1
return s
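# Illustrative conversion: _nice_case("x-ray diffraction") -> "X-Ray Diffraction".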
def parse_pdb_header(infile):
"""
Returns the header lines of a pdb file as a dictionary.
Dictionary keys are: head, deposition_date, release_date, structure_method,
resolution, structure_reference, journal_reference, author and
compound.
"""
header = []
do_close = False
if isinstance(infile, str):
f = open(infile,'r')
do_close = True
else:
f = infile
for l in f:
record_type=l[0:6]
if record_type=='ATOM ' or record_type=='HETATM' or record_type=='MODEL ':
break
else:
header.append(l)
if do_close:
f.close()
return _parse_pdb_header_list(header)
def _parse_pdb_header_list(header):
# database fields
dict={'name':"",
'head':'',
'deposition_date' : "1909-01-08",
'release_date' : "1909-01-08",
'structure_method' : "unknown",
'resolution' : 0.0,
'structure_reference' : "unknown",
'journal_reference' : "unknown",
'author' : "",
'compound':{'1':{'misc':''}},'source':{'1':{'misc':''}}}
dict['structure_reference'] = _get_references(header)
dict['journal_reference'] = _get_journal(header)
comp_molid="1"
src_molid="1"
last_comp_key="misc"
last_src_key="misc"
for hh in header:
h=re.sub("[\s\n\r]*\Z","",hh) # chop linebreaks off
#key=re.sub("\s.+\s*","",h)
key = h[:6].strip()
#tail=re.sub("\A\w+\s+\d*\s*","",h)
tail = h[10:].strip()
# print key+":"+tail
# From here, all the keys from the header are being parsed
if key=="TITLE":
name=_chop_end_codes(tail).lower()
if 'name' in dict:
dict['name'] += " "+name
else:
dict['name']=name
elif key=="HEADER":
rr=re.search("\d\d-\w\w\w-\d\d",tail)
if rr!=None:
dict['deposition_date']=_format_date(_nice_case(rr.group()))
head=_chop_end_misc(tail).lower()
dict['head']=head
elif key=="COMPND":
tt=re.sub("\;\s*\Z","",_chop_end_codes(tail)).lower()
# look for E.C. numbers in COMPND lines
rec = re.search('\d+\.\d+\.\d+\.\d+',tt)
if rec:
dict['compound'][comp_molid]['ec_number']=rec.group()
tt=re.sub("\((e\.c\.)*\d+\.\d+\.\d+\.\d+\)","",tt)
tok=tt.split(":")
if len(tok)>=2:
ckey=tok[0]
cval=re.sub("\A\s*","",tok[1])
if ckey=='mol_id':
dict['compound'][cval]={'misc':''}
comp_molid=cval
last_comp_key="misc"
else:
dict['compound'][comp_molid][ckey]=cval
last_comp_key=ckey
else:
dict['compound'][comp_molid][last_comp_key]+=tok[0]+" "
elif key=="SOURCE":
tt=re.sub("\;\s*\Z","",_chop_end_codes(tail)).lower()
tok=tt.split(":")
# print tok
if len(tok)>=2:
ckey=tok[0]
cval=re.sub("\A\s*","",tok[1])
if ckey=='mol_id':
dict['source'][cval]={'misc':''}
comp_molid=cval
last_src_key="misc"
else:
dict['source'][comp_molid][ckey]=cval
last_src_key=ckey
else:
dict['source'][comp_molid][last_src_key]+=tok[0]+" "
elif key=="KEYWDS":
kwd=_chop_end_codes(tail).lower()
if 'keywords' in dict:
dict['keywords']+=" "+kwd
else:
dict['keywords']=kwd
elif key=="EXPDTA":
expd=_chop_end_codes(tail)
# chop junk at end of lines for some structures
expd=re.sub('\s\s\s\s\s\s\s.*\Z','',expd)
# if re.search('\Anmr',expd,re.IGNORECASE): expd='nmr'
# if re.search('x-ray diffraction',expd,re.IGNORECASE): expd='x-ray diffraction'
dict['structure_method']=expd.lower()
elif key=="CAVEAT":
# make Annotation entries out of these!!!
pass
elif key=="REVDAT":
rr=re.search("\d\d-\w\w\w-\d\d",tail)
if rr!=None:
dict['release_date']=_format_date(_nice_case(rr.group()))
elif key=="JRNL":
# print key,tail
if 'journal' in dict:
dict['journal']+=tail
else:
dict['journal']=tail
elif key=="AUTHOR":
auth = _nice_case(_chop_end_codes(tail))
if 'author' in dict:
dict['author']+=auth
else:
dict['author']=auth
elif key=="REMARK":
if re.search("REMARK 2 RESOLUTION.",hh):
r=_chop_end_codes(re.sub("REMARK 2 RESOLUTION.",'',hh))
r=re.sub("\s+ANGSTROM.*","",r)
try:
dict['resolution']=float(r)
except:
#print 'nonstandard resolution',r
dict['resolution']=None
else:
# print key
pass
if dict['structure_method']=='unknown':
if dict['resolution']>0.0: dict['structure_method']='x-ray diffraction'
return dict
if __name__=='__main__':
# Reads a PDB file passed as argument, parses its header, extracts
# some data and returns it as a dictionary.
import sys
filename = sys.argv[1]
handle = open(filename,'r')
data_dict = parse_pdb_header(handle)
handle.close()
# print the dictionary
for k, y in data_dict.items():
print("-"*40)
print(k)
print(y)
|
q10/fiddle
|
python/Parsers/PDBHeaderParser.py
|
Python
|
bsd-3-clause
| 9,105
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" Check Fuzzy Logic QC test
"""
from datetime import timedelta
from hypothesis import given, settings, strategies as st
from hypothesis.extra.numpy import arrays, array_shapes
import numpy as np
import pytest
from cotede.qctests import FuzzyLogic, fuzzylogic
from .compare import compare_input_types, compare_compound_feature_input_types
from ..data import DummyData
CFG = {
"output": {
"low": {'type': 'trimf', 'params': [0.0, 0.225, 0.45]},
"medium": {'type': 'trimf', 'params': [0.275, 0.5, 0.725]},
"high": {'type': 'smf', 'params': [0.55, 0.775]}
},
"features": {
"spike": {
"weight": 1,
"low": {'type': 'zmf', 'params': [0.07, 0.2]},
"medium": {'type': 'trapmf', 'params': [0.07, 0.2, 2, 6]},
"high": {'type': 'smf', 'params': [2, 6]}
},
"woa_normbias": {
"weight": 1,
"low": {'type': 'zmf', 'params': [3, 4]},
"medium": {'type': 'trapmf', 'params': [3, 4, 5, 6]},
"high": {'type': 'smf', 'params': [5, 6]}
},
"gradient": {
"weight": 1,
"low": {'type': 'zmf', 'params': [0.5, 1.5]},
"medium": {'type': 'trapmf', 'params': [0.5, 1.5, 3, 4]},
"high": {'type': 'smf', 'params': [3, 4]}
}
}
}
def test_standard_dataset():
features = {
"fuzzylogic": np.array([np.nan, 0.22222222, 0.22222222, 0.22222222, 0.23232323, 0.22222222, 0.26262626, 0.22222222, 0.24242424, 0.22222222, 0.29292929, 0.43434343, 0.22222222, np.nan, np.nan])
}
flags = {
"fuzzylogic": np.array(
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 1, 0, 0], dtype="i1"
)
}
profile = DummyData()
y = FuzzyLogic(profile, "TEMP", cfg=CFG)
assert 'fuzzylogic' in y.flags
assert np.shape(profile["TEMP"]) == np.shape(y.flags["fuzzylogic"])
for f in features:
assert np.allclose(y.features[f], features[f], equal_nan=True)
for f in flags:
assert np.allclose(y.flags[f], flags[f], equal_nan=True)
def test_feature_input_types():
x = np.array([0, 1, -1, 2, -2, 3, 2, 4, 0, np.nan])
features = {"spike": x, "woa_normbias": x, "gradient": x}
compare_compound_feature_input_types(fuzzylogic, features, cfg=CFG)
@given(data=arrays(dtype=np.float, shape=array_shapes(min_dims=2, max_dims=2, min_side=3), elements=st.floats(allow_infinity=True, allow_nan=True)))
@settings(deadline=timedelta(milliseconds=500))
def test_feature_input_types_hypothesis(data):
data = {"spike": data[:,0], "woa_normbias": data[:,1], "gradient": data[:,2]}
compare_compound_feature_input_types(fuzzylogic, data=data, cfg=CFG)
@pytest.mark.skip(reason="Check what is the issue with attrs")
def test_input_types():
# compare_tuple(FuzzyLogic, cfg=CFG)
compare_input_types(FuzzyLogic, cfg=CFG)
|
castelao/CoTeDe
|
tests/qctests/test_qc_fuzzylogic.py
|
Python
|
bsd-3-clause
| 3,272
|
"""
SSL/TLS context definition.
Most of this code is borrowed from the SGAS 3.X LUTS codebase.
NORDUnet holds the copyright for SGAS 3.X LUTS and OpenNSA.
"""
import os
from OpenSSL import SSL
class ContextFactory:
def __init__(self, private_key_path, public_key_path, certificate_dir, verify=True):
self.private_key_path = private_key_path
self.public_key_path = public_key_path
self.certificate_dir = certificate_dir
self.verify = verify
self.ctx = None
def getContext(self):
if self.ctx is not None:
return self.ctx
else:
self.ctx = self._createContext()
return self.ctx
def _createContext(self):
        ctx = SSL.Context(SSL.TLSv1_METHOD) # only tls v1 (it's almost 2012, should be okay)
ctx.use_privatekey_file(self.private_key_path)
ctx.use_certificate_file(self.public_key_path)
ctx.check_privatekey() # sanity check
def verify_callback(conn, x509, error_number, error_depth, allowed):
# just return what openssl thinks is right
if self.verify:
return allowed # return what openssl thinks is right
else:
return 1 # allow everything which has a cert
ctx.set_verify(SSL.VERIFY_PEER, verify_callback)
calist = [ ca for ca in os.listdir(self.certificate_dir) if ca.endswith('.0') ]
for ca in calist:
# openssl wants absolute paths
ca = os.path.join(self.certificate_dir, ca)
ctx.load_verify_locations(ca)
return ctx
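# Illustrative usage (the file-system paths are hypothetical): CA files in
# certificate_dir are expected to use OpenSSL's hashed ".0" naming.
#
#     factory = ContextFactory("/etc/ssl/host.key", "/etc/ssl/host.crt", "/etc/ssl/certs")
#     ctx = factory.getContext()   # built lazily on the first call, then cached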
|
jeroenh/OpenNSA
|
opennsa/ctxfactory.py
|
Python
|
bsd-3-clause
| 1,631
|
import torch
from torch import nn, Tensor
from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape
def ps_roi_pool(
input: Tensor,
boxes: Tensor,
output_size: int,
spatial_scale: float = 1.0,
) -> Tensor:
"""
Performs Position-Sensitive Region of Interest (RoI) Pool operator
described in R-FCN
Args:
input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
contains ``C`` feature maps of dimensions ``H x W``.
boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
format where the regions will be taken from.
The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
If a single Tensor is passed, then the first column should
contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
in the batch.
output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
is performed, as (height, width).
spatial_scale (float): a scaling factor that maps the box coordinates to
the input coordinates. For example, if your boxes are defined on the scale
of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
the original image), you'll want to set this to 0.5. Default: 1.0
Returns:
Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(ps_roi_pool)
_assert_has_ops()
check_roi_boxes_shape(boxes)
rois = boxes
output_size = _pair(output_size)
if not isinstance(rois, torch.Tensor):
rois = convert_boxes_to_roi_format(rois)
output, _ = torch.ops.torchvision.ps_roi_pool(input, rois, spatial_scale, output_size[0], output_size[1])
return output
class PSRoIPool(nn.Module):
"""
See :func:`ps_roi_pool`.
"""
def __init__(self, output_size: int, spatial_scale: float):
super().__init__()
_log_api_usage_once(self)
self.output_size = output_size
self.spatial_scale = spatial_scale
def forward(self, input: Tensor, rois: Tensor) -> Tensor:
return ps_roi_pool(input, rois, self.output_size, self.spatial_scale)
def __repr__(self) -> str:
s = f"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})"
return s
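# Illustrative usage (hypothetical shapes): with output_size=3 the input channel count
# must be a multiple of 3 * 3 = 9, and the pooled output keeps C / 9 channels.
#
#     feat = torch.rand(1, 9, 16, 16)
#     rois = torch.tensor([[0.0, 0.0, 0.0, 8.0, 8.0]])  # (batch_index, x1, y1, x2, y2)
#     out = ps_roi_pool(feat, rois, output_size=3)      # shape: (1, 1, 3, 3)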
|
pytorch/vision
|
torchvision/ops/ps_roi_pool.py
|
Python
|
bsd-3-clause
| 2,839
|
# -*- coding: utf-8 -*-
"""
eve.flaskapp
~~~~~~~~~~~~
This module implements the central WSGI application object as a Flask
subclass.
:copyright: (c) 2016 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import copy
from events import Events
from flask import Flask
from werkzeug.routing import BaseConverter
from werkzeug.serving import WSGIRequestHandler
import eve
from eve.defaults import build_defaults
from eve.endpoints import collections_endpoint, item_endpoint, home_endpoint, \
error_endpoint, media_endpoint, schema_collection_endpoint, \
schema_item_endpoint
from eve.exceptions import ConfigException, SchemaException
from eve.io.mongo import Mongo, Validator, GridFSMediaStorage, create_index
from eve.logging import RequestFilter
from eve.utils import api_prefix, extract_key_values
class EveWSGIRequestHandler(WSGIRequestHandler):
""" Extend werkzeug request handler to include current Eve version in all
responses, which is super-handy for debugging.
"""
@property
def server_version(self):
return 'Eve/%s ' % eve.__version__ + super(EveWSGIRequestHandler,
self).server_version
class RegexConverter(BaseConverter):
""" Extend werkzeug routing by supporting regex for urls/API endpoints """
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
class Eve(Flask, Events):
""" The main Eve object. On initialization it will load Eve settings, then
configure and enable the API endpoints. The API is launched by executing
    the code below::
app = Eve()
app.run()
:param import_name: the name of the application package
:param settings: the name of the settings file. Defaults to `settings.py`.
:param validator: custom validation class. Must be a
:class:`~cerberus.Validator` subclass. Defaults to
:class:`eve.io.mongo.Validator`.
:param data: the data layer class. Must be a :class:`~eve.io.DataLayer`
subclass. Defaults to :class:`~eve.io.Mongo`.
:param auth: the authentication class used to authenticate incoming
requests. Must be a :class: `eve.auth.BasicAuth` subclass.
:param redis: the redis (pyredis) instance used by the Rate-Limiting
feature, if enabled.
:param url_converters: dictionary of Flask url_converters to add to
supported ones (int, float, path, regex).
:param json_encoder: custom json encoder class. Must be a
JSONEncoder subclass. You probably want it to be
as eve.io.base.BaseJSONEncoder subclass.
:param media: the media storage class. Must be a
:class:`~eve.io.media.MediaStorage` subclass.
:param kwargs: optional, standard, Flask parameters.
.. versionchanged:: 0.6.1
Fix: When `SOFT_DELETE` is active an exclusive `datasource.projection`
causes a 500 error. Closes #752.
.. versionchanged:: 0.6
Add request metadata to default log record.
.. versionchanged:: 0.4
Ensure all errors returns a parseable body. Closes #365.
'auth' argument can be either an instance or a callable. Closes #248.
Made resource setup more DRY by calling register_resource.
.. versionchanged:: 0.3
Support for optional media storage system. Defaults to
GridFSMediaStorage.
.. versionchanged:: 0.2
Support for additional Flask url converters.
Support for optional, custom json encoder class.
       Support for endpoint-level authentication classes.
New method Eve.register_resource() for registering new resource after
initialization of Eve object. This is needed for simpler initialization
API of all ORM/ODM extensions.
.. versionchanged:: 0.1.0
Now supporting both "trailing slashes" and "no-trailing slashes" URLs.
.. versionchanged:: 0.0.7
'redis' argument added to handle an accessory Redis server (currently
used by the Rate-Limiting feature).
.. versionchanged:: 0.0.6
'Events' added to the list of super classes, allowing for the arbitrary
raising of events within the application.
.. versionchanged:: 0.0.4
'auth' argument added to handle authentication classes
"""
#: Allowed methods for resource endpoints
supported_resource_methods = ['GET', 'POST', 'DELETE']
#: Allowed methods for item endpoints
supported_item_methods = ['GET', 'PATCH', 'DELETE', 'PUT']
def __init__(self, import_name=__package__, settings='settings.py',
validator=Validator, data=Mongo, auth=None, redis=None,
url_converters=None, json_encoder=None,
media=GridFSMediaStorage, **kwargs):
""" Eve main WSGI app is implemented as a Flask subclass. Since we want
to be able to launch our API by simply invoking Flask's run() method,
we need to enhance our super-class a little bit.
"""
super(Eve, self).__init__(import_name, **kwargs)
# add support for request metadata to the log record
self.logger.addFilter(RequestFilter())
self.validator = validator
self.settings = settings
self.load_config()
self.validate_domain_struct()
# enable regex routing
self.url_map.converters['regex'] = RegexConverter
# optional url_converters and json encoder
if url_converters:
self.url_map.converters.update(url_converters)
self.data = data(self)
if json_encoder:
self.data.json_encoder_class = json_encoder
self.media = media(self) if media else None
self.redis = redis
if auth:
self.auth = auth() if callable(auth) else auth
else:
self.auth = None
self._init_url_rules()
self._init_media_endpoint()
self._init_schema_endpoint()
if self.config['OPLOG'] is True:
self._init_oplog()
# validate and set defaults for each resource
# Use a snapshot of the DOMAIN setup for iteration so
# further insertion of versioned resources do not
# cause a RuntimeError due to the change of size of
# the dict
domain_copy = copy.deepcopy(self.config['DOMAIN'])
for resource, settings in domain_copy.items():
self.register_resource(resource, settings)
# it seems like both domain_copy and config['DOMAIN']
# suffered changes at this point, so merge them
# self.config['DOMAIN'].update(domain_copy)
self.register_error_handlers()
def run(self, host=None, port=None, debug=None, **options):
"""
        Pass our own subclass of :class:`werkzeug.serving.WSGIRequestHandler`
        to Flask.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'``.
:param port: the port of the webserver. Defaults to ``5000``.
:param debug: if given, enable or disable debug mode.
See :attr:`debug`.
:param options: the options to be forwarded to the underlying
Werkzeug server. See
:func:`werkzeug.serving.run_simple` for more
information. """
options.setdefault('request_handler', EveWSGIRequestHandler)
super(Eve, self).run(host, port, debug, **options)
def load_config(self):
""" API settings are loaded from standard python modules. First from
`settings.py`(or alternative name/path passed as an argument) and
then, when defined, from the file specified in the
`EVE_SETTINGS` environment variable.
Since we are a Flask subclass, any configuration value supported by
Flask itself is available (besides Eve's proper settings).
.. versionchanged:: 0.6
SchemaErrors raised during configuration
.. versionchanged:: 0.5
Allow EVE_SETTINGS envvar to be used exclusively. Closes #461.
.. versionchanged:: 0.2
Allow use of a dict object as settings.
"""
# load defaults
self.config.from_object('eve.default_settings')
# overwrite the defaults with custom user settings
if isinstance(self.settings, dict):
self.config.update(self.settings)
else:
if os.path.isabs(self.settings):
pyfile = self.settings
else:
abspath = os.path.abspath(os.path.dirname(sys.argv[0]))
pyfile = os.path.join(abspath, self.settings)
try:
self.config.from_pyfile(pyfile)
except IOError:
# assume envvar is going to be used exclusively
pass
except:
raise
# overwrite settings with custom environment variable
envvar = 'EVE_SETTINGS'
if os.environ.get(envvar):
self.config.from_envvar(envvar)
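        # Illustrative override (hypothetical path): running the app with
        # EVE_SETTINGS=/path/to/production.py loads that file last, so its values
        # win over both the defaults and settings.py.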
def validate_domain_struct(self):
""" Validates that Eve configuration settings conform to the
requirements.
"""
try:
domain = self.config['DOMAIN']
except:
raise ConfigException('DOMAIN dictionary missing or wrong.')
if not isinstance(domain, dict):
raise ConfigException('DOMAIN must be a dict.')
def validate_config(self):
""" Makes sure that REST methods expressed in the configuration
settings are supported.
.. versionchanged:: 0.2.0
Default supported methods are now class-level attributes.
Resource validation delegated to _validate_resource_settings().
.. versionchanged:: 0.1.0
Support for PUT method.
.. versionchanged:: 0.0.4
Support for 'allowed_roles' and 'allowed_item_roles'
.. versionchanged:: 0.0.2
Support for DELETE resource method.
"""
# make sure that global resource methods are supported.
self.validate_methods(self.supported_resource_methods,
self.config.get('RESOURCE_METHODS'),
'resource')
# make sure that global item methods are supported.
self.validate_methods(self.supported_item_methods,
self.config.get('ITEM_METHODS'),
'item')
# make sure that individual resource/item methods are supported.
for resource, settings in self.config['DOMAIN'].items():
self._validate_resource_settings(resource, settings)
def _validate_resource_settings(self, resource, settings):
""" Validates one resource in configuration settings.
:param resource: name of the resource which settings refer to.
:param settings: settings of resource to be validated.
.. versionchanged:: 0.4
validate that auth_field is not set to ID_FIELD. See #266.
.. versionadded:: 0.2
"""
self.validate_methods(self.supported_resource_methods,
settings['resource_methods'],
'[%s] resource ' % resource)
self.validate_methods(self.supported_item_methods,
settings['item_methods'],
'[%s] item ' % resource)
# while a resource schema is optional for read-only access,
# it is mandatory for write-access to resource/items.
if 'POST' in settings['resource_methods'] or \
'PATCH' in settings['item_methods']:
if len(settings['schema']) == 0:
raise ConfigException('A resource schema must be provided '
'when POST or PATCH methods are allowed '
'for a resource [%s].' % resource)
self.validate_roles('allowed_roles', settings, resource)
self.validate_roles('allowed_read_roles', settings, resource)
self.validate_roles('allowed_write_roles', settings, resource)
self.validate_roles('allowed_item_roles', settings, resource)
self.validate_roles('allowed_item_read_roles', settings, resource)
self.validate_roles('allowed_item_write_roles', settings, resource)
if settings['auth_field'] == settings['id_field']:
raise ConfigException('"%s": auth_field cannot be set to id_field '
'(%s)' % (resource, settings['id_field']))
self.validate_schema(resource, settings['schema'])
def validate_roles(self, directive, candidate, resource):
""" Validates that user role directives are syntactically and formally
        adequate.
:param directive: either 'allowed_[read_|write_]roles' or
'allow_item_[read_|write_]roles'.
:param candidate: the candidate setting to be validated.
:param resource: name of the resource to which the candidate settings
refer to.
.. versionadded:: 0.0.4
"""
roles = candidate[directive]
if not isinstance(roles, list):
raise ConfigException("'%s' must be list"
"[%s]." % (directive, resource))
def validate_methods(self, allowed, proposed, item):
""" Compares allowed and proposed methods, raising a `ConfigException`
when they don't match.
:param allowed: a list of supported (allowed) methods.
:param proposed: a list of proposed methods.
:param item: name of the item to which the methods would be applied.
Used when raising the exception.
"""
diff = set(proposed) - set(allowed)
if diff:
raise ConfigException('Unallowed %s method(s): %s. '
'Supported: %s' %
(item, ', '.join(diff),
', '.join(allowed)))
def validate_schema(self, resource, schema):
""" Validates a resource schema.
:param resource: resource name.
:param schema: schema definition for the resource.
.. versionchanged:: 0.6.2
Do not allow '$' and '.' in root and dict field names. #780.
.. versionchanged:: 0.6
ID_FIELD in the schema is not an offender anymore.
.. versionchanged:: 0.5
Add ETAG to automatic fields check.
.. versionchanged:: 0.4
Checks against offending document versioning fields.
Supports embedded data_relation with version.
.. versionchanged:: 0.2
Allow ID_FIELD in resource schema if not of 'objectid' type.
.. versionchanged:: 0.1.1
'collection' setting renamed to 'resource' (data_relation).
Fix order of string arguments in exception message.
.. versionchanged:: 0.1.0
Validation for 'embeddable' fields.
.. versionchanged:: 0.0.5
Validation of the 'data_relation' field rule.
Now collecting offending items in a list and inserting results into
the exception message.
"""
def validate_field_name(field):
forbidden = ['$', '.']
if any(x in field for x in forbidden):
raise SchemaException(
"Field '%s' cannot contain any of the following: '%s'." %
(field, ', '.join(forbidden)))
resource_settings = self.config['DOMAIN'][resource]
# ensure automatically handled fields aren't defined
fields = [eve.DATE_CREATED, eve.LAST_UPDATED, eve.ETAG]
if resource_settings['versioning'] is True:
fields += [
self.config['VERSION'],
self.config['LATEST_VERSION'],
resource_settings['id_field'] +
self.config['VERSION_ID_SUFFIX']]
if resource_settings['soft_delete'] is True:
fields += [self.config['DELETED']]
offenders = []
for field in fields:
if field in schema:
offenders.append(field)
if offenders:
raise SchemaException('field(s) "%s" not allowed in "%s" schema '
'(they will be handled automatically).'
% (', '.join(offenders), resource))
for field, ruleset in schema.items():
validate_field_name(field)
if 'dict' in ruleset.get('type', ''):
for field in ruleset.get('schema', {}).keys():
validate_field_name(field)
# check data_relation rules
if 'data_relation' in ruleset:
if 'resource' not in ruleset['data_relation']:
raise SchemaException("'resource' key is mandatory for "
"the 'data_relation' rule in "
"'%s: %s'" % (resource, field))
if ruleset['data_relation'].get('embeddable', False):
# special care for data_relations with a version
value_field = ruleset['data_relation']['field']
if ruleset['data_relation'].get('version', False):
if 'schema' not in ruleset or \
value_field not in ruleset['schema'] or \
'type' not in ruleset['schema'][value_field]:
raise SchemaException(
"Must defined type for '%s' in schema when "
"declaring an embedded data_relation with"
" version." % value_field
)
# TODO are there other mandatory settings? Validate them here
def set_defaults(self):
""" When not provided, fills individual resource settings with default
or global configuration settings.
.. versionchanged:: 0.4
`versioning`
`VERSION` added to automatic projection (when applicable)
.. versionchanged:: 0.2
Setting of actual resource defaults is delegated to
_set_resource_defaults().
.. versionchanged:: 0.1.1
'default' values that could be assimilated to None (0, None, "")
would be ignored.
'dates' helper removed as datetime conversion is now handled by
the eve.methods.common.data_parse function.
.. versionchanged:: 0.1.0
'embedding'.
Support for optional HATEOAS.
.. versionchanged:: 0.0.9
'auth_username_field' renamed to 'auth_field'.
           Always include automatic fields regardless of datasource projections.
.. versionchanged:: 0.0.8
'mongo_write_concern'
.. versionchanged:: 0.0.7
'extra_response_fields'
.. versionchanged:: 0.0.6
'datasource[projection]'
'projection',
'allow_unknown'
.. versionchanged:: 0.0.5
'auth_username_field'
'filters',
'sorting',
'pagination'.
.. versionchanged:: 0.0.4
'defaults',
'datasource',
'public_methods',
'public_item_methods',
'allowed_roles',
'allowed_item_roles'.
.. versionchanged:: 0.0.3
`item_title` default value.
"""
for resource, settings in self.config['DOMAIN'].items():
self._set_resource_defaults(resource, settings)
def _set_resource_defaults(self, resource, settings):
""" Low-level method which sets default values for one resource.
.. versionchanged:: 0.6.2
Fix: startup crash when both SOFT_DELETE and ALLOW_UNKNOWN are True.
(#722).
.. versionchanged:: 0.6.1
Fix: inclusive projection defined for a datasource is ignored
(#722).
.. versionchanged:: 0.6
Support for 'mongo_indexes'.
.. versionchanged:: 0.5
Don't set default projection if 'allow_unknown' is active (#497).
'internal_resource'
.. versionchanged:: 0.3
Set projection to None when schema is not provided for the resource.
Support for '_media' helper.
.. versionchanged:: 0.2
'resource_title',
'default_sort',
'embedded_fields'.
           Support for endpoint-level authentication classes.
"""
settings.setdefault('url', resource)
settings.setdefault('resource_methods',
self.config['RESOURCE_METHODS'])
settings.setdefault('public_methods',
self.config['PUBLIC_METHODS'])
settings.setdefault('allowed_roles', self.config['ALLOWED_ROLES'])
settings.setdefault('allowed_read_roles',
self.config['ALLOWED_READ_ROLES'])
settings.setdefault('allowed_write_roles',
self.config['ALLOWED_WRITE_ROLES'])
settings.setdefault('cache_control', self.config['CACHE_CONTROL'])
settings.setdefault('cache_expires', self.config['CACHE_EXPIRES'])
settings.setdefault('id_field', self.config['ID_FIELD'])
settings.setdefault('item_lookup_field',
self.config['ITEM_LOOKUP_FIELD'])
settings.setdefault('item_url', self.config['ITEM_URL'])
settings.setdefault('resource_title', settings['url'])
settings.setdefault('item_title',
resource.rstrip('s').capitalize())
settings.setdefault('item_lookup', self.config['ITEM_LOOKUP'])
settings.setdefault('public_item_methods',
self.config['PUBLIC_ITEM_METHODS'])
settings.setdefault('allowed_item_roles',
self.config['ALLOWED_ITEM_ROLES'])
settings.setdefault('allowed_item_read_roles',
self.config['ALLOWED_ITEM_READ_ROLES'])
settings.setdefault('allowed_item_write_roles',
self.config['ALLOWED_ITEM_WRITE_ROLES'])
settings.setdefault('allowed_filters',
self.config['ALLOWED_FILTERS'])
settings.setdefault('sorting', self.config['SORTING'])
settings.setdefault('embedding', self.config['EMBEDDING'])
settings.setdefault('embedded_fields', [])
settings.setdefault('pagination', self.config['PAGINATION'])
settings.setdefault('projection', self.config['PROJECTION'])
settings.setdefault('versioning', self.config['VERSIONING'])
settings.setdefault('soft_delete', self.config['SOFT_DELETE'])
settings.setdefault('bulk_enabled', self.config['BULK_ENABLED'])
settings.setdefault('internal_resource',
self.config['INTERNAL_RESOURCE'])
settings.setdefault('etag_ignore_fields', None)
        # TODO make sure that we really need the test below
if settings['item_lookup']:
item_methods = self.config['ITEM_METHODS']
else:
item_methods = eve.ITEM_METHODS
settings.setdefault('item_methods', item_methods)
settings.setdefault('auth_field',
self.config['AUTH_FIELD'])
settings.setdefault('allow_unknown', self.config['ALLOW_UNKNOWN'])
settings.setdefault('transparent_schema_rules',
self.config['TRANSPARENT_SCHEMA_RULES'])
settings.setdefault('extra_response_fields',
self.config['EXTRA_RESPONSE_FIELDS'])
settings.setdefault('mongo_write_concern',
self.config['MONGO_WRITE_CONCERN'])
settings.setdefault('mongo_indexes', {})
settings.setdefault('hateoas',
self.config['HATEOAS'])
settings.setdefault('authentication', self.auth if self.auth else None)
# empty schemas are allowed for read-only access to resources
schema = settings.setdefault('schema', {})
self.set_schema_defaults(schema, settings['id_field'])
# 'defaults' helper set contains the names of fields with default
# values in their schema definition.
# TODO support default values for embedded documents.
settings['defaults'] = build_defaults(schema)
# list of all media fields for the resource
settings['_media'] = [field for field, definition in schema.items() if
definition.get('type') == 'media']
if settings['_media'] and not self.media:
            raise ConfigException('A media storage class of type '
                                  'eve.io.media.MediaStorage must be defined '
                                  'for "media" fields to be properly stored.')
self._set_resource_datasource(resource, schema, settings)
def _set_resource_datasource(self, resource, schema, settings):
""" Set the default values for the resource 'datasource' setting.
.. versionadded:: 0.7
"""
settings.setdefault('datasource', {})
ds = settings['datasource']
ds.setdefault('source', resource)
ds.setdefault('filter', None)
ds.setdefault('default_sort', None)
self._set_resource_projection(ds, schema, settings)
aggregation = ds.setdefault('aggregation', None)
if aggregation:
aggregation.setdefault('options', {})
# endpoints serving aggregation queries are read-only and do not
# support item lookup.
settings['resource_methods'] = ['GET']
settings['item_lookup'] = False
def _set_resource_projection(self, ds, schema, settings):
""" Set datasource projection for a resource
.. versionchanged:: 0.6.3
Fix: If datasource source is specified no fields are included by
default. Closes #842.
.. versionadded:: 0.6.2
"""
projection = ds.get('projection', {})
# check if any exclusion projection is defined
exclusion = any(((k, v) for k, v in projection.items() if v == 0)) \
if projection else None
# If no exclusion projection is defined, enhance the projection
# with automatic fields. Using both inclusion and exclusion will
# be rejected by Mongo
if not exclusion and len(schema) and \
settings['allow_unknown'] is False:
if not projection:
projection.update(dict((field, 1) for (field) in schema))
                # enable retrieval of actual schema fields only. Any db
                # fields not included in the schema won't be returned.
# despite projection, automatic fields are always included.
projection[settings['id_field']] = 1
projection[self.config['LAST_UPDATED']] = 1
projection[self.config['DATE_CREATED']] = 1
projection[self.config['ETAG']] = 1
if settings['versioning'] is True:
projection[self.config['VERSION']] = 1
projection[
settings['id_field'] +
self.config['VERSION_ID_SUFFIX']] = 1
else:
# all fields are returned.
projection = None
ds.setdefault('projection', projection)
if settings['soft_delete'] is True and not exclusion and \
ds['projection'] is not None:
ds['projection'][self.config['DELETED']] = 1
# 'defaults' helper set contains the names of fields with default
# values in their schema definition.
# TODO support default values for embedded documents.
settings['defaults'] = build_defaults(schema)
# list of all media fields for the resource
settings['_media'] = [field for field, definition in schema.items() if
definition.get('type') == 'media']
if settings['_media'] and not self.media:
raise ConfigException('A media storage class of type '
' eve.io.media.MediaStorage must be defined '
'for "media" fields to be properly stored.')
def set_schema_defaults(self, schema, id_field):
""" When not provided, fills individual schema settings with default
or global configuration settings.
:param schema: the resource schema to be initialized with default
values
.. versionchanged: 0.6
Add default ID_FIELD to the schema, so documents with an existing
ID_FIELD can also be stored.
.. versionchanged: 0.0.7
Setting the default 'field' value would not happen if the
'data_relation' was nested deeper than the first schema level (#60).
.. versionadded: 0.0.5
"""
# Don't set id_field 'unique' since we already handle
# DuplicateKeyConflict in the mongo layer. This also
# avoids a performance hit (with 'unique' rule set, we would
# end up with an extra db loopback on every insert).
schema.setdefault(id_field, {'type': 'objectid'})
# set default 'field' value for all 'data_relation' rulesets, however
# nested
for data_relation in list(extract_key_values('data_relation', schema)):
data_relation.setdefault('field', id_field)
@property
def api_prefix(self):
""" Prefix to API endpoints.
.. versionadded:: 0.2
"""
return api_prefix(self.config['URL_PREFIX'],
self.config['API_VERSION'])
def _add_resource_url_rules(self, resource, settings):
""" Builds the API url map for one resource. Methods are enabled for
each mapped endpoint, as configured in the settings.
.. versionchanged:: 0.5
Don't add resource to url rules if it's flagged as internal.
Strip regexes out of config.URLS helper. Closes #466.
.. versionadded:: 0.2
"""
self.config['SOURCES'][resource] = settings['datasource']
if settings['internal_resource']:
return
url = '%s/%s' % (self.api_prefix, settings['url'])
pretty_url = settings['url']
if '<' in pretty_url:
pretty_url = pretty_url[:pretty_url.index('<') + 1] + \
pretty_url[pretty_url.rindex(':') + 1:]
self.config['URLS'][resource] = pretty_url
# resource endpoint
endpoint = resource + "|resource"
self.add_url_rule(url, endpoint, view_func=collections_endpoint,
methods=settings['resource_methods'] + ['OPTIONS'])
# item endpoint
if settings['item_lookup']:
item_url = '%s/<%s:%s>' % (url, settings['item_url'],
settings['item_lookup_field'])
endpoint = resource + "|item_lookup"
self.add_url_rule(item_url, endpoint,
view_func=item_endpoint,
methods=settings['item_methods'] + ['OPTIONS'])
if 'PATCH' in settings['item_methods']:
                # support for POST with X-HTTP-Method-Override header for
# clients not supporting PATCH. Also see item_endpoint() in
# endpoints.py
endpoint = resource + "|item_post_override"
self.add_url_rule(item_url, endpoint, view_func=item_endpoint,
methods=['POST'])
# also enable an alternative lookup/endpoint if allowed
lookup = settings.get('additional_lookup')
if lookup:
l_type = settings['schema'][lookup['field']]['type']
if l_type == 'integer':
item_url = '%s/<int:%s>' % (url, lookup['field'])
else:
item_url = '%s/<%s:%s>' % (url, lookup['url'],
lookup['field'])
endpoint = resource + "|item_additional_lookup"
self.add_url_rule(item_url, endpoint, view_func=item_endpoint,
methods=['GET', 'OPTIONS'])
def _init_url_rules(self):
""" Builds the API url map. Methods are enabled for each mapped
endpoint, as configured in the settings.
.. versionchanged:: 0.4
Renamed from '_add_url_rules' to '_init_url_rules' to make code more
DRY. Individual resource rules get built from register_resource now.
.. versionchanged:: 0.2
Delegate adding of resource rules to _add_resource_rules().
.. versionchanged:: 0.1.1
Simplified URL rules. Not using regexes anymore to return the
endpoint URL to the endpoint function. This allows for nested
endpoints to function properly.
.. versionchanged:: 0.0.9
Handle the case of 'additional_lookup' field being an integer.
.. versionchanged:: 0.0.5
Support for Cross-Origin Resource Sharing. 'OPTIONS' method is
explicitly routed to standard endpoints to allow for proper CORS
processing.
.. versionchanged:: 0.0.4
config.SOURCES. Maps resources to their datasources.
.. versionchanged:: 0.0.3
Support for API_VERSION as an endpoint prefix.
"""
# helpers
self.config['URLS'] = {} # maps resources to urls
self.config['SOURCES'] = {} # maps resources to their datasources
# we choose not to care about trailing slashes at all.
# Both '/resource/' and '/resource' will work, same with
# '/resource/<id>/' and '/resource/<id>'
self.url_map.strict_slashes = False
# home page (API entry point)
self.add_url_rule('%s/' % self.api_prefix, 'home',
view_func=home_endpoint, methods=['GET', 'OPTIONS'])
def register_resource(self, resource, settings):
""" Registers new resource to the domain.
Under the hood this validates given settings, updates default values
and adds necessary URL routes (builds api url map).
If there exists some resource with given name, it is overwritten.
:param resource: resource name.
:param settings: settings for given resource.
.. versionchanged:: 0.6
Support for 'mongo_indexes'.
.. versionchanged:: 0.4
Support for document versioning.
.. versionadded:: 0.2
"""
# this line only makes sense when we call this function outside of the
# standard Eve setup routine, but it doesn't hurt to still call it
self.config['DOMAIN'][resource] = settings
# set up resource
self._set_resource_defaults(resource, settings)
self._validate_resource_settings(resource, settings)
self._add_resource_url_rules(resource, settings)
# add rules for version control collections if appropriate
if settings['versioning'] is True:
versioned_resource = resource + self.config['VERSIONS']
self.config['DOMAIN'][versioned_resource] = \
copy.deepcopy(self.config['DOMAIN'][resource])
self.config['DOMAIN'][versioned_resource]['datasource']['source'] \
+= self.config['VERSIONS']
self.config['SOURCES'][versioned_resource] = \
copy.deepcopy(self.config['SOURCES'][resource])
self.config['SOURCES'][versioned_resource]['source'] += \
self.config['VERSIONS']
# the new versioned resource also needs URL rules
self._add_resource_url_rules(
versioned_resource,
self.config['DOMAIN'][versioned_resource]
)
# create the mongo db indexes
mongo_indexes = self.config['DOMAIN'][resource]['mongo_indexes']
if mongo_indexes:
for name, value in mongo_indexes.items():
if isinstance(value, tuple):
list_of_keys, index_options = value
else:
list_of_keys = value
index_options = {}
create_index(self, resource, name, list_of_keys, index_options)
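    # Usage sketch (hypothetical resource name and schema): once an Eve app
    # exists, a resource can also be registered at runtime, e.g.
    #
    #     app = Eve()
    #     app.register_resource('people', {
    #         'schema': {'name': {'type': 'string'}},
    #         'resource_methods': ['GET', 'POST'],
    #     })
    #
    # after which the settings are validated, completed with defaults and
    # routed exactly as in the standard startup path above.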
def register_error_handlers(self):
""" Register custom error handlers so we make sure that all errors
return a parseable body.
.. versionadded:: 0.4
"""
for code in self.config['STANDARD_ERRORS']:
self.error_handler_spec[None][code] = error_endpoint
def _init_oplog(self):
""" If enabled, configures the OPLOG endpoint.
.. versionchanged:: 0.7
Add 'u' field to oplog audit schema. See #846.
.. versionadded:: 0.5
"""
name, endpoint, audit, extra = (
self.config['OPLOG_NAME'],
self.config['OPLOG_ENDPOINT'],
self.config['OPLOG_AUDIT'],
self.config['OPLOG_RETURN_EXTRA_FIELD']
)
settings = self.config['DOMAIN'].setdefault(name, {})
settings.setdefault('datasource', {'source': name})
# this endpoint is always read-only
settings['resource_methods'] = ['GET']
settings['item_methods'] = ['GET']
if endpoint:
settings.setdefault('url', endpoint)
settings['internal_resource'] = False
else:
# make it an internal resource
settings['url'] = name
settings['internal_resource'] = True
# schema is also fixed. it is needed because otherwise we
# would end up exposing the AUTH_FIELD when User-Restricted-
# Resource-Access is enabled.
settings['schema'] = {
'r': {},
'o': {},
'i': {},
}
if extra:
settings['schema'].update(
{'extra': {}}
)
if audit:
settings['schema'].update(
{
'ip': {},
'c': {},
'u': {},
}
)
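    # Configuration sketch: with settings along the lines of
    #
    #     OPLOG_ENDPOINT = 'oplog'   # expose the oplog at /oplog (read-only)
    #     OPLOG_AUDIT = True         # also keep the 'ip', 'c' and 'u' fields
    #
    # the collection named by OPLOG_NAME becomes queryable like any other
    # GET-only resource; leaving OPLOG_ENDPOINT unset keeps it internal, as
    # handled above.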
def _init_media_endpoint(self):
endpoint = self.config['MEDIA_ENDPOINT']
if endpoint:
media_url = '%s/%s/<%s:_id>' % (self.api_prefix,
endpoint,
self.config['MEDIA_URL'])
self.add_url_rule(media_url, 'media',
view_func=media_endpoint, methods=['GET'])
def _init_schema_endpoint(self):
"""Configures the schema endpoint if set in configuration.
"""
endpoint = self.config['SCHEMA_ENDPOINT']
if endpoint:
schema_url = '%s/%s' % (self.api_prefix, endpoint)
# add schema collections url
self.add_url_rule(schema_url, 'schema_collection',
view_func=schema_collection_endpoint,
methods=['GET', 'OPTIONS'])
# add schema item url
self.add_url_rule(schema_url + '/<resource>', 'schema_item',
view_func=schema_item_endpoint,
methods=['GET', 'OPTIONS'])
def __call__(self, environ, start_response):
""" If HTTP_X_METHOD_OVERRIDE is included with the request and method
override is allowed, make sure the override method is returned to Eve
as the request method, so normal routing and method validation can be
performed.
"""
if self.config['ALLOW_OVERRIDE_HTTP_METHOD']:
environ['REQUEST_METHOD'] = environ.get(
'HTTP_X_HTTP_METHOD_OVERRIDE',
environ['REQUEST_METHOD']).upper()
return super(Eve, self).__call__(environ, start_response)
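# Client-side sketch (hypothetical endpoint): when ALLOW_OVERRIDE_HTTP_METHOD
# is enabled, a client restricted to GET/POST can tunnel other verbs, e.g.
#
#     import requests
#     requests.post('http://localhost:5000/people/someid',
#                   json={'name': 'new name'},
#                   headers={'X-HTTP-Method-Override': 'PATCH'})
#
# The override above rewrites REQUEST_METHOD before routing, so the request
# is validated and dispatched as a PATCH.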
|
mugurrus/eve
|
eve/flaskapp.py
|
Python
|
bsd-3-clause
| 40,449
|
import asyncio
import threading
import time
import pytest
from asgiref.sync import ThreadSensitiveContext, async_to_sync, sync_to_async
contextvars = pytest.importorskip("contextvars")
foo = contextvars.ContextVar("foo")
@pytest.mark.asyncio
async def test_thread_sensitive_with_context_different():
result_1 = {}
result_2 = {}
@sync_to_async
def store_thread(result):
result["thread"] = threading.current_thread()
async def fn(result):
async with ThreadSensitiveContext():
await store_thread(result)
# Run it (in true parallel!)
await asyncio.wait(
[asyncio.create_task(fn(result_1)), asyncio.create_task(fn(result_2))]
)
# They should not have run in the main thread, and on different threads
assert result_1["thread"] != threading.current_thread()
assert result_1["thread"] != result_2["thread"]
@pytest.mark.asyncio
async def test_sync_to_async_contextvars():
"""
Tests to make sure that contextvars from the calling context are
present in the called context, and that any changes in the called context
are then propagated back to the calling context.
"""
# Define sync function
def sync_function():
time.sleep(1)
assert foo.get() == "bar"
foo.set("baz")
return 42
# Ensure outermost detection works
# Wrap it
foo.set("bar")
async_function = sync_to_async(sync_function)
assert await async_function() == 42
assert foo.get() == "baz"
def test_async_to_sync_contextvars():
"""
Tests to make sure that contextvars from the calling context are
present in the called context, and that any changes in the called context
are then propagated back to the calling context.
"""
# Define sync function
async def async_function():
await asyncio.sleep(1)
assert foo.get() == "bar"
foo.set("baz")
return 42
# Ensure outermost detection works
# Wrap it
foo.set("bar")
sync_function = async_to_sync(async_function)
assert sync_function() == 42
assert foo.get() == "baz"
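# Standalone sketch of the same round-trip outside pytest (it reuses the
# module-level `foo` contextvar above): the value set before the
# sync_to_async call is visible in the worker thread, and the worker's
# change propagates back to the calling coroutine.
def _demo_contextvar_roundtrip():
    def worker():
        assert foo.get() == "bar"
        foo.set("baz")
    async def runner():
        foo.set("bar")
        await sync_to_async(worker)()
        assert foo.get() == "baz"
    asyncio.run(runner())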
|
django/asgiref
|
tests/test_sync_contextvars.py
|
Python
|
bsd-3-clause
| 2,121
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ziggurat_foundations.models.base import get_db_session
from ziggurat_foundations.models.services import BaseService
__all__ = ["UserResourcePermissionService"]
class UserResourcePermissionService(BaseService):
@classmethod
def get(cls, user_id, resource_id, perm_name, db_session=None):
"""
Fetch row using primary key -
will use existing object in session if already present
:param user_id:
:param resource_id:
:param perm_name:
:param db_session:
:return:
"""
db_session = get_db_session(db_session)
return db_session.query(cls.model).get([user_id, resource_id, perm_name])
@classmethod
def by_resource_user_and_perm(
cls, user_id, perm_name, resource_id, db_session=None
):
"""
return all instances by user name, perm name and resource id
:param user_id:
:param perm_name:
:param resource_id:
:param db_session:
:return:
"""
db_session = get_db_session(db_session)
query = db_session.query(cls.model).filter(cls.model.user_id == user_id)
query = query.filter(cls.model.resource_id == resource_id)
query = query.filter(cls.model.perm_name == perm_name)
return query.first()
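# Usage sketch (hypothetical session and ids): once ziggurat binds this
# service to a concrete UserResourcePermission model, lookups read like:
#
#     perm = UserResourcePermissionService.by_resource_user_and_perm(
#         user_id=1, perm_name='edit', resource_id=42, db_session=session)
#     if perm is None:
#         pass  # user 1 has no direct 'edit' permission on resource 42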
|
ergo/ziggurat_foundations
|
ziggurat_foundations/models/services/user_resource_permission.py
|
Python
|
bsd-3-clause
| 1,373
|
"""
All metrics in this file are called by cohorts.analyze_cohorts_for_model and follow the format:
function_name(cohort, start_date, end_date)
"""
from datetime import timedelta
def example_metric(cohort, start_date, end_date):
"""An example metric that returns the number of members in a queryset
:param cohorts.Cohort cohort: The cohort to analyze
:param datetime.datetime start_date: The lower bounds of the date range to analyze
:param datetime.datetime end_date: The upper bounds of the date range to analyze
:return: A list of metric results to be added to the analysis dictionary
"""
result = []
window_start_date = start_date
window_end_date = window_start_date + timedelta(weeks=1)
while window_end_date < end_date:
result.append(cohort.queryset.count())
        # advance to the next one-week window (using '+=' here would try to
        # add two datetimes together and raise a TypeError)
        window_start_date = window_end_date + timedelta(days=1)
        window_end_date = window_start_date + timedelta(weeks=1)
return result
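# Invocation sketch (hypothetical cohort object): every metric follows the
# contract documented at the top of this file, so a caller would do roughly
#
#     from datetime import datetime
#     counts = example_metric(my_cohort,
#                             datetime(2018, 1, 1), datetime(2018, 3, 1))
#
# and get back one queryset count per weekly window inside that range.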
|
jturner30/django_cohort_analysis
|
django_cohort_analysis/metrics.py
|
Python
|
bsd-3-clause
| 965
|
import cPickle
from datetime import timedelta
from uuid import uuid4
from redis import Redis
from werkzeug.datastructures import CallbackDict
from flask.sessions import SessionInterface, SessionMixin
class RedisSession(CallbackDict, SessionMixin):
def __init__(self, initial=None, sid=None, new=False):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.sid = sid
self.new = new
        self.modified = False
class RedisSessionInterface(SessionInterface):
serializer = cPickle
session_class = RedisSession
def __init__(self, redis=None, prefix='session:'):
if redis is None:
redis = Redis()
self.redis = redis
self.prefix = prefix
def generate_sid(self):
return str(uuid4())
def get_redis_expiration_time(self, app, session):
if session.permanent:
return app.permanent_session_lifetime
return timedelta(days=1)
def open_session(self, app, request):
sid = request.cookies.get(app.session_cookie_name)
if not sid:
sid = self.generate_sid()
return self.session_class(sid=sid, new=True)
val = self.redis.get(self.prefix + sid)
if val is not None:
data = self.serializer.loads(val)
return self.session_class(data, sid=sid)
return self.session_class(sid=sid, new=True)
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
if not session:
self.redis.delete(self.prefix + session.sid)
if session.modified:
response.delete_cookie(app.session_cookie_name, domain=domain)
return
redis_exp = self.get_redis_expiration_time(app, session)
cookie_exp = self.get_expiration_time(app, session)
val = self.serializer.dumps(dict(session), protocol=-1)
        # NOTE: this is the redis-py 2.x argument order (name, value, time);
        # redis-py >= 3 expects setex(name, time, value) instead.
        self.redis.setex(self.prefix + session.sid, val,
                         int(redis_exp.total_seconds()))
response.set_cookie(app.session_cookie_name, session.sid,
expires=cookie_exp, httponly=True, domain=domain)
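# Wiring sketch (hypothetical app): the interface replaces Flask's default
# cookie sessions with server-side Redis storage:
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.session_interface = RedisSessionInterface()
#
# Any view touching `session` then reads and writes the pickled dict stored
# under 'session:<sid>' in Redis, with only the sid kept in the cookie.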
|
datamade/geomancer
|
geomancer/redis_session.py
|
Python
|
mit
| 2,183
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class VirtualNetworkUsagePaged(Paged):
"""
    A paging container for iterating over a list of :class:`VirtualNetworkUsage <azure.mgmt.network.v2017_08_01.models.VirtualNetworkUsage>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[VirtualNetworkUsage]'}
}
def __init__(self, *args, **kwargs):
super(VirtualNetworkUsagePaged, self).__init__(*args, **kwargs)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/virtual_network_usage_paged.py
|
Python
|
mit
| 987
|
class Node(object):
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
def root_to_leaf(root, path):
    """Print every root-to-leaf path as space-separated node values."""
    if root is None:
        return
    path.append(root.val)
    if root.left is None and root.right is None:
        # leaf: the accumulated path is one complete root-to-leaf path
        print(" ".join(map(str, path)))
    else:
        root_to_leaf(root.left, path)
        root_to_leaf(root.right, path)
    # undo this node's contribution before returning to the caller
    path.pop()
tree = Node(1,Node(2,Node(4),Node(5)),Node(3,Node(6),Node(7)))
root_to_leaf(tree,[])
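# Expected output for the sample tree above:
#   1 2 4
#   1 2 5
#   1 3 6
#   1 3 7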
|
bkpathak/HackerRank-Problems
|
python/tree/root_to_leaf.py
|
Python
|
mit
| 526
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
e189
* divide dominant appliance power
* mse
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
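# Worked example (plain numbers, not Theano): with THRESHOLD = 0,
# t = [0, 0, 2, 4] and x = [1, 0, 3, 4] give
#   above-threshold squared errors (t > 0):  (3-2)**2, (4-4)**2 -> mean 0.5
#   below-threshold squared errors (t <= 0): (1-0)**2, (0-0)**2 -> mean 0.5
# so scaled_cost = (0.5 + 0.5) / 2 = 0.5; "on" and "off" samples are weighted
# equally no matter how many of each the batch contains.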
def exp_a(name):
# global source
# source = RealApplianceSource(
# filename='/data/dk3810/ukdale.h5',
# appliances=[
# ['fridge freezer', 'fridge', 'freezer'],
# 'hair straighteners',
# 'television'
# # 'dish washer',
# # ['washer dryer', 'washing machine']
# ],
# max_appliance_powers=[2500] * 5,
# on_power_thresholds=[5] * 5,
# max_input_power=2500,
# min_on_durations=[60, 60, 60, 1800, 1800],
# min_off_durations=[12, 12, 12, 1800, 600],
# window=("2013-06-01", "2014-07-01"),
# seq_length=1520,
# output_one_appliance=False,
# boolean_targets=False,
# train_buildings=[1],
# validation_buildings=[1],
# skip_probability=0.7,
# n_seq_per_batch=25,
# input_padding=4,
# include_diff=False,
# clip_appliance_power=False
# )
    # NOTE: `source` must already be defined at module level (e.g. by
    # restoring the RealApplianceSource block commented out above);
    # otherwise exp_a() raises a NameError.
    net = Net(
experiment_name=name,
source=source,
save_plot_interval=1000,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=0.1, clip_range=(-1, 1)),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 5,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None
# 'W': Uniform()
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
        except Exception as exception:
            # report the failure before re-raising; uncomment the next line
            # to drop into a debugger at the failure point instead
            print("EXCEPTION:", exception)
            # import ipdb; ipdb.set_trace()
            raise
if __name__ == "__main__":
main()
|
mmottahedi/neuralnilm_prototype
|
scripts/e201.py
|
Python
|
mit
| 6,739
|
import importlib
from markupupdowndown.config import load_main_config, ConfigException
class PluginException(Exception):
pass
PLUGINS_MODULE_PREFIX = 'upup'
def load_plugin_modules(config):
if 'plugins' not in config:
raise PluginException("No plugins found in config")
plugin_dict = dict()
for plugin in config['plugins']:
module_name = '%s_%s' % (PLUGINS_MODULE_PREFIX, plugin)
plugin_module = importlib.import_module(module_name)
plugin_dict[plugin] = plugin_module
return plugin_dict
def load_plugin_function(config, plugin_name, function_name):
plugin_dict = load_plugin_modules(config)
if plugin_name not in plugin_dict:
err_msg = "Plugin name does not exist: %s"
raise PluginException(err_msg % (plugin_name))
plugin_module = plugin_dict[plugin_name]
if not hasattr(plugin_module, function_name):
err_msg = "Plugin %s missing `%s` function"
raise PluginException(err_msg % (plugin_name, function_name))
function = getattr(plugin_module, function_name)
return function
def cmd_function_name(plugin_name, sub_cmd_name):
cmd_name = plugin_name
if sub_cmd_name:
cmd_name = sub_cmd_name
return 'cmd_%s' % cmd_name
def load_plugin_cmd_parsers(config, sub_cmd_name=None):
plugin_dict = load_plugin_modules(config)
parsers = list()
for name, plugin in plugin_dict.items():
func_name = cmd_function_name(name, sub_cmd_name)
if hasattr(plugin, func_name):
init_cmd_parser = getattr(plugin, func_name)
parsers.append(init_cmd_parser)
return parsers
def add_plugin_cmd_parsers(subparsers, sub_cmd_name=None):
try:
config = load_main_config()
for cmd_parser in load_plugin_cmd_parsers(
config, sub_cmd_name=sub_cmd_name
):
cmd_parser(subparsers)
# Happens when command is called outside project directory
except ConfigException:
pass
# Happens when no plugins are configured
except PluginException:
pass
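# Plugin shape sketch (hypothetical module): a plugin listed in the config as
# 'example' is imported as a module named 'upup_example' (see
# PLUGINS_MODULE_PREFIX above) and can expose, e.g.
#
#     # upup_example.py
#     def cmd_example(subparsers):
#         parser = subparsers.add_parser('example')
#         parser.set_defaults(func=run)
#     def run(args):
#         print('example plugin ran')
#
# load_plugin_function(config, 'example', 'run') would then return `run`, and
# add_plugin_cmd_parsers() would call cmd_example() to register the command.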
|
MarkUpUpDownDown/markupupdowndown
|
markupupdowndown/plugins/__init__.py
|
Python
|
mit
| 2,093
|
import os
import argparse
from flask import current_app
from flask.ext.script import Manager
from alembic import __version__ as __alembic_version__
from alembic.config import Config as AlembicConfig
from alembic import command
alembic_version = tuple([int(v) for v in __alembic_version__.split('.')[0:3]])
class _MigrateConfig(object):
def __init__(self, migrate, db, **kwargs):
self.migrate = migrate
self.db = db
self.directory = migrate.directory
self.configure_args = kwargs
@property
def metadata(self):
"""
Backwards compatibility, in old releases app.extensions['migrate']
was set to db, and env.py accessed app.extensions['migrate'].metadata
"""
return self.db.metadata
class Config(AlembicConfig):
def get_template_directory(self):
package_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(package_dir, 'templates')
class Migrate(object):
def __init__(self, app=None, db=None, directory='migrations', **kwargs):
self.configure_callbacks = []
self.directory = directory
if app is not None and db is not None:
self.init_app(app, db, directory, **kwargs)
def init_app(self, app, db, directory=None, **kwargs):
self.directory = directory or self.directory
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['migrate'] = _MigrateConfig(self, db, **kwargs)
def configure(self, f):
self.configure_callbacks.append(f)
return f
def call_configure_callbacks(self, config):
for f in self.configure_callbacks:
config = f(config)
return config
def get_config(self, directory, x_arg=None, opts=None):
if directory is None:
directory = self.directory
config = Config(os.path.join(directory, 'alembic.ini'))
config.set_main_option('script_location', directory)
if config.cmd_opts is None:
config.cmd_opts = argparse.Namespace()
for opt in opts or []:
setattr(config.cmd_opts, opt, True)
if x_arg is not None:
if not getattr(config.cmd_opts, 'x', None):
setattr(config.cmd_opts, 'x', [x_arg])
else:
config.cmd_opts.x.append(x_arg)
return self.call_configure_callbacks(config)
MigrateCommand = Manager(usage='Perform database migrations')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
@MigrateCommand.option('--multidb', dest='multidb', action='store_true',
default=False,
help=("Multiple databases migraton (default is "
"False)"))
def init(directory=None, multidb=False):
"""Generates a new migration"""
if directory is None:
directory = current_app.extensions['migrate'].directory
config = Config()
config.set_main_option('script_location', directory)
config.config_file_name = os.path.join(directory, 'alembic.ini')
config = current_app.extensions['migrate'].\
migrate.call_configure_callbacks(config)
if multidb:
command.init(config, directory, 'flask-multidb')
else:
command.init(config, directory, 'flask')
@MigrateCommand.option('--rev-id', dest='rev_id', default=None,
help=('Specify a hardcoded revision id instead of '
'generating one'))
@MigrateCommand.option('--version-path', dest='version_path', default=None,
help=('Specify specific path from config for version '
'file'))
@MigrateCommand.option('--branch-label', dest='branch_label', default=None,
help=('Specify a branch label to apply to the new '
'revision'))
@MigrateCommand.option('--splice', dest='splice', action='store_true',
default=False,
help=('Allow a non-head revision as the "head" to '
'splice onto'))
@MigrateCommand.option('--head', dest='head', default='head',
help=('Specify head revision or <branchname>@head to '
'base new revision on'))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('--autogenerate', dest='autogenerate',
action='store_true', default=False,
                       help=('Populate revision script with candidate migration '
                             'operations, based on comparison of database to '
'model'))
@MigrateCommand.option('-m', '--message', dest='message', default=None)
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def revision(directory=None, message=None, autogenerate=False, sql=False,
head='head', splice=False, branch_label=None, version_path=None,
rev_id=None):
"""Create a new revision file."""
config = current_app.extensions['migrate'].migrate.get_config(directory)
if alembic_version >= (0, 7, 0):
command.revision(config, message, autogenerate=autogenerate, sql=sql,
head=head, splice=splice, branch_label=branch_label,
version_path=version_path, rev_id=rev_id)
else:
command.revision(config, message, autogenerate=autogenerate, sql=sql)
@MigrateCommand.option('--rev-id', dest='rev_id', default=None,
help=('Specify a hardcoded revision id instead of '
'generating one'))
@MigrateCommand.option('--version-path', dest='version_path', default=None,
help=('Specify specific path from config for version '
'file'))
@MigrateCommand.option('--branch-label', dest='branch_label', default=None,
help=('Specify a branch label to apply to the new '
'revision'))
@MigrateCommand.option('--splice', dest='splice', action='store_true',
default=False,
help=('Allow a non-head revision as the "head" to '
'splice onto'))
@MigrateCommand.option('--head', dest='head', default='head',
help=('Specify head revision or <branchname>@head to '
'base new revision on'))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('-m', '--message', dest='message', default=None)
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def migrate(directory=None, message=None, sql=False, head='head', splice=False,
branch_label=None, version_path=None, rev_id=None):
"""Alias for 'revision --autogenerate'"""
config = current_app.extensions['migrate'].migrate.get_config(
directory, opts=['autogenerate'])
if alembic_version >= (0, 7, 0):
command.revision(config, message, autogenerate=True, sql=sql,
head=head, splice=splice, branch_label=branch_label,
version_path=version_path, rev_id=rev_id)
else:
command.revision(config, message, autogenerate=True, sql=sql)
@MigrateCommand.option('revision', nargs='?', default='head',
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def edit(revision='current', directory=None):
"""Edit current revision."""
if alembic_version >= (0, 8, 0):
config = current_app.extensions['migrate'].migrate.get_config(
directory)
command.edit(config, revision)
else:
raise RuntimeError('Alembic 0.8.0 or greater is required')
@MigrateCommand.option('--rev-id', dest='rev_id', default=None,
help=('Specify a hardcoded revision id instead of '
'generating one'))
@MigrateCommand.option('--branch-label', dest='branch_label', default=None,
help=('Specify a branch label to apply to the new '
'revision'))
@MigrateCommand.option('-m', '--message', dest='message', default=None)
@MigrateCommand.option('revisions', nargs='+',
help='one or more revisions, or "heads" for all heads')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def merge(directory=None, revisions='', message=None, branch_label=None,
rev_id=None):
"""Merge two revisions together. Creates a new migration file"""
if alembic_version >= (0, 7, 0):
config = current_app.extensions['migrate'].migrate.get_config(
directory)
command.merge(config, revisions, message=message,
branch_label=branch_label, rev_id=rev_id)
else:
raise RuntimeError('Alembic 0.7.0 or greater is required')
@MigrateCommand.option('--tag', dest='tag', default=None,
help=("Arbitrary 'tag' name - can be used by custom "
"env.py scripts"))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('revision', nargs='?', default='head',
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
@MigrateCommand.option('-x', '--x-arg', dest='x_arg', default=None,
help=("Additional arguments consumed by "
"custom env.py scripts"))
def upgrade(directory=None, revision='head', sql=False, tag=None, x_arg=None):
"""Upgrade to a later version"""
config = current_app.extensions['migrate'].migrate.get_config(directory,
x_arg=x_arg)
command.upgrade(config, revision, sql=sql, tag=tag)
@MigrateCommand.option('--tag', dest='tag', default=None,
help=("Arbitrary 'tag' name - can be used by custom "
"env.py scripts"))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('revision', nargs='?', default="-1",
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
@MigrateCommand.option('-x', '--x-arg', dest='x_arg', default=None,
help=("Additional arguments consumed by "
"custom env.py scripts"))
def downgrade(directory=None, revision='-1', sql=False, tag=None, x_arg=None):
"""Revert to a previous version"""
config = current_app.extensions['migrate'].migrate.get_config(directory,
x_arg=x_arg)
if sql and revision == '-1':
revision = 'head:-1'
command.downgrade(config, revision, sql=sql, tag=tag)
@MigrateCommand.option('revision', nargs='?', default="head",
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def show(directory=None, revision='head'):
"""Show the revision denoted by the given symbol."""
if alembic_version >= (0, 7, 0):
config = current_app.extensions['migrate'].migrate.get_config(
directory)
command.show(config, revision)
else:
raise RuntimeError('Alembic 0.7.0 or greater is required')
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-r', '--rev-range', dest='rev_range', default=None,
help='Specify a revision range; format is [start]:[end]')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def history(directory=None, rev_range=None, verbose=False):
"""List changeset scripts in chronological order."""
config = current_app.extensions['migrate'].migrate.get_config(directory)
if alembic_version >= (0, 7, 0):
command.history(config, rev_range, verbose=verbose)
else:
command.history(config, rev_range)
@MigrateCommand.option('--resolve-dependencies', dest='resolve_dependencies',
action='store_true', default=False,
help='Treat dependency versions as down revisions')
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def heads(directory=None, verbose=False, resolve_dependencies=False):
"""Show current available heads in the script directory"""
if alembic_version >= (0, 7, 0):
config = current_app.extensions['migrate'].migrate.get_config(
directory)
command.heads(config, verbose=verbose,
resolve_dependencies=resolve_dependencies)
else:
raise RuntimeError('Alembic 0.7.0 or greater is required')
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def branches(directory=None, verbose=False):
"""Show current branch points"""
config = current_app.extensions['migrate'].migrate.get_config(directory)
if alembic_version >= (0, 7, 0):
command.branches(config, verbose=verbose)
else:
command.branches(config)
@MigrateCommand.option('--head-only', dest='head_only', action='store_true',
default=False,
help='Deprecated. Use --verbose for additional output')
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def current(directory=None, verbose=False, head_only=False):
"""Display the current revision for each database."""
config = current_app.extensions['migrate'].migrate.get_config(directory)
if alembic_version >= (0, 7, 0):
command.current(config, verbose=verbose, head_only=head_only)
else:
command.current(config)
@MigrateCommand.option('--tag', dest='tag', default=None,
help=("Arbitrary 'tag' name - can be used by custom "
"env.py scripts"))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('revision', default=None, help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("migration script directory (default is "
"'migrations')"))
def stamp(directory=None, revision='head', sql=False, tag=None):
"""'stamp' the revision table with the given revision; don't run any
migrations"""
config = current_app.extensions['migrate'].migrate.get_config(directory)
command.stamp(config, revision, sql=sql, tag=tag)
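# Wiring sketch (hypothetical manage.py): with the legacy Flask-Script setup
# this module targets, the commands above appear under a 'db' subcommand:
#
#     from flask import Flask
#     from flask_sqlalchemy import SQLAlchemy
#     from flask_script import Manager
#     app = Flask(__name__)
#     db = SQLAlchemy(app)
#     Migrate(app, db)
#     manager = Manager(app)
#     manager.add_command('db', MigrateCommand)
#
# after which `python manage.py db migrate` / `db upgrade` invoke the
# corresponding functions defined here.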
|
louiskun/flaskGIT
|
venv/lib/python2.7/site-packages/flask_migrate/__init__.py
|
Python
|
mit
| 17,342
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-20 10:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aggregator', '0015_dataset_dataset_user'),
('query_designer', '0015_remove_abstractquery_dataset_query'),
]
operations = [
migrations.AddField(
model_name='abstractquery',
name='dataset_query',
field=models.ManyToManyField(to='aggregator.Dataset'),
),
]
|
dipapaspyros/bdo_platform
|
query_designer/migrations/0016_abstractquery_dataset_query.py
|
Python
|
mit
| 553
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/droid/shared_crafting_module_clothing.iff"
result.attribute_template_id = -1
result.stfName("craft_droid_ingredients_n","crafting_module_clothing")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/component/droid/shared_crafting_module_clothing.py
|
Python
|
mit
| 494
|