| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
from logging import getLogger
from .units import ok
from .validation import validate_duration
from threading import Condition, Thread
log = getLogger("failover.background")
class Background(Thread):
"""
Background(task, delay, start=ok)
Create a Background object that invokes an underlying health check task
asynchronously and saves the state.
This is used to cache health check results that may be expensive to
compute. For example, an application server may choose to implement checks
on each of its dependencies, taking upwards of a minute to fully execute.
Meanwhile, a load balancer fronting the server might be hard-wired to query
the application server every 5 seconds; using a Background object to proxy
these requests will prevent the server from filling up with health check
tasks.
"""
def __init__(self, task, delay, initial_state=ok, start_thread=True):
super(Background, self).__init__()
self.task = task
self.state = initial_state
self.delay = validate_duration(delay, "delay")
self.lock = Condition()
self.exit_requested = False
if start_thread:
self.start()
return
def run(self):
with self.lock:
while not self.exit_requested:
# Allow another thread to request this thread to stop.
self.lock.wait(self.delay)
if self.exit_requested:
break
try:
self.state = self.task()
except Exception as e:
log.error("Failed to execute background task: %s", e,
exc_info=True)
return
def __call__(self):
return self.state
def stop(self):
with self.lock:
self.exit_requested = True
self.lock.notify()
self.join()
return
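# A brief usage sketch (hypothetical check function and delay value; the exact
# duration format accepted by validate_duration() is not shown in this file):
#
#     def check_dependencies():
#         ...                        # expensive work, possibly a minute or more
#         return ok                  # or whatever failure state the check yields
#
#     cached = Background(check_dependencies, delay=30)
#     state = cached()               # cheap: returns the most recent cached result
#     cached.stop()                  # ask the worker thread to exit, then join it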
|
dacut/FailoverSample
|
failover/background.py
|
Python
|
bsd-2-clause
| 2,003
|
import ptypes
from ptypes import *
# integral types
class u8(pint.uint8_t): pass
class u16(pint.uint16_t): pass
class u32(pint.uint32_t): pass
class u64(pint.uint64_t): pass
class s8(pint.sint8_t): pass
class s16(pint.sint16_t): pass
class s32(pint.sint32_t): pass
class s64(pint.sint64_t): pass
# lzh-specific types
class method_id(pstr.string):
length = 5
def set(self, value):
if not isinstance(value, tuple):
return super(method_id, self).set(value)
type, version = value
if type == 'lh':
versionmap = '0123456789abcdef'
if version is None:
version = versionmap.index('d')
elif version == 'x':
return super(method_id, self).set('-lhx-')
try:
res = '-lh{:s}-'.format(versionmap[version])
except (IndexError, TypeError):
raise NotImplementedError((type, version))
return super(method_id, self).set(res)
elif type in {'pc', 'pm'}:
versionmap = '012'
if version is None:
res = '-{:s}0-'.format(type)
return super(method_id, self).set(res)
elif version == 's':
res = '-{:s}s-'.format(type)
return super(method_id, self).set(res)
try:
res = '-{:s}{:s}-'.format(type, versionmap[version])
except (IndexError, TypeError):
raise NotImplementedError((type, version))
return super(method_id, self).set(res)
elif type == 'lz':
versionmap = '012345678'
if version == 's':
res = '-lzs-'
return super(method_id, self).set(res)
elif version is None:
res = '-lz4-'
return super(method_id, self).set(res)
try:
res = '-lz{:s}-'.format(versionmap[version])
except (IndexError, TypeError):
raise NotImplementedError((type, version))
return super(method_id, self).set(res)
raise NotImplementedError((type, version))
def get(self):
res = self.str()
if res.startswith('-') and res.endswith('-'):
res = res[1:-1]
if res.startswith('lh'):
versionmap = '0123456789abcdef'
res = res[2:]
if res == 'd':
return 'lh', None
elif res == 'x':
return 'lh', 'x'
return 'lh', versionmap.index(res)
elif res.startswith('pc') or res.startswith('pm'):
type, version = res[:2], res[2:]
versionmap = '012'
if version == 's':
return type, version
return type, versionmap.index(version)
elif res.startswith('lz'):
versionmap = '012345678'
type, version = res[:2], res[2:]
if version == 's':
return 'lz', version
elif version == '4':
return 'lz', None
return 'lz', versionmap.index(version)
raise NotImplementedError
raise ValueError(res)
# extension header levels
class Level(ptype.definition): cache = {}
@Level.define
class Level0(pstruct.type):
type = 0
def __filename(self):
res = self['filename-length'].li.int()
return dyn.clone(pstr.string, length=res)
_fields_ = [
(u8, 'filename-length'),
(__filename, 'filename'),
(u16, 'crc'),
]
@Level.define
class Level1(pstruct.type):
type = 1
def __filename(self):
res = self['filename-length'].li.int()
return dyn.clone(pstr.string, length=res)
_fields_ = [
(u8, 'filename-length'),
(__filename, 'filename'),
(u16, 'crc'),
(u8, 'os-identifier'),
(u16, 'next-header-size'),
]
# base structures
class Signature(pstruct.type):
_fields_ = [
(u8, 'size'),
(u8, 'checksum'),
(method_id, 'method'),
]
class Attributes(pstruct.type):
class _timestamp(u32): pass
class _attribute(u8): pass
_fields_ = [
(u32, 'compressed-size'),
(u32, 'uncompressed-size'),
(_timestamp, 'timestamp'),
(_attribute, 'file-attribute'),
(u8, 'level-identifier'),
]
def Level(self):
return self['level-identifier'].int()
class Header(pstruct.type):
def __extended(self):
res = self['attributes'].li
return Level.lookup(res.Level())
def __padding_header(self):
res = self['signature'].li
cb = res['size'].int()
total = 2 + sum(self[fld].li.size() for fld in ['signature', 'attributes', 'extended'])
return dyn.block(max(0, cb - total))
_fields_ = [
(Signature, 'signature'),
(Attributes, 'attributes'),
(__extended, 'extended'),
(__padding_header, 'padding'),
]
class File(pstruct.type):
def __data(self):
res = self['header'].li
return dyn.block(res['attributes']['compressed-size'].int())
_fields_ = [
(Header, 'header'),
(__data, 'data'),
]
if __name__ == '__main__':
import ptypes, archive.lha
ptypes.setsource(ptypes.prov.file('c:/users/user/Downloads/fcgb2.lzh', mode='r'))
z = archive.lha.File()
z = z.l
print(z.source.size())
print(z['header']['signature'])
print(z['header']['attributes'])
print(z['header'])
print(z['header']['filename'])
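# For reference, the 5-byte method ids handled by method_id.set()/get() above
# map (type, version) tuples onto the dash-delimited LZH tags, for example
# (a sketch of the mapping only, not executable against this template):
#
#     ('lh', 5)    <->  '-lh5-'    # versions 0..f, plus 'x'; None means '-lhd-'
#     ('pm', 2)    <->  '-pm2-'    # 'pc'/'pm' with versions 0..2 or 's'; None -> '-pm0-'
#     ('lz', None) <->  '-lz4-'    # 'lz' with versions 0..8 or 's'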
|
arizvisa/syringe
|
template/archive/lha.py
|
Python
|
bsd-2-clause
| 5,683
|
# -*- coding: utf-8 -*-
#
# generic tools for apps
#
def format_float(number): # {{{
"""
format the display style of a float number
"""
threshold_min = 0.001
threshold_max = 9999.9
fix_fmt = '{:,.1f}' # comma as a thousands separator
exp_fmt = '{:.1e}'
#
if isinstance(number, int) or isinstance(number, float):
#return type(number)
pass
else:
return False
#
if (number > threshold_max) or (number < threshold_min):
str = exp_fmt.format(number)
else:
str = fix_fmt.format(number)
#
return str
# }}}
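# A short usage sketch of format_float() above: values inside
# [0.001, 9999.9] get fixed-point formatting with a thousands separator,
# values outside that range fall back to scientific notation, and
# non-numeric input returns False.
if __name__ == '__main__': # {{{
    print(format_float(1234.5))    # '1,234.5'
    print(format_float(0.0001))    # '1.0e-04'
    print(format_float(123456.7))  # '1.2e+05'
    print(format_float('abc'))     # False
# }}}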
|
liweitianux/97dev
|
97suifangqa/apps/utils/tools.py
|
Python
|
bsd-2-clause
| 644
|
__version__ = "1.4.7"
|
eRestin/Mezz
|
mezzanine/__init__.py
|
Python
|
bsd-2-clause
| 23
|
# test_collection.py
from rctk.tests.base import BaseTest
from rctk.widgets import StaticText
class BaseCollectionTest(BaseTest):
""" Test basic collection behaviour. """
item = StaticText
collection = None
def create_collection(self):
return self.collection(self.tk, self.item)
def test_empty(self):
c = self.create_collection()
assert len(c._items) == 0
assert len(c._controls) == 0
def test_append(self):
c = self.create_collection()
c.append('Foo')
assert len(c._items) == 1
assert len(c._controls) == 1
def test_remove(self):
c = self.create_collection()
c.append('Foo')
c.remove('Foo')
assert len(c._items) == 0
assert len(c._controls) == 0
def test_extend(self):
c = self.create_collection()
c.extend(['Foo', 'Bar'])
assert len(c._items) == 2
assert len(c._controls) == 2
def test_clear(self):
c = self.create_collection()
c.extend(['Foo', 'Bar'])
c.clear()
assert len(c._items) == 0
assert len(c._controls) == 0
from rctk.widgets import Collection
class TestCollectionWidget(BaseCollectionTest):
collection = Collection
|
rctk/rctk
|
rctk/tests/test_collection.py
|
Python
|
bsd-2-clause
| 1,284
|
#!/usr/bin/env python
"""
Copyright (C) Jeremy O'Donoghue, 2003
License: This work is licensed under the PSF. A copy should be included
with this source code, and is also available at
http://www.python.org/psf/license.html
This is a sample showing how to embed a matplotlib figure in a wxPanel,
and update the contents whenever a timer event occurs. It is inspired
by the GTK script dynamic_demo.py, by John Hunter (should be supplied with
this file) but I have assumed that you may wish to embed a figure inside
your own arbitrary frame, which makes the code slightly more complicated.
It goes without saying that you can update the display on any event, not
just a timer...
Should you require a toolbar and navigation, take inspiration from
embedding_in_wx.py, which provides these features.
Modification History:
$Log$
Revision 1.7 2005/06/15 20:24:56 jdh2358
syncing for 82
Revision 1.6 2004/10/26 18:08:13 astraw
Converted to use new NavigationToolbar2 (from old Toolbar).
Revision 1.5 2004/06/26 06:37:20 astraw
Trivial bugfix to eliminate IndexError
Revision 1.4 2004/05/03 12:12:26 jdh2358
added bang header to examples
Revision 1.3 2004/03/08 22:17:20 jdh2358
* Fixed embedding_in_wx and dynamic_demo_wx examples
* Ported build to darwin
* Tk:
removed default figman=None from nav toolbar since it needs the
figman
fixed close bug
small changes to aid darwin build
Revision 1.2 2004/02/26 20:22:58 jaytmiller
Added the "numerix" Numeric/numarray selector module enabling matplotlib
to work with either numarray or Numeric. See matplotlib.numerix.__doc__.
Revision 1.1 2003/12/30 17:22:09 jodonoghue
First version of dynamic_demo for backend_wx
"""
import matplotlib
matplotlib.use('WX')
from matplotlib.backends.backend_wx import FigureCanvasWx,\
FigureManager, NavigationToolbar2Wx
from matplotlib.figure import Figure
import numpy
from wx import *
TIMER_ID = NewId()
class PlotFigure(Frame):
def __init__(self):
Frame.__init__(self, None, -1, "Test embedded wxFigure")
self.fig = Figure((5,4), 75)
self.canvas = FigureCanvasWx(self, -1, self.fig)
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
# On Windows, default frame size behaviour is incorrect
# you don't need this under Linux
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
self.toolbar.SetSize(Size(fw, th))
# Create a figure manager to manage things
self.figmgr = FigureManager(self.canvas, 1, self)
# Now put all into a sizer
sizer = BoxSizer(VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(self.canvas, 1, LEFT|TOP|GROW)
# Best to allow the toolbar to resize!
sizer.Add(self.toolbar, 0, GROW)
self.SetSizer(sizer)
self.Fit()
EVT_TIMER(self, TIMER_ID, self.onTimer)
def init_plot_data(self):
a = self.fig.add_subplot(111)
self.ind = numpy.arange(60)
tmp = []
for i in range(60):
tmp.append(numpy.sin((self.ind+i)*numpy.pi/15))
self.X = numpy.array(tmp)
self.lines = a.plot(self.X[:,0],'o')
self.count = 0
def GetToolBar(self):
# You will need to override GetToolBar if you are using an
# unmanaged toolbar in your frame
return self.toolbar
def onTimer(self, evt):
self.count += 1
if self.count >= 60: self.count = 0
self.lines[0].set_data(self.ind, self.X[:,self.count])
self.canvas.draw()
self.canvas.gui_repaint()
if __name__ == '__main__':
app = PySimpleApp()
frame = PlotFigure()
frame.init_plot_data()
# Initialise the timer - wxPython requires this to be connected to the
# receiving event handler
t = Timer(frame, TIMER_ID)
t.Start(100)
frame.Show()
app.MainLoop()
|
sniemi/SamPy
|
sandbox/src1/examples/dynamic_demo_wx.py
|
Python
|
bsd-2-clause
| 3,927
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('comment02.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with comments."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write('A1', 'Foo')
worksheet.write_comment('B2', 'Some text')
worksheet.write_comment('D17', 'More text')
# Set the author to match the target XLSX file.
worksheet.set_comments_author('John')
workbook.close()
self.assertExcelEqual()
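# Outside the comparison-test harness, the same file can be produced with the
# public XlsxWriter API (a brief sketch; 'comments_demo.xlsx' is just an
# illustrative output name):
#
#     import xlsxwriter
#
#     workbook = xlsxwriter.Workbook('comments_demo.xlsx')
#     worksheet = workbook.add_worksheet()
#     worksheet.write('A1', 'Foo')
#     worksheet.write_comment('B2', 'Some text')
#     worksheet.write_comment('D17', 'More text')
#     worksheet.set_comments_author('John')
#     workbook.close()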
|
jmcnamara/XlsxWriter
|
xlsxwriter/test/comparison/test_comment02.py
|
Python
|
bsd-2-clause
| 1,011
|
from gevent import monkey
# monkey.patch_all(aggressive=False)
monkey.patch_socket()
monkey.patch_thread()
monkey.patch_time()
monkey.patch_ssl()
from JumpScale import j
from gevent.pywsgi import WSGIServer
import JumpScale.grid.serverbase
import time
import gevent
class GeventWSServer():
def __init__(self, addr, port, sslorg=None, ssluser=None, sslkeyvaluestor=None):
"""
@param handler is passed as a class
"""
self.port = port
self.addr = addr
self.key = "1234"
self.nr = 0
# self.jobhandler = JobHandler()
self.daemon = j.servers.base.getDaemon(sslorg=sslorg, ssluser=ssluser, sslkeyvaluestor=sslkeyvaluestor)
self.server = WSGIServer(('', self.port), self.rpcRequest)
self.type = "geventws"
self.greenlets = {}
self.now = 0
self.fiveMinuteId=0
self.hourId=0
self.dayId=0
def startClock(self,obj=None):
self.schedule("timer", self._timer)
self.schedule("timer2", self._timer2)
if obj is not None:
obj.now=self.now
obj.fiveMinuteId=self.fiveMinuteId
obj.hourId=self.hourId
obj.dayId=self.dayId
def _timer(self):
"""
will remember time every 1 sec
"""
# lfmid = 0
while True:
self.now = time.time()
print "timer"
gevent.sleep(1)
def _timer2(self):
"""
will remember time every 1 sec
"""
# lfmid = 0
while True:
self.fiveMinuteId=j.base.time.get5MinuteId(self.now )
self.hourId=j.base.time.getHourId(self.now )
self.dayId=j.base.time.getDayId(self.now )
print "timer2"
gevent.sleep(200)
def schedule(self, name, ffunction, *args, **kwargs):
self.greenlets[name] = gevent.greenlet.Greenlet(ffunction, *args, **kwargs)
self.greenlets[name].start()
return self.greenlets[name]
def responseRaw(self,data,start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return [data]
def responseNotFound(self,start_response):
start_response('404 Not Found', [('Content-Type', 'text/html')])
return ['<h1>Not Found</h1>']
def rpcRequest(self, environ, start_response):
if environ["CONTENT_TYPE"]=='application/raw' and environ["REQUEST_METHOD"]=='POST':
data=environ["wsgi.input"].read()
category, cmd, data2, informat, returnformat, sessionid = j.servers.base._unserializeBinSend(data)
resultcode, returnformat, result = self.daemon.processRPCUnSerialized(cmd, informat, returnformat, data2, sessionid, category=category)
data3 = j.servers.base._serializeBinReturn(resultcode, returnformat, result)
return self.responseRaw(data3,start_response)
else:
return self.responseNotFound(start_response)
# def router(self, environ, start_response):
# path = environ["PATH_INFO"].lstrip("/")
# if path == "" or path.rstrip("/") == "wiki":
# path == "wiki/system"
# print "path:%s" % path
# if path.find("favicon.ico") != -1:
# return self.processor_page(environ, start_response, self.filesroot, "favicon.ico", prefix="")
# ctx = RequestContext(application="", actor="", method="", env=environ,
# start_response=start_response, path=path, params=None)
# ctx.params = self._getParamsFromEnv(environ, ctx)
def start(self):
print "started on %s" % self.port
self.server.serve_forever()
def addCMDsInterface(self, MyCommands, category="",proxy=False):
self.daemon.addCMDsInterface(MyCommands, category,proxy=proxy)
|
Jumpscale/jumpscale6_core
|
lib/JumpScale/grid/geventws/GeventWSServer.py
|
Python
|
bsd-2-clause
| 3,848
|
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Ecs20130110DescribeInstanceAttributeRequest(RestApi):
def __init__(self,domain='ecs.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.InstanceId = None
def getapiname(self):
return 'ecs.aliyuncs.com.DescribeInstanceAttribute.2013-01-10'
|
wanghe4096/website
|
aliyun/api/rest/Ecs20130110DescribeInstanceAttributeRequest.py
|
Python
|
bsd-2-clause
| 356
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import array
from collections import defaultdict
import numbers
import re
import unicodedata
import nltk
import numpy as np
from numpy import bincount
import scipy.sparse as sp
def _make_int_array():
return array.array(str("i"))
def _document_frequency(X):
return bincount(X.indices, minlength=X.shape[1])
class tfidTransformer():
def fit(self, X, y=None):
n_samples, n_features = X.shape
df = _document_frequency(X)
#df += int(self.smooth_idf)
#n_samples += int(self.smooth_idf)
idf = np.log10(float(n_samples)/df)
self._idf_diag = sp.spdiags(idf,diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
X = sp.csr_matrix(X, copy=copy)
X = X * self._idf_diag
return X
class TfidfVectorizer():
def __init__(self,stop_words=None):
self.fixed_vocabulary_ = False
self.token_pattern = r"(?u)\b\w\w+\b"
self.strip_accents = None
self.encoding = 'utf-8'
self.decode_error = 'strict'
self.dtype = np.float64
self.smooth_idf = True
self._tfidf = tfidTransformer()
self.stop_words = stop_words
def decode(self, doc):
if isinstance(doc, bytes):
print("isinstance")
doc = doc.decode(self.encoding, self.decode_error)
return doc
def _count_vocab(self, raw_documents, fixed_vocab):
if fixed_vocab:
vocabulary = self.vocabulary_
else:
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
continue
indptr.append(len(j_indices))
if not fixed_vocab:
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary")
j_indices = np.frombuffer(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr)-1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def _word_ngrams(self, tokens, stop_words=None):
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
return tokens
def build_preprocessor(self):
noop = lambda x: x
strip_accents = noop
return lambda x: strip_accents(x.lower())
def build_tokenizer(self):
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def build_analyzer(self):
preprocess = self.build_preprocessor()
stop_words = frozenset(nltk.corpus.stopwords.words('spanish'))
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(tokenize(preprocess(self.decode(doc))), stop_words)
def fit_transform(self, raw_documents):
vocabulary, X = self._count_vocab(raw_documents,self.fixed_vocabulary_)
self.vocabulary_ = vocabulary
self._tfidf.fit(X)
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents):
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
return self._tfidf.transform(X, copy=False)
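# A minimal usage sketch for the vectorizer above. It assumes the NLTK Spanish
# stopword list is available locally (nltk.download('stopwords')); the sample
# documents are purely illustrative.
if __name__ == '__main__':
    docs = [
        "el gato duerme en la casa",
        "el perro corre por la casa",
        "un gato y un perro juegan",
    ]
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(docs)   # term counts weighted by log10(N / df)
    print(sorted(vectorizer.vocabulary_))
    print(X.toarray())
    print(vectorizer.transform(["el gato corre"]).toarray())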
|
keyvhinng/sense-me
|
classifier/tfidf_vectorization/tfidf.py
|
Python
|
bsd-2-clause
| 3,741
|
"""
Generate
"""
vdw_radii = {}
vdw_radii[1] = 1.2
vdw_radii[6] = 1.5
vdw_radii[7] = 1.5
vdw_radii[8] = 1.4
vdw_radii[16] = 1.89
"""
"""
import numpy
EPS = 1.0e-8
def grid(ntes, r, z, rscal):
temp_coords = []
final_coords = []
for i, (ri, zi) in enumerate(zip(r, z)):
riscal = vdw_radii[zi] * rscal
icoords = unit_sphere( ntes )
icoords *= riscal
for n,nc in enumerate(icoords):
ncontact = 1
icoords[n] = ri - icoords[n]
for k, (rk, zk) in enumerate(zip(r, z)):
if i == k: continue
rkscal = vdw_radii[zk] * rscal
dr = icoords[n] - rk
dr2=numpy.dot( dr, dr )
if dr2 < rkscal**2:
ncontact += 1
if ncontact == 1:
final_coords.append( icoords[n] )
return numpy.array( final_coords )
def unit_sphere( ntes ):
neq = int(numpy.sqrt(numpy.pi * ntes))
nvt = neq // 2
coordinates = []
for i in range(nvt+1):
angle = numpy.pi * i / nvt
z = numpy.cos( angle )
xy = numpy.sin( angle )
nbo = int(xy*neq + EPS)
if nbo < 1: nbo = 1
for j in range(nbo):
angle2 = 2*numpy.pi * j / nbo
coordinates.append( numpy.array(
[xy*numpy.cos( angle2 ),
xy*numpy.sin( angle2 ),
z]))
return numpy.array(coordinates)
if __name__ == '__main__':
import sys
import obabel
mol = obabel.Molecule(sys.argv[1])
R = []
Z = []
for atom in mol.getAtoms():
R.append( numpy.array([atom.GetX(), atom.GetY(), atom.GetZ()]) )
Z.append(atom.GetAtomicNum())
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
alphas = [1.0, 0.6, 0.4, 0.2]
colors = ['#222222','#666666','#999999','#BABABA']
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
for i in range(4):
full_grid = grid(48, R, Z, 1.4+i*0.2)
x,y,z = full_grid.transpose()
ax.scatter(x,y,z,alpha=alphas[i],color=colors[i])
plt.show()
#plt.savefig('mep.png')
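# A tiny self-contained check of grid()/unit_sphere() above, without obabel
# (hypothetical input: a single oxygen atom at the origin):
#
#     pts = grid(48, [numpy.zeros(3)], [8], 1.4)
#     # every point sits on a sphere of radius 1.4 * vdw_radii[8] = 1.96
#     print(pts.shape, numpy.linalg.norm(pts, axis=1).round(2).min())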
|
cstein/qfitlib
|
tools/python/connolly.py
|
Python
|
bsd-2-clause
| 2,192
|
from .version import __version__
from ecospold2matrix.ecospold2matrix import Ecospold2Matrix
|
majeau-bettez/ecospold2matrix
|
ecospold2matrix/__init__.py
|
Python
|
bsd-2-clause
| 94
|
'''freevolv.models'''
|
genevolv/dbrev
|
py/freevolv/models/__init__.py
|
Python
|
bsd-2-clause
| 22
|
"""
oscar_stripe.views
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2014 by Xavier Ordoquy,
see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.views.generic import FormView
class StripeView(FormView):
template_name = 'oscar_stripe/form.html'
def form_valid(self, form):
email = form.cleaned_data['stripeEmail']
token = form.cleaned_data['stripeToken']
print email, token
|
linovia/django-oscar-stripe
|
oscar_stripe/views.py
|
Python
|
bsd-3-clause
| 431
|
"""Test utility functions."""
import numpy as np
from compoda.utils import truncate_range, scale_range
def test_truncate_range():
"""Test range truncation."""
# Given
data = np.random.random(100)
data.ravel()[np.random.choice(data.size, 10, replace=False)] = 0
data.ravel()[np.random.choice(data.size, 5, replace=False)] = np.nan
p_min, p_max = 2.5, 97.5
expected = np.nanpercentile(data, [p_min, p_max])
# When
output = truncate_range(data, percMin=p_min, percMax=p_max,
discard_zeros=False)
# Then
assert all(np.nanpercentile(output, [0, 100]) == expected)
def test_scale_range():
"""Test range scaling."""
# Given
data = np.random.random(100) - 0.5
data.ravel()[np.random.choice(data.size, 10, replace=False)] = 0.
data.ravel()[np.random.choice(data.size, 5, replace=False)] = np.nan
s = 42. # scaling factor
expected = [0., s] # min and max
# When
output = scale_range(data, scale_factor=s, delta=0.01, discard_zeros=False)
# Then
assert all([np.nanmin(output) >= expected[0],
np.nanmax(output) < expected[1]])
|
ofgulban/tetrahydra
|
compoda/tests/test_utils.py
|
Python
|
bsd-3-clause
| 1,154
|
from django.conf.urls import patterns, url
from interim_cms.views import ExampleTileView
urlpatterns = patterns("",
url(r"^example-tile/$", ExampleTileView.as_view(), name="example-tile"),
)
|
praekelt/django-interim-cms
|
interim_cms/urls.py
|
Python
|
bsd-3-clause
| 198
|
"""Django email backend for celery."""
VERSION = (1, 0, 3, 1)
__version__ = '.'.join(map(str, VERSION))
__author__ = 'Paul McLanahan'
__contact__ = 'paul@mclanahan.net'
__homepage__ = 'http://bitbucket.org/pmclanahan/django-celery-email'
__license__ = 'BSD (3 clause)'
|
public/django-celery-email
|
djcelery_email/__init__.py
|
Python
|
bsd-3-clause
| 271
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 12);
|
antoinecarme/pyaf
|
tests/artificial/transf_None/trend_LinearTrend/cycle_12/ar_12/test_artificial_128_None_LinearTrend_12_12_20.py
|
Python
|
bsd-3-clause
| 263
|
import gc
import operator
import os
import random
import dozer
import flask
import objgraph
app = flask.Flask(__name__)
app.secret_key = "it's a secret to everyone"
# Dozer
app.wsgi_app = dozer.Dozer(app.wsgi_app)
#
# Our leaky app -- simulate a leak by storing some objects in a global var.
#
LEAKY = []
class Leaker(object):
pass
@app.route('/')
def index():
for i in range(random.randint(0, 1000)):
LEAKY.append(Leaker())
return "drip drip"
@app.route('/growth')
def object_growth():
"""
Shows changes in allocations, like objgraph.show_growth(), except:
- show_growth() prints to stdout; this is a flask view.
- this saves the peaks in the session, so that each user sees the changes
since their own last page load, not against some global baseline.
- this function is commented :)
"""
# We don't want our numbers crudded up by a GC cycle that hasn't run yet,
# so force GC before we gather stats.
gc.collect()
# `typestats()` returns a dict of {type-name: count-of-allocations}. We'll
# compare the current count for each type to the previous count, stored
# in the session as `peak_stats`, and save the changes into `deltas`.
peak_stats = flask.session.get('peak_stats', {})
stats = objgraph.typestats()
deltas = {}
# For each type, look up the old count in `peak_stats`, defaulting to 0.
# We're really only interested in *growth* -- remember, we're looking for
# memory leaks -- so if the current count is greater than the peak count,
# we want to return that change in `deltas` and also note the new peak
# for later.
for name, count in stats.iteritems():
old_count = peak_stats.get(name, 0)
if count > old_count:
deltas[name] = count - old_count
peak_stats[name] = count
# We have to remember to store `peak_stats` back in the session, otherwise
# Flask won't notice that it's changed.
flask.session['peak_stats'] = peak_stats
# Create (type-name, delta) tuples, sorted by objects with the biggest growth.
deltas = sorted(deltas.items(), key=operator.itemgetter(1), reverse=True)
return flask.render_template('growth.html',
growth = deltas,
)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))
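# The same peak-delta idea, stripped of Flask, fits in a few lines (a sketch;
# `_peaks` plays the role of the per-user session storage used above):
#
#     _peaks = {}
#
#     def growth_since_last_call():
#         gc.collect()
#         deltas = {}
#         for name, count in objgraph.typestats().items():
#             if count > _peaks.get(name, 0):
#                 deltas[name] = count - _peaks.get(name, 0)
#                 _peaks[name] = count
#         return sorted(deltas.items(), key=operator.itemgetter(1), reverse=True)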
|
jacobian/python-leaks-demo
|
app.py
|
Python
|
bsd-3-clause
| 2,330
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-onedeploy
------------
Tests for `django-onedeploy` models module.
"""
from django.test import TestCase
from onedeploy import models
class TestOnedeploy(TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
|
huogerac/django-onedeploy
|
tests/test_models.py
|
Python
|
bsd-3-clause
| 349
|
from base import AbstractLogicalFile, AbstractFileMetaData # noqa
from generic import GenericFileMetaData, GenericLogicalFile # noqa
from raster import GeoRasterFileMetaData, GeoRasterLogicalFile # noqa
from netcdf import NetCDFFileMetaData, NetCDFLogicalFile # noqa
from geofeature import GeoFeatureFileMetaData, GeoFeatureLogicalFile # noqa
from reftimeseries import RefTimeseriesFileMetaData, RefTimeseriesLogicalFile # noqa
from timeseries import TimeSeriesFileMetaData, TimeSeriesLogicalFile # noqa
|
ResearchSoftwareInstitute/MyHPOM
|
hs_file_types/models/__init__.py
|
Python
|
bsd-3-clause
| 530
|
from __future__ import print_function
import sys
import numpy as np
from astropy.wcs import WCSSUB_CELESTIAL
try:
from astropy.coordinates import BaseCoordinateFrame
except ImportError: # astropy <= 0.3
from astropy.coordinates import SphericalCoordinatesBase as BaseCoordinateFrame
from ..utils.wcs_utils import get_wcs_system_frame
class Polygon(object):
def __init__(self, x, y):
self.x = x
self.y = y
def segment_angles(x, y):
dx = np.diff(x)
dy = np.diff(y)
d = np.hypot(dx, dy)
cos_theta = (-dx[:-1] * dx[1:] - dy[:-1] * dy[1:]) / (d[:-1] * d[1:])
cos_theta = np.clip(cos_theta, -1., 1.)
sin_theta = (-dx[:-1] * dy[1:] + dy[:-1] * dx[1:]) / (d[:-1] * d[1:])
sin_theta = np.clip(sin_theta, -1., 1.)
theta = np.arctan2(sin_theta, cos_theta)
theta[0] = np.pi
theta[-1] = np.pi
return theta
def get_endpoints(x, y, width):
# Pad with same values at ends, to find slope of perpendicular end
# lines.
xp = np.pad(x, 1, mode='edge')
yp = np.pad(y, 1, mode='edge')
dx = np.diff(xp)
dy = np.diff(yp)
alpha = segment_angles(xp, yp) / 2.
beta = np.arctan2(dy, dx)[:-1]
beta[0] = beta[1]
gamma = -(np.pi - alpha - beta)
dx = np.cos(gamma)
dy = np.sin(gamma)
angles = segment_angles(xp, yp) / 2.
# Find points offset from main curve, on bisecting lines
x1 = x - dx * width * 0.5 / np.sin(angles)
x2 = x + dx * width * 0.5 / np.sin(angles)
y1 = y - dy * width * 0.5 / np.sin(angles)
y2 = y + dy * width * 0.5 / np.sin(angles)
return x1, y1, x2, y2
class Path(object):
"""
A curved path that may have a non-zero width and is used to extract
slices from cubes.
Parameters
----------
xy_or_coords : list or Astropy coordinates
The points defining the path. This can be passed as a list of (x, y)
tuples, which is interpreted as being pixel positions, or it can be
an Astropy coordinate object containing an array of 2 or more
coordinates.
width : None or float or :class:`~astropy.units.Quantity`
The width of the path. If ``coords`` is passed as a list of pixel
positions, the width should be given (if passed) as a floating-point
value in pixels. If ``coords`` is a coordinate object, the width
should be passed as a :class:`~astropy.units.Quantity` instance with
units of angle.
"""
def __init__(self, xy_or_coords, width=None):
if isinstance(xy_or_coords, list):
self._xy = xy_or_coords
self._coords = None
elif sys.version_info[0] > 2 and isinstance(xy_or_coords, zip):
self._xy = list(xy_or_coords)
self._coords = None
else:
self._xy = None
self._coords = xy_or_coords
self.width = width
def add_point(self, xy_or_coord):
"""
Add a point to the path
Parameters
----------
xy_or_coord : tuple or Astropy coordinate
A tuple (x, y) containing the coordinates of the point to add (if
the path is defined in pixel space), or an Astropy coordinate
object (if it is defined in world coordinates).
"""
if self._xy is not None:
if isinstance(xy_or_coord, tuple):
self._xy.append(xy_or_coord)
else:
raise TypeError("Path is defined as a list of pixel "
"coordinates, so `xy_or_coord` should be "
"a tuple of `(x,y)` pixel coordinates.")
else:
if isinstance(xy_or_coord, BaseCoordinateFrame):
raise NotImplementedError("Cannot yet append world coordinates to path")
else:
raise TypeError("Path is defined in world coordinates, "
"so `xy_or_coord` should be an Astropy "
"coordinate object.")
def get_xy(self, wcs=None):
"""
Return the pixel coordinates of the path.
If the path is defined in world coordinates, the appropriate WCS
transformation should be passed.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS`
The WCS transformation to assume in order to transform the path
to pixel coordinates.
"""
if self._xy is not None:
return self._xy
else:
if wcs is None:
raise ValueError("`wcs` is needed in order to compute "
"the pixel coordinates")
else:
# Extract the celestial component of the WCS
wcs_sky = wcs.sub([WCSSUB_CELESTIAL])
# Find the astropy name for the coordinates
# TODO: return a frame class with Astropy 0.4, since that can
# also contain equinox/epoch info.
celestial_system = get_wcs_system_frame(wcs_sky)
world_coords = self._coords.transform_to(celestial_system)
try:
xw, yw = world_coords.spherical.lon.degree, world_coords.spherical.lat.degree
except AttributeError: # astropy <= 0.3
xw, yw = world_coords.lonangle.degree, world_coords.latangle.degree
return list(zip(*wcs_sky.wcs_world2pix(xw, yw, 0)))
def sample_points_edges(self, spacing, wcs=None):
x, y = zip(*self.get_xy(wcs=wcs))
# Find the distance interval between all pairs of points
dx = np.diff(x)
dy = np.diff(y)
dd = np.hypot(dx, dy)
# Find the total displacement along the broken curve
d = np.hstack([0., np.cumsum(dd)])
# Figure out the number of points to sample, and stop short of the
# last point.
n_points = int(np.floor(d[-1] / spacing))
if n_points == 0:
raise ValueError("Path is shorter than spacing")
d_sampled = np.linspace(0., n_points * spacing, n_points + 1)
x_sampled = np.interp(d_sampled, d, x)
y_sampled = np.interp(d_sampled, d, y)
return d_sampled, x_sampled, y_sampled
def sample_points(self, spacing, wcs=None):
d_sampled, x_sampled, y_sampled = self.sample_points_edges(spacing, wcs=wcs)
x_sampled = 0.5 * (x_sampled[:-1] + x_sampled[1:])
y_sampled = 0.5 * (y_sampled[:-1] + y_sampled[1:])
return x_sampled, y_sampled
def sample_polygons(self, spacing, wcs=None):
x, y = zip(*self.get_xy(wcs=wcs))
d_sampled, x_sampled, y_sampled = self.sample_points_edges(spacing, wcs=wcs)
# Find the distance interval between all pairs of points
dx = np.diff(x)
dy = np.diff(y)
dd = np.hypot(dx, dy)
# Normalize to find unit vectors
dx = dx / dd
dy = dy / dd
# Find the total displacement along the broken curve
d = np.hstack([0., np.cumsum(dd)])
interval = np.searchsorted(d, d_sampled) - 1
interval[0] = 0
dx = dx[interval]
dy = dy[interval]
polygons = []
x_beg = x_sampled - dx * spacing * 0.5
x_end = x_sampled + dx * spacing * 0.5
y_beg = y_sampled - dy * spacing * 0.5
y_end = y_sampled + dy * spacing * 0.5
x1 = x_beg - dy * self.width * 0.5
y1 = y_beg + dx * self.width * 0.5
x2 = x_end - dy * self.width * 0.5
y2 = y_end + dx * self.width * 0.5
x3 = x_end + dy * self.width * 0.5
y3 = y_end - dx * self.width * 0.5
x4 = x_beg + dy * self.width * 0.5
y4 = y_beg - dx * self.width * 0.5
for i in range(len(x_sampled) - 1):
p = Polygon([x1[i], x2[i], x3[i], x4[i]], [y1[i], y2[i], y3[i], y4[i]])
polygons.append(p)
return polygons
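# A brief usage sketch for Path above, using the pixel-coordinate form of the
# API (so no WCS is needed); the coordinates and spacing are illustrative:
#
#     p = Path([(0.0, 0.0), (10.0, 5.0), (20.0, 5.0)], width=2.0)
#     p.add_point((30.0, 0.0))
#     xs, ys = p.sample_points(spacing=1.5)        # centres of each sample step
#     polys = p.sample_polygons(spacing=1.5)       # rectangles of the given width
#     print(len(xs), len(polys))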
|
bsipocz/glue
|
glue/external/pvextractor/geometry/path.py
|
Python
|
bsd-3-clause
| 7,926
|
from collections import namedtuple
from dimagi.ext import jsonobject
from dimagi.utils import parsing as dateparse
from django.utils.translation import ugettext as _
from corehq.apps.commtrack import const
from corehq.apps.commtrack.const import StockActions
from corehq.apps.commtrack.exceptions import MissingProductId
from corehq.apps.commtrack.models import CommtrackActionConfig
from corehq.apps.commtrack.xmlutil import XML
from corehq.apps.products.models import Product
class UniqueLedgerReference(namedtuple('UniqueLedgerReference', ['case_id', 'section_id', 'entry_id'])):
def as_id(self):
return '/'.join(self)
@classmethod
def from_id(cls, id_string):
return UniqueLedgerReference(*id_string.split('/'))
class StockReportHelper(jsonobject.JsonObject):
"""
Intermediate class for dealing with stock XML
"""
domain = jsonobject.StringProperty()
form_id = jsonobject.StringProperty()
timestamp = jsonobject.DateTimeProperty()
tag = jsonobject.StringProperty()
transactions = jsonobject.ListProperty(lambda: StockTransactionHelper)
server_date = jsonobject.DateTimeProperty()
deprecated = jsonobject.BooleanProperty()
@property
def report_type(self):
# this is for callers to be able to use a less confusing name
return self.tag
@classmethod
def make_from_form(cls, form, timestamp, tag, transactions):
deprecated = form.is_deprecated
return cls(
domain=form.domain,
form_id=form.form_id if not deprecated else form.orig_id,
timestamp=timestamp,
tag=tag,
transactions=transactions,
server_date=form.received_on,
deprecated=deprecated,
)
def validate(self):
"""
Validates this object as best we can and raises Exceptions if we find anything invalid .
"""
if any(transaction_helper.product_id in ('', None) for transaction_helper in self.transactions):
raise MissingProductId(_('Product IDs must be set for all ledger updates!'))
class StockTransactionHelper(jsonobject.JsonObject):
"""
Helper class for transactions
"""
product_id = jsonobject.StringProperty()
action = jsonobject.StringProperty()
subaction = jsonobject.StringProperty()
domain = jsonobject.StringProperty()
quantity = jsonobject.DecimalProperty()
# todo: this field is never populated during normal form submissions, only on SMS submissions
location_id = jsonobject.StringProperty()
timestamp = jsonobject.DateTimeProperty()
case_id = jsonobject.StringProperty()
section_id = jsonobject.StringProperty()
@property
def ledger_reference(self):
return UniqueLedgerReference(
case_id=self.case_id, section_id=self.section_id, entry_id=self.product_id
)
@property
def relative_quantity(self):
"""
Gets the quantity of this transaction as a positive or negative number
depending on the action/context
"""
if self.action == const.StockActions.CONSUMPTION:
return -self.quantity
else:
return self.quantity
def action_config(self, commtrack_config):
action = CommtrackActionConfig(action=self.action,
subaction=self.subaction)
for a in commtrack_config.all_actions:
if a.name == action.name:
return a
return None
@property
def date(self):
if self.timestamp:
return dateparse.json_format_datetime(self.timestamp)
def to_xml(self, E=None, **kwargs):
if not E:
E = XML()
return E.entry(
id=self.product_id,
quantity=str(self.quantity if self.action != StockActions.STOCKOUT
else 0),
)
@property
def category(self):
return 'stock'
def fragment(self):
"""
A short string representation of this to be used in sms correspondence
"""
if self.quantity is not None:
quant = self.quantity
else:
quant = ''
# FIXME product fetch here is inefficient
return '%s%s' % (Product.get(self.product_id).code.lower(), quant)
def __repr__(self):
return '{action} ({subaction}): {quantity} (loc: {location_id}, product: {product_id})'.format(
action=self.action,
subaction=self.subaction,
quantity=self.quantity,
location_id=self.location_id,
product_id=self.product_id,
)
|
qedsoftware/commcare-hq
|
corehq/form_processor/parsers/ledgers/helpers.py
|
Python
|
bsd-3-clause
| 4,653
|
import os
import shutil
import tempfile
from PIL import Image
from django.db.models import get_model
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from oscar_testsupport.testcases import WebTestCase
FancyPage = get_model('fancypages', 'FancyPage')
TEMP_IMAGE_DIR = tempfile.mkdtemp(suffix='_page_tests_images')
TEMP_MEDIA_ROOT = tempfile.mkdtemp(suffix='_page_tests_media')
class TestHomePage(WebTestCase):
def setUp(self):
super(TestHomePage, self).setUp()
def test_is_created_when_no_home_page_exists(self):
self.assertEquals(FancyPage.objects.count(), 0)
home_page = self.app.get(reverse('home'))
self.assertEquals(FancyPage.objects.count(), 1)
fancypage = FancyPage.objects.all()[0]
context = home_page.context[0]
self.assertIn('page-container', context)
self.assertEquals(context.get('object').id, fancypage.id)
self.assertEquals(fancypage.containers.count(), 1)
container = fancypage.containers.all()[0]
self.assertEquals(type(container.page_object), type(fancypage))
self.assertEquals(container.page_object.id, fancypage.id)
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class TestAFancyPage(WebTestCase):
is_staff = True
def tearDown(self):
super(TestAFancyPage, self).tearDown()
if os.path.exists(TEMP_MEDIA_ROOT):
shutil.rmtree(TEMP_MEDIA_ROOT)
if os.path.exists(TEMP_IMAGE_DIR):
shutil.rmtree(TEMP_IMAGE_DIR)
def test_can_be_updated_with_an_image(self):
fancy_page = FancyPage.add_root(name='Sample Page')
self.assertEquals(fancy_page.image, None)
im = Image.new("RGB", (320, 240), "red")
__, filename = tempfile.mkstemp(suffix='.jpg', dir=TEMP_IMAGE_DIR)
im.save(filename, "JPEG")
page = self.get(
reverse('fp-dashboard:page-update', args=(fancy_page.id,))
)
settings_form = page.form
settings_form['image'] = (filename,)
list_page = settings_form.submit()
self.assertRedirects(list_page, reverse('fp-dashboard:page-list'))
category_path = os.path.join(TEMP_MEDIA_ROOT, 'categories')
fancy_page = FancyPage.objects.get(id=fancy_page.id)
self.assertEquals(
fancy_page.image.path,
os.path.join(category_path, filename.rsplit('/')[-1])
)
|
tangentlabs/django-oscar-fancypages
|
tests/functional/tests_page.py
|
Python
|
bsd-3-clause
| 2,437
|
from flask_wtf import Form
from mongoengine import DoesNotExist
from wtforms import PasswordField, StringField, BooleanField
from wtforms.validators import DataRequired
from Norman.models import Hospital
from Norman.utils import hash_data
class LoginForm(Form):
"""Login form."""
email = StringField('email', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember', validators=[])
def __init__(self, *args, **kwargs):
"""Create instance."""
super(LoginForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
"""Validate the form."""
initial_validation = super(LoginForm, self).validate()
if not initial_validation:
return False
password = hash_data(self.password.data)
try:
self.user = Hospital.objects.get(email=self.email.data, password=password)
return self.user
except DoesNotExist:
return False
class PatientLoginForm(Form):
"""Login form."""
email = StringField('email', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember', validators=[])
def __init__(self, *args, **kwargs):
"""Create instance."""
super(PatientLoginForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
"""Validate the form."""
initial_validation = super(PatientLoginForm, self).validate()
if not initial_validation:
return False
password = hash_data(self.password.data)
try:
self.user = Hospital.objects.get(email=self.email.data, password=password)
return self.user
except DoesNotExist:
return False
class PatientPasswordChooseForm(Form):
"""Login form."""
email = StringField('email', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember', validators=[])
def __init__(self, *args, **kwargs):
"""Create instance."""
super(PatientPasswordChooseForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
"""Validate the form."""
initial_validation = super(PatientPasswordChooseForm, self).validate()
if not initial_validation:
return False
password = hash_data(self.password.data)
try:
self.user = Hospital.objects.get(email=self.email.data, password=password)
return self.user
except DoesNotExist:
return False
class VerificationForm(Form):
"""
Verification Specific Form
"""
verficationID = StringField('email', validators=[DataRequired()])
|
Olamyy/Norman
|
Norman/auth/forms.py
|
Python
|
bsd-3-clause
| 2,878
|
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from dash.orgs.models import Org
from dash.utils import random_string
@receiver(post_save, sender=Org)
def create_org_backend(sender, instance=None, created=False, **kwargs):
if created:
instance.backends.get_or_create(
backend_type=settings.SITE_BACKEND,
api_token=random_string(32),
slug="rapidpro",
host=settings.SITE_API_HOST,
created_by=instance.created_by,
modified_by=instance.created_by
)
|
praekelt/casepro
|
casepro/orgs_ext/signals.py
|
Python
|
bsd-3-clause
| 609
|
import collections
import random
import sys
import re
from raco.algebra import *
from raco.expression import NamedAttributeRef as AttRef
from raco.expression import UnnamedAttributeRef as AttIndex
from raco.expression import StateVar
from raco.expression import aggregate
from raco.backends.myria import (
MyriaShuffleConsumer, MyriaShuffleProducer, MyriaHyperCubeShuffleProducer,
MyriaBroadcastConsumer, MyriaQueryScan, MyriaSplitConsumer, MyriaUnionAll,
MyriaBroadcastProducer, MyriaScan, MyriaSelect, MyriaSplitProducer,
MyriaDupElim, MyriaGroupBy, MyriaIDBController, MyriaSymmetricHashJoin,
compile_to_json)
from raco.backends.myria import (MyriaLeftDeepTreeAlgebra,
MyriaHyperCubeAlgebra)
from raco.compile import optimize
from raco import relation_key
from raco.catalog import FakeCatalog
import raco.scheme as scheme
import raco.myrial.myrial_test as myrial_test
from raco import types
class OptimizerTest(myrial_test.MyrialTestCase):
x_scheme = scheme.Scheme([("a", types.LONG_TYPE), ("b", types.LONG_TYPE), ("c", types.LONG_TYPE)]) # noqa
y_scheme = scheme.Scheme([("d", types.LONG_TYPE), ("e", types.LONG_TYPE), ("f", types.LONG_TYPE)]) # noqa
z_scheme = scheme.Scheme([('src', types.LONG_TYPE), ('dst', types.LONG_TYPE)]) # noqa
part_scheme = scheme.Scheme([("g", types.LONG_TYPE), ("h", types.LONG_TYPE), ("i", types.LONG_TYPE)]) # noqa
broad_scheme = scheme.Scheme([("j", types.LONG_TYPE), ("k", types.LONG_TYPE), ("l", types.LONG_TYPE)]) # noqa
x_key = relation_key.RelationKey.from_string("public:adhoc:X")
y_key = relation_key.RelationKey.from_string("public:adhoc:Y")
z_key = relation_key.RelationKey.from_string("public:adhoc:Z")
part_key = relation_key.RelationKey.from_string("public:adhoc:part")
broad_key = relation_key.RelationKey.from_string("public:adhoc:broad")
part_partition = RepresentationProperties(
hash_partitioned=tuple([AttIndex(1)]))
broad_partition = RepresentationProperties(broadcasted=True)
random.seed(387) # make results deterministic
rng = 20
count = 30
z_data = collections.Counter([(1, 2), (2, 3), (1, 2), (3, 4)])
x_data = collections.Counter(
[(random.randrange(rng), random.randrange(rng),
random.randrange(rng)) for _ in range(count)])
y_data = collections.Counter(
[(random.randrange(rng), random.randrange(rng),
random.randrange(rng)) for _ in range(count)])
part_data = collections.Counter(
[(random.randrange(rng), random.randrange(rng),
random.randrange(rng)) for _ in range(count)])
broad_data = collections.Counter(
[(random.randrange(rng), random.randrange(rng),
random.randrange(rng)) for _ in range(count)])
def setUp(self):
super(OptimizerTest, self).setUp()
self.db.ingest(self.x_key, self.x_data, self.x_scheme)
self.db.ingest(self.y_key, self.y_data, self.y_scheme)
self.db.ingest(self.z_key, self.z_data, self.z_scheme)
self.db.ingest(self.part_key, self.part_data, self.part_scheme,
self.part_partition) # "partitioned" table
self.db.ingest(self.broad_key, self.broad_data,
self.broad_scheme, self.broad_partition)
@staticmethod
def logical_to_physical(lp, **kwargs):
if kwargs.get('hypercube', False):
algebra = MyriaHyperCubeAlgebra(FakeCatalog(64))
else:
algebra = MyriaLeftDeepTreeAlgebra()
return optimize(lp, algebra, **kwargs)
@staticmethod
def get_count(op, claz):
"""Return the count of operator instances within an operator tree."""
def count(_op):
if isinstance(_op, claz):
yield 1
else:
yield 0
return sum(op.postorder(count))
@staticmethod
def get_num_select_conjuncs(op):
"""Get the number of conjunctions within all select operations."""
def count(_op):
if isinstance(_op, Select):
yield len(expression.extract_conjuncs(_op.condition))
else:
yield 0
return sum(op.postorder(count))
def test_push_selects(self):
"""Test pushing selections into and across cross-products."""
lp = StoreTemp('OUTPUT',
Select(expression.LTEQ(AttRef("e"), AttRef("f")),
Select(expression.EQ(AttRef("c"), AttRef("d")),
Select(expression.GT(AttRef("a"), AttRef("b")),
CrossProduct(Scan(self.x_key, self.x_scheme),
Scan(self.y_key, self.y_scheme)))))) # noqa
self.assertEquals(self.get_count(lp, Select), 3)
self.assertEquals(self.get_count(lp, CrossProduct), 1)
pp = self.logical_to_physical(lp)
self.assertIsInstance(pp.input, MyriaSplitConsumer)
self.assertIsInstance(pp.input.input.input, Join)
self.assertEquals(self.get_count(pp, Select), 2)
self.assertEquals(self.get_count(pp, CrossProduct), 0)
self.db.evaluate(pp)
result = self.db.get_temp_table('OUTPUT')
expected = collections.Counter(
[(a, b, c, d, e, f) for (a, b, c) in self.x_data
for (d, e, f) in self.y_data if a > b and e <= f and c == d])
self.assertEquals(result, expected)
def test_collapse_applies(self):
"""Test pushing applies together."""
lp = StoreTemp('OUTPUT',
Apply([(None, AttIndex(1)), ('w', expression.PLUS(AttIndex(0), AttIndex(0)))], # noqa
Apply([(None, AttIndex(1)), (None, AttIndex(0)), (None, AttIndex(1))], # noqa
Apply([('x', AttIndex(0)), ('y', expression.PLUS(AttIndex(1), AttIndex(0)))], # noqa
Apply([(None, AttIndex(0)), (None, AttIndex(1))],
Scan(self.x_key, self.x_scheme)))))) # noqa
self.assertEquals(self.get_count(lp, Apply), 4)
pp = self.logical_to_physical(lp)
self.assertIsInstance(pp.input, Apply)
self.assertEquals(self.get_count(pp, Apply), 1)
expected = collections.Counter(
[(b, a + a) for (a, b, c) in
[(b, a, b) for (a, b) in
[(a, b + a) for (a, b) in
[(a, b) for (a, b, c) in self.x_data]]]]) # noqa
self.db.evaluate(pp)
result = self.db.get_temp_table('OUTPUT')
self.assertEquals(result, expected)
def test_select_count_star(self):
"""Test that we don't generate 0-length applies from a COUNT(*)."""
lp = StoreTemp('OUTPUT',
GroupBy([], [expression.COUNTALL()],
Scan(self.x_key, self.x_scheme)))
self.assertEquals(self.get_count(lp, GroupBy), 1)
pp = self.logical_to_physical(lp)
self.assertIsInstance(pp.input.input.input, GroupBy)
# SplitC.SplitP.GroupBy.CollectP.CollectC.GroupBy.Apply
apply = pp.input.input.input.input.input.input.input
self.assertIsInstance(apply, Apply)
self.assertEquals(self.get_count(pp, Apply), 1)
self.assertEquals(len(apply.scheme()), 1)
expected = collections.Counter([(len(self.x_data),)])
self.db.evaluate(pp)
result = self.db.get_temp_table('OUTPUT')
self.assertEquals(result, expected)
def test_projects_apply_join(self):
"""Test column selection both Apply into ProjectingJoin
and ProjectingJoin into its input.
"""
lp = StoreTemp('OUTPUT',
Apply([(None, AttIndex(1))],
ProjectingJoin(expression.EQ(AttIndex(0), AttIndex(3)),
Scan(self.x_key, self.x_scheme),
Scan(self.x_key, self.x_scheme),
[AttIndex(i) for i in xrange(2 * len(self.x_scheme))]))) # noqa
self.assertIsInstance(lp.input.input, ProjectingJoin)
self.assertEquals(2 * len(self.x_scheme),
len(lp.input.input.scheme()))
pp = self.logical_to_physical(lp)
self.assertIsInstance(pp.input, MyriaSplitConsumer)
proj_join = pp.input.input.input
self.assertIsInstance(proj_join, ProjectingJoin)
self.assertEquals(1, len(proj_join.scheme()))
self.assertEquals(2, len(proj_join.left.scheme()))
self.assertEquals(1, len(proj_join.right.scheme()))
expected = collections.Counter(
[(b,)
for (a, b, c) in self.x_data
for (d, e, f) in self.x_data
if a == d])
self.db.evaluate(pp)
result = self.db.get_temp_table('OUTPUT')
self.assertEquals(result, expected)
def test_push_selects_apply(self):
"""Test pushing selections through apply."""
lp = StoreTemp('OUTPUT',
Select(expression.LTEQ(AttRef("c"), AttRef("a")),
Select(expression.LTEQ(AttRef("b"), AttRef("c")),
Apply([('b', AttIndex(1)),
('c', AttIndex(2)),
('a', AttIndex(0))],
Scan(self.x_key, self.x_scheme))))) # noqa
expected = collections.Counter(
[(b, c, a) for (a, b, c) in self.x_data if c <= a and b <= c])
self.assertEquals(self.get_count(lp, Select), 2)
self.assertEquals(self.get_count(lp, Scan), 1)
self.assertIsInstance(lp.input, Select)
pp = self.logical_to_physical(lp)
self.assertIsInstance(pp.input, Apply)
self.assertEquals(self.get_count(pp, Select), 1)
self.db.evaluate(pp)
result = self.db.get_temp_table('OUTPUT')
self.assertEquals(result, expected)
def test_push_selects_groupby(self):
"""Test pushing selections through groupby."""
lp = StoreTemp('OUTPUT',
Select(expression.LTEQ(AttRef("c"), AttRef("a")),
Select(expression.LTEQ(AttRef("b"), AttRef("c")),
GroupBy([AttIndex(1), AttIndex(2), AttIndex(0)],
[expression.COUNTALL()],
Scan(self.x_key, self.x_scheme))))) # noqa
expected = collections.Counter(
[(b, c, a) for (a, b, c) in self.x_data if c <= a and b <= c])
expected = collections.Counter(k + (v,) for k, v in expected.items())
self.assertEquals(self.get_count(lp, Select), 2)
self.assertEquals(self.get_count(lp, Scan), 1)
self.assertIsInstance(lp.input, Select)
pp = self.logical_to_physical(lp)
self.assertIsInstance(pp.input, MyriaSplitConsumer)
self.assertIsInstance(pp.input.input.input, GroupBy)
self.assertEquals(self.get_count(pp, Select), 1)
self.db.evaluate(pp)
result = self.db.get_temp_table('OUTPUT')
self.assertEquals(result, expected)
def test_noop_apply_removed(self):
lp = StoreTemp('OUTPUT',
Apply([(None, AttIndex(1))],
ProjectingJoin(expression.EQ(AttIndex(0), AttIndex(3)),
Scan(self.x_key, self.x_scheme),
Scan(self.x_key, self.x_scheme),
[AttIndex(i) for i in xrange(2 * len(self.x_scheme))]))) # noqa
self.assertIsInstance(lp.input, Apply)
lp_scheme = lp.scheme()
pp = self.logical_to_physical(lp)
self.assertNotIsInstance(pp.input, Apply)
self.assertEquals(lp_scheme, pp.scheme())
def test_not_noop_apply_not_removed(self):
lp = StoreTemp('OUTPUT',
Apply([('hi', AttIndex(1))],
ProjectingJoin(expression.EQ(AttIndex(0), AttIndex(3)),
Scan(self.x_key, self.x_scheme),
Scan(self.x_key, self.x_scheme),
[AttIndex(i) for i in xrange(2 * len(self.x_scheme))]))) # noqa
self.assertIsInstance(lp.input, Apply)
lp_scheme = lp.scheme()
pp = self.logical_to_physical(lp)
self.assertIsInstance(pp.input, Apply)
self.assertEquals(lp_scheme, pp.scheme())
def test_extract_join(self):
"""Extract a join condition from the middle of complex select."""
s = expression.AND(expression.LTEQ(AttRef("e"), AttRef("f")),
expression.AND(
expression.EQ(AttRef("c"), AttRef("d")),
expression.GT(AttRef("a"), AttRef("b"))))
lp = StoreTemp('OUTPUT', Select(s, CrossProduct(
Scan(self.x_key, self.x_scheme),
Scan(self.y_key, self.y_scheme))))
self.assertEquals(self.get_num_select_conjuncs(lp), 3)
pp = self.logical_to_physical(lp)
# non-equijoin conditions should get pushed separately below the join
self.assertIsInstance(pp.input, MyriaSplitConsumer)
self.assertIsInstance(pp.input.input.input, Join)
self.assertEquals(self.get_count(pp, CrossProduct), 0)
self.assertEquals(self.get_count(pp, Select), 2)
self.db.evaluate(pp)
result = self.db.get_temp_table('OUTPUT')
expected = collections.Counter(
[(a, b, c, d, e, f) for (a, b, c) in self.x_data
for (d, e, f) in self.y_data if a > b and e <= f and c == d])
self.assertEquals(result, expected)
def test_multi_condition_join(self):
s = expression.AND(expression.EQ(AttRef("c"), AttRef("d")),
expression.EQ(AttRef("a"), AttRef("f")))
lp = StoreTemp('OUTPUT', Select(s, CrossProduct(
Scan(self.x_key, self.x_scheme),
Scan(self.y_key, self.y_scheme))))
self.assertEquals(self.get_num_select_conjuncs(lp), 2)
pp = self.logical_to_physical(lp)
self.assertEquals(self.get_count(pp, CrossProduct), 0)
self.assertEquals(self.get_count(pp, Select), 0)
expected = collections.Counter(
[(a, b, c, d, e, f) for (a, b, c) in self.x_data
for (d, e, f) in self.y_data if a == f and c == d])
self.db.evaluate(pp)
result = self.db.get_temp_table('OUTPUT')
self.assertEquals(result, expected)
def test_multiway_join_left_deep(self):
query = """
T = SCAN(public:adhoc:Z);
U = [FROM T AS T1, T AS T2, T AS T3
WHERE T1.dst==T2.src AND T2.dst==T3.src
EMIT T1.src AS x, T3.dst AS y];
STORE(U, OUTPUT);
"""
lp = self.get_logical_plan(query)
self.assertEquals(self.get_count(lp, CrossProduct), 2)
self.assertEquals(self.get_count(lp, Join), 0)
pp = self.logical_to_physical(lp)
self.assertEquals(self.get_count(pp, CrossProduct), 0)
self.assertEquals(self.get_count(pp, Join), 2)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 4)
self.assertEquals(self.get_count(pp, NaryJoin), 0)
self.assertEquals(self.get_count(pp, MyriaHyperCubeShuffleProducer), 0)
self.db.evaluate(pp)
result = self.db.get_table('OUTPUT')
expected = collections.Counter(
[(s1, d3) for (s1, d1) in self.z_data.elements()
for (s2, d2) in self.z_data.elements()
for (s3, d3) in self.z_data.elements() if d1 == s2 and d2 == s3])
self.assertEquals(result, expected)
def test_multiway_join_hyper_cube(self):
query = """
T = SCAN(public:adhoc:Z);
U = [FROM T AS T1, T AS T2, T AS T3
WHERE T1.dst==T2.src AND T2.dst==T3.src
EMIT T1.src AS x, T3.dst AS y];
STORE(U, OUTPUT);
"""
lp = self.get_logical_plan(query)
self.assertEquals(self.get_count(lp, CrossProduct), 2)
self.assertEquals(self.get_count(lp, Join), 0)
pp = self.logical_to_physical(lp, hypercube=True)
self.assertEquals(self.get_count(pp, CrossProduct), 0)
self.assertEquals(self.get_count(pp, Join), 0)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 0)
self.assertEquals(self.get_count(pp, NaryJoin), 1)
self.assertEquals(self.get_count(pp, MyriaHyperCubeShuffleProducer), 3)
self.db.evaluate(pp)
result = self.db.get_table('OUTPUT')
expected = collections.Counter(
[(s1, d3) for (s1, d1) in self.z_data.elements()
for (s2, d2) in self.z_data.elements()
for (s3, d3) in self.z_data.elements() if d1 == s2 and d2 == s3])
self.assertEquals(result, expected)
def test_hyper_cube_tie_breaking_heuristic(self):
query = """
T = SCAN(public:adhoc:Z);
U = [FROM T AS T1, T AS T2, T AS T3, T AS T4
WHERE T1.dst=T2.src AND T2.dst=T3.src AND
T3.dst=T4.src AND T4.dst=T1.src
EMIT T1.src AS x, T3.dst AS y];
STORE(U, OUTPUT);
"""
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp, hypercube=True)
def get_max_dim_size(_op):
if isinstance(_op, MyriaHyperCubeShuffleProducer):
yield max(_op.hyper_cube_dimensions)
# without the tie-breaking heuristic the max hypercube dim size would be 8,
# e.g. (1, 8, 1, 8); with it, the dimensions become (2, 4, 2, 4)
self.assertTrue(max(pp.postorder(get_max_dim_size)) <= 4)
def test_naryjoin_merge(self):
query = """
T1 = scan(public:adhoc:Z);
T2 = [from T1 emit count(dst) as dst, src];
T3 = scan(public:adhoc:Z);
twohop = [from T1, T2, T3
where T1.dst = T2.src and T2.dst = T3.src
emit *];
store(twohop, anothertwohop);
"""
statements = self.parser.parse(query)
self.processor.evaluate(statements)
lp = self.processor.get_logical_plan()
pp = self.logical_to_physical(lp, hypercube=True)
self.assertEquals(self.get_count(pp, NaryJoin), 0)
def test_right_deep_join(self):
"""Test pushing a selection into a right-deep join tree.
Myrial doesn't emit these, so we need to cook up a plan by hand."""
s = expression.AND(expression.EQ(AttIndex(1), AttIndex(2)),
expression.EQ(AttIndex(3), AttIndex(4)))
lp = Apply([('x', AttIndex(0)), ('y', AttIndex(5))],
Select(s,
CrossProduct(Scan(self.z_key, self.z_scheme),
CrossProduct(
Scan(self.z_key, self.z_scheme),
Scan(self.z_key, self.z_scheme)))))
lp = StoreTemp('OUTPUT', lp)
self.assertEquals(self.get_count(lp, CrossProduct), 2)
pp = self.logical_to_physical(lp)
self.assertEquals(self.get_count(pp, CrossProduct), 0)
self.db.evaluate(pp)
result = self.db.get_temp_table('OUTPUT')
expected = collections.Counter(
[(s1, d3) for (s1, d1) in self.z_data.elements()
for (s2, d2) in self.z_data.elements()
for (s3, d3) in self.z_data.elements() if d1 == s2 and d2 == s3])
self.assertEquals(result, expected)
def test_explicit_shuffle(self):
"""Test of a user-directed partition operation."""
query = """
T = SCAN(public:adhoc:X);
STORE(T, OUTPUT, [$2, b]);
"""
statements = self.parser.parse(query)
self.processor.evaluate(statements)
lp = self.processor.get_logical_plan()
self.assertEquals(self.get_count(lp, Shuffle), 1)
for op in lp.walk():
if isinstance(op, Shuffle):
self.assertEquals(op.columnlist, [AttIndex(2), AttIndex(1)])
def test_shuffle_before_distinct(self):
query = """
T = DISTINCT(SCAN(public:adhoc:Z));
STORE(T, OUTPUT);
"""
pp = self.get_physical_plan(query)
self.assertEquals(self.get_count(pp, Distinct), 2) # distributed
for op in pp.walk():
if isinstance(op, Distinct):
self.assertIsInstance(op.input, MyriaShuffleConsumer)
self.assertIsInstance(op.input.input, MyriaShuffleProducer)
break
def test_shuffle_before_difference(self):
query = """
T = DIFF(SCAN(public:adhoc:Z), SCAN(public:adhoc:Z));
STORE(T, OUTPUT);
"""
pp = self.get_physical_plan(query)
self.assertEquals(self.get_count(pp, Difference), 1)
for op in pp.walk():
if isinstance(op, Difference):
self.assertIsInstance(op.left, MyriaShuffleConsumer)
self.assertIsInstance(op.left.input, MyriaShuffleProducer)
self.assertIsInstance(op.right, MyriaShuffleConsumer)
self.assertIsInstance(op.right.input, MyriaShuffleProducer)
def test_bug_240_broken_remove_unused_columns_rule(self):
query = """
particles = empty(nowGroup:int, timestep:int, grp:int);
haloTable1 = [from particles as P
emit P.nowGroup,
(P.timestep+P.grp) as halo,
count(*) as totalParticleCount];
haloTable2 = [from haloTable1 as H, particles as P
where H.nowGroup = P.nowGroup
emit *];
store(haloTable2, OutputTemp);
"""
# This is it -- just test that we can get the physical plan and
# compile to JSON. See https://github.com/uwescience/raco/issues/240
pp = self.execute_query(query, output='OutputTemp')
def test_broadcast_cardinality_right(self):
# x and y have the same cardinality, z is smaller
query = """
x = scan({x});
y = scan({y});
z = scan({z});
out = [from x, z emit *];
store(out, OUTPUT);
""".format(x=self.x_key, y=self.y_key, z=self.z_key)
pp = self.get_physical_plan(query)
counter = 0
for op in pp.walk():
if isinstance(op, CrossProduct):
counter += 1
self.assertIsInstance(op.right, MyriaBroadcastConsumer)
self.assertEquals(counter, 1)
def test_broadcast_cardinality_left(self):
# x and y have the same cardinality, z is smaller
query = """
x = scan({x});
y = scan({y});
z = scan({z});
out = [from z, y emit *];
store(out, OUTPUT);
""".format(x=self.x_key, y=self.y_key, z=self.z_key)
pp = self.get_physical_plan(query)
counter = 0
for op in pp.walk():
if isinstance(op, CrossProduct):
counter += 1
self.assertIsInstance(op.left, MyriaBroadcastConsumer)
self.assertEquals(counter, 1)
def test_broadcast_cardinality_with_agg(self):
# x and y have the same cardinality, z is smaller
query = """
x = scan({x});
y = countall(scan({y}));
z = scan({z});
out = [from y, z emit *];
store(out, OUTPUT);
""".format(x=self.x_key, y=self.y_key, z=self.z_key)
pp = self.get_physical_plan(query)
counter = 0
for op in pp.walk():
if isinstance(op, CrossProduct):
counter += 1
self.assertIsInstance(op.left, MyriaBroadcastConsumer)
self.assertEquals(counter, 1)
def test_relation_cardinality(self):
query = """
x = scan({x});
out = [from x as x1, x as x2 emit *];
store(out, OUTPUT);
""".format(x=self.x_key)
lp = self.get_logical_plan(query)
self.assertIsInstance(lp, Sequence)
self.assertEquals(1, len(lp.children()))
self.assertEquals(sum(self.x_data.values()) ** 2,
lp.children()[0].num_tuples())
def test_relation_physical_cardinality(self):
query = """
x = scan({x});
out = [from x as x1, x as x2 emit *];
store(out, OUTPUT);
""".format(x=self.x_key)
pp = self.get_physical_plan(query)
self.assertEquals(sum(self.x_data.values()) ** 2,
pp.num_tuples())
def test_catalog_cardinality(self):
self.assertEquals(sum(self.x_data.values()),
self.db.num_tuples(self.x_key))
self.assertEquals(sum(self.y_data.values()),
self.db.num_tuples(self.y_key))
self.assertEquals(sum(self.z_data.values()),
self.db.num_tuples(self.z_key))
def test_groupby_to_distinct(self):
query = """
x = scan({x});
y = select $0, count(*) from x;
z = select $0 from y;
store(z, OUTPUT);
""".format(x=self.x_key)
lp = self.get_logical_plan(query)
self.assertEquals(self.get_count(lp, GroupBy), 1)
self.assertEquals(self.get_count(lp, Distinct), 0)
pp = self.logical_to_physical(copy.deepcopy(lp))
self.assertEquals(self.get_count(pp, GroupBy), 0)
self.assertEquals(self.get_count(pp, Distinct), 2) # distributed
self.assertEquals(self.db.evaluate(lp), self.db.evaluate(pp))
def test_groupby_to_lesser_groupby(self):
query = """
x = scan({x});
y = select $0, count(*), sum($1) from x;
z = select $0, $2 from y;
store(z, OUTPUT);
""".format(x=self.x_key)
lp = self.get_logical_plan(query)
self.assertEquals(self.get_count(lp, GroupBy), 1)
for op in lp.walk():
if isinstance(op, GroupBy):
self.assertEquals(len(op.grouping_list), 1)
self.assertEquals(len(op.aggregate_list), 2)
pp = self.logical_to_physical(copy.deepcopy(lp))
self.assertEquals(self.get_count(pp, GroupBy), 2) # distributed
for op in pp.walk():
if isinstance(op, GroupBy):
self.assertEquals(len(op.grouping_list), 1)
self.assertEquals(len(op.aggregate_list), 1)
self.assertEquals(self.db.evaluate(lp), self.db.evaluate(pp))
def __run_uda_test(self, uda_state=None):
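        # Helper: build a GroupBy on column 0 whose aggregate is a UDA that sums
        # column 1 through a state variable, then verify that the logical and
        # physical plans evaluate to the same result.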
scan = Scan(self.x_key, self.x_scheme)
init_ex = expression.NumericLiteral(0)
update_ex = expression.PLUS(expression.NamedStateAttributeRef("value"),
AttIndex(1))
emit_ex = expression.UdaAggregateExpression(
expression.NamedStateAttributeRef("value"), uda_state)
statemods = [StateVar("value", init_ex, update_ex)]
log_gb = GroupBy([AttIndex(0)], [emit_ex], scan, statemods)
lp = StoreTemp('OUTPUT', log_gb)
pp = self.logical_to_physical(copy.deepcopy(lp))
self.db.evaluate(lp)
log_result = self.db.get_temp_table('OUTPUT')
self.db.delete_temp_table('OUTPUT')
self.db.evaluate(pp)
phys_result = self.db.get_temp_table('OUTPUT')
self.assertEquals(log_result, phys_result)
self.assertEquals(len(log_result), 15)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 1)
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 1)
return pp
def test_non_decomposable_uda(self):
"""Test that optimization preserves the value of a non-decomposable UDA
"""
pp = self.__run_uda_test()
for op in pp.walk():
if isinstance(op, MyriaShuffleProducer):
self.assertEquals(op.hash_columns, [AttIndex(0)])
self.assertEquals(self.get_count(op, GroupBy), 0)
def test_decomposable_uda(self):
"""Test that optimization preserves the value of decomposable UDAs"""
lemits = [expression.UdaAggregateExpression(
expression.NamedStateAttributeRef("value"))]
remits = copy.deepcopy(lemits)
init_ex = expression.NumericLiteral(0)
update_ex = expression.PLUS(expression.NamedStateAttributeRef("value"),
AttIndex(1))
lstatemods = [StateVar("value", init_ex, update_ex)]
rstatemods = copy.deepcopy(lstatemods)
uda_state = expression.DecomposableAggregateState(
lemits, lstatemods, remits, rstatemods)
pp = self.__run_uda_test(uda_state)
self.assertEquals(self.get_count(pp, GroupBy), 2)
for op in pp.walk():
if isinstance(op, MyriaShuffleProducer):
self.assertEquals(op.hash_columns, [AttIndex(0)])
self.assertEquals(self.get_count(op, GroupBy), 1)
def test_successful_append(self):
"""Insert an append if storing a relation into itself with a
UnionAll."""
query = """
x = scan({x});
y = select $0 from x;
y2 = select $1 from x;
y = y+y2;
store(y, OUTPUT);
""".format(x=self.x_key)
lp = self.get_logical_plan(query, apply_chaining=False)
self.assertEquals(self.get_count(lp, ScanTemp), 5)
self.assertEquals(self.get_count(lp, StoreTemp), 4)
self.assertEquals(self.get_count(lp, AppendTemp), 0)
self.assertEquals(self.get_count(lp, Store), 1)
self.assertEquals(self.get_count(lp, Scan), 1)
pp = self.logical_to_physical(copy.deepcopy(lp))
self.assertEquals(self.get_count(pp, ScanTemp), 4)
self.assertEquals(self.get_count(pp, StoreTemp), 3)
self.assertEquals(self.get_count(pp, AppendTemp), 1)
self.assertEquals(self.get_count(pp, Store), 1)
self.assertEquals(self.get_count(pp, Scan), 1)
self.assertEquals(self.db.evaluate(lp), self.db.evaluate(pp))
def test_failed_append(self):
"""Do not insert an append when the tuples to be appended
depend on the relation itself."""
# NB test in both the left and right directions
# left: y = y + y2
# right: y = y2 + y
query = """
x = scan({x});
y = select $0, $1 from x;
t = empty(a:int);
y2 = select $1, $1 from y;
y = y+y2;
t = empty(a:int);
y3 = select $1, $1 from y;
y = y3+y;
s = empty(a:int);
store(y, OUTPUT);
""".format(x=self.x_key)
lp = self.get_logical_plan(query, dead_code_elimination=False)
self.assertEquals(self.get_count(lp, AppendTemp), 0)
# No AppendTemp
pp = self.logical_to_physical(copy.deepcopy(lp))
self.assertEquals(self.get_count(pp, AppendTemp), 0)
self.assertEquals(self.db.evaluate(lp), self.db.evaluate(pp))
def test_push_work_into_sql(self):
"""Test generation of MyriaQueryScan operator for query with
projects"""
query = """
r3 = scan({x});
intermediate = select a, c from r3;
store(intermediate, OUTPUT);
""".format(x=self.x_key)
pp = self.get_physical_plan(query, push_sql=True)
self.assertEquals(self.get_count(pp, Operator), 2)
self.assertTrue(isinstance(pp.input, MyriaQueryScan))
expected = collections.Counter([(a, c) for (a, b, c) in self.x_data])
self.db.evaluate(pp)
result = self.db.get_table('OUTPUT')
self.assertEquals(result, expected)
def test_push_work_into_sql_2(self):
"""Test generation of MyriaQueryScan operator for query with projects
and a filter"""
query = """
r3 = scan({x});
intermediate = select a, c from r3 where b < 5;
store(intermediate, OUTPUT);
""".format(x=self.x_key)
pp = self.get_physical_plan(query, push_sql=True)
self.assertEquals(self.get_count(pp, Operator), 2)
self.assertTrue(isinstance(pp.input, MyriaQueryScan))
expected = collections.Counter([(a, c)
for (a, b, c) in self.x_data
if b < 5])
self.db.evaluate(pp)
result = self.db.get_table('OUTPUT')
self.assertEquals(result, expected)
def test_no_push_when_shuffle(self):
"""When data is not co-partitioned, the join should not be pushed."""
query = """
r3 = scan({x});
s3 = scan({y});
intermediate = select r3.a, s3.f from r3, s3 where r3.b=s3.e;
store(intermediate, OUTPUT);
""".format(x=self.x_key, y=self.y_key)
pp = self.get_physical_plan(query, push_sql=True)
# Join is not pushed
self.assertEquals(self.get_count(pp, Join), 1)
# The projections are pushed into the QueryScan
self.assertEquals(self.get_count(pp, MyriaQueryScan), 2)
# We should not need any Apply since there is no rename and no other
# project.
self.assertEquals(self.get_count(pp, Apply), 0)
expected = collections.Counter([(a, f)
for (a, b, c) in self.x_data
for (d, e, f) in self.y_data
if b == e])
self.db.evaluate(pp)
result = self.db.get_table('OUTPUT')
self.assertEquals(result, expected)
def test_no_push_when_random(self):
"""Selection with RANDOM() doesn't push through joins"""
query = """
r = scan({x});
s = scan({y});
t = [from r,s where random()*10 > .3 emit *];
store(t, OUTPUT);
""".format(x=self.x_key, y=self.y_key)
lp = self.get_logical_plan(query)
self.assertEquals(self.get_count(lp, Select), 1)
self.assertEquals(self.get_count(lp, CrossProduct), 1)
pp = self.logical_to_physical(lp)
self.assertEquals(self.get_count(pp, Select), 1)
self.assertEquals(self.get_count(pp, CrossProduct), 1)
# The selection should happen after the cross product
for op in pp.walk():
if isinstance(op, Select):
self.assertIsInstance(op.input, MyriaSplitConsumer)
self.assertIsInstance(op.input.input.input, CrossProduct)
def test_partitioning_from_shuffle(self):
"""Store will know the partitioning of a shuffled relation"""
query = """
r = scan({x});
store(r, OUTPUT);
""".format(x=self.x_key)
lp = self.get_logical_plan(query)
# insert a shuffle
tail = lp.args[0].input
lp.args[0].input = Shuffle(tail, [AttIndex(0)])
pp = self.logical_to_physical(lp)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 1)
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 1)
self.assertEquals(pp.partitioning().hash_partitioned,
tuple([AttIndex(0)]))
def test_partitioning_from_scan(self):
"""Store will know the partitioning of a partitioned store relation"""
query = """
r = scan({part});
store(r, OUTPUT);
""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
self.assertEquals(pp.partitioning().hash_partitioned,
self.part_partition.hash_partitioned)
def test_repartitioning(self):
"""Shuffle repartition a partitioned relation"""
query = """
r = scan({part});
store(r, OUTPUT);
""".format(part=self.part_key)
lp = self.get_logical_plan(query)
# insert a shuffle
tail = lp.args[0].input
lp.args[0].input = Shuffle(tail, [AttIndex(2)])
pp = self.logical_to_physical(lp)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 1)
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 1)
self.assertEquals(pp.partitioning().hash_partitioned,
tuple([AttIndex(2)]))
def test_remove_shuffle(self):
"""No shuffle for hash join needed when the input is partitioned"""
query = """
r = scan({part});
s = scan({part});
t = select * from r, s where r.h = s.h;
store(t, OUTPUT);""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 0)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 0)
def test_do_not_remove_shuffle_left(self):
"""Shuffle for hash join needed when the input is partitioned wrong"""
query = """
r = scan({part});
s = scan({part});
t = select * from r, s where r.i = s.h;
store(t, OUTPUT);""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 1)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 1)
def test_do_not_remove_shuffle_both(self):
"""Shuffle for hash join needed when the input is partitioned wrong"""
query = """
r = scan({part});
s = scan({part});
t = select * from r, s where r.i = s.i;
store(t, OUTPUT);""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 2)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 2)
def test_apply_removes_partitioning(self):
"""Projecting out any partitioned attribute
eliminates partition info"""
query = """
r = scan({part});
s = select g,i from r;
store(s, OUTPUT);
""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
self.assertEquals(pp.partitioning().hash_partitioned,
tuple())
def test_apply_maintains_partitioning(self):
"""Projecting out non-partitioned attributes
does not eliminate partition info"""
query = """
r = scan({part});
s = select h, i from r;
store(s, OUTPUT);
""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
self.assertEquals(pp.partitioning().hash_partitioned,
tuple([AttIndex(0)]))
def test_swapping_apply_maintains_partitioning(self):
"""Projecting out non-partitioned attributes
does not eliminate partition info, even for swaps"""
query = """
r = scan({part});
s = select i, h from r;
store(s, OUTPUT);
""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
self.assertEquals(pp.partitioning().hash_partitioned,
tuple([AttIndex(1)]))
def test_projecting_join_maintains_partitioning(self):
"""Projecting join: projecting out non-partitioned attributes
does not eliminate partition info.
"""
query = """
r = scan({part});
s = scan({part});
t = select r.h, r.i, s.h, s.i from r, s where r.h = s.h;
store(t, OUTPUT);""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
# shuffles should be removed
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 0)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 0)
# TODO: this test case forces conservative behavior
        # (in general, info could be h($0) && h($2))
self.assertEquals(pp.partitioning().hash_partitioned,
tuple([AttIndex(0)]))
def test_no_shuffle_for_partitioned_distinct(self):
"""Do not shuffle for Distinct if already partitioned"""
query = """
r = scan({part});
t = select distinct r.h from r;
store(t, OUTPUT);""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
# shuffles should be removed and distinct not decomposed into two
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 0)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 0)
self.assertEquals(self.get_count(pp, MyriaDupElim), 1)
self.db.evaluate(pp)
result = self.db.get_table('OUTPUT')
expected = dict([((h,), 1) for _, h, _ in self.part_data])
self.assertEquals(result, expected)
def test_no_shuffle_for_partitioned_groupby(self):
"""Do not shuffle for groupby if already partitioned"""
query = """
r = scan({part});
t = select r.h, MIN(r.i) from r;
store(t, OUTPUT);""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
# shuffles should be removed and the groupby not decomposed into two
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 0)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 0)
self.assertEquals(self.get_count(pp, MyriaGroupBy), 1)
def test_partition_aware_groupby_into_sql(self):
"""No shuffle for groupby also causes it to be pushed into sql"""
query = """
r = scan({part});
t = select r.h, MIN(r.i) from r;
store(t, OUTPUT);""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp, push_sql=True,
push_sql_grouping=True)
# shuffles should be removed and the groupby not decomposed into two
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 0)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 0)
# should be pushed
self.assertEquals(self.get_count(pp, MyriaGroupBy), 0)
self.assertEquals(self.get_count(pp, MyriaQueryScan), 1)
self.db.evaluate(pp)
result = self.db.get_table('OUTPUT')
temp = dict([(h, sys.maxsize) for _, h, _ in self.part_data])
for _, h, i in self.part_data:
temp[h] = min(temp[h], i)
expected = dict(((h, i), 1) for h, i in temp.items())
self.assertEquals(result, expected)
def test_partition_aware_distinct_into_sql(self):
"""No shuffle for distinct also causes it to be pushed into sql"""
query = """
r = scan({part});
t = select distinct r.h from r;
store(t, OUTPUT);""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp, push_sql=True)
# shuffles should be removed and the groupby not decomposed into two
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 0)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 0)
# should be pushed
self.assertEquals(self.get_count(pp, MyriaGroupBy), 0) # sanity
self.assertEquals(self.get_count(pp, MyriaDupElim), 0)
self.assertEquals(self.get_count(pp, MyriaQueryScan), 1)
self.db.evaluate(pp)
result = self.db.get_table('OUTPUT')
expected = dict([((h,), 1) for _, h, _ in self.part_data])
self.assertEquals(result, expected)
def test_push_half_groupby_into_sql(self):
"""Push the first group by of decomposed group by into sql"""
query = """
r = scan({part});
t = select r.i, MIN(r.h) from r;
store(t, OUTPUT);""".format(part=self.part_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp, push_sql=True,
push_sql_grouping=True)
# wrong partition, so still has shuffle
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 1)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 1)
# one group by should be pushed
self.assertEquals(self.get_count(pp, MyriaGroupBy), 1)
self.assertEquals(self.get_count(pp, MyriaQueryScan), 1)
self.db.evaluate(pp)
result = self.db.get_table('OUTPUT')
temp = dict([(i, sys.maxsize) for _, _, i in self.part_data])
for _, h, i in self.part_data:
temp[i] = min(temp[i], h)
expected = dict(((k, v), 1) for k, v in temp.items())
self.assertEquals(result, expected)
def _check_aggregate_functions_pushed(
self,
func,
expected,
override=False):
if override:
agg = func
else:
agg = "{func}(r.i)".format(func=func)
query = """
r = scan({part});
t = select r.h, {agg} from r;
store(t, OUTPUT);""".format(part=self.part_key, agg=agg)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp, push_sql=True,
push_sql_grouping=True)
self.assertEquals(self.get_count(pp, MyriaQueryScan), 1)
for op in pp.walk():
if isinstance(op, MyriaQueryScan):
self.assertTrue(re.search(expected, op.sql))
def test_aggregate_AVG_pushed(self):
"""AVG is translated properly for postgresql. This is
a function not in SQLAlchemy"""
self._check_aggregate_functions_pushed(
aggregate.AVG.__name__, 'avg')
def test_aggregate_STDDEV_pushed(self):
"""STDEV is translated properly for postgresql. This is
a function that is named differently in Raco and postgresql"""
self._check_aggregate_functions_pushed(
aggregate.STDEV.__name__, 'stddev_samp')
def test_aggregate_COUNTALL_pushed(self):
"""COUNTALL is translated properly for postgresql. This is
a function that is expressed differently in Raco and postgresql"""
# MyriaL parses count(*) to Raco COUNTALL. And COUNTALL
# should currently (under the no nulls semantics of Raco/Myria)
# translate to COUNT(something)
self._check_aggregate_functions_pushed(
'count(*)', r'count[(][a-zA-Z.]+[)]', True)
def test_debroadcast_broadcasted_relation(self):
"""Test that a shuffle over a broadcasted relation debroadcasts it"""
query = """
a = scan({broad});
b = select j,k,l from a where j < 5;
store(b, OUTPUT, [j, k]);""".format(broad=self.broad_key)
pp = self.get_physical_plan(query)
def find_scan(_op):
if isinstance(_op, MyriaQueryScan) or isinstance(_op, MyriaScan):
if _op._debroadcast:
yield True
else:
yield False
else:
yield False
self.assertEquals(self.get_count(pp, MyriaSelect), 1)
self.assertTrue(any(pp.postorder(find_scan)))
def test_broadcast_store(self):
query = """
r = scan({X});
store(r, OUTPUT, broadcast());
""".format(X=self.x_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
self.assertEquals(self.get_count(pp, MyriaBroadcastConsumer), 1)
self.assertEquals(self.get_count(pp, MyriaBroadcastProducer), 1)
self.assertEquals(pp.partitioning().broadcasted,
RepresentationProperties(
broadcasted=True).broadcasted)
def test_broadcast_join(self):
query = """
b = scan({broad});
x = scan({X});
o = select * from b, x where b.j==x.a;
store(o, OUTPUT);
""".format(X=self.x_key, broad=self.broad_key)
lp = self.get_logical_plan(query)
pp = self.logical_to_physical(lp)
self.assertEquals(self.get_count(pp, MyriaBroadcastProducer), 0)
self.assertEquals(self.get_count(pp, MyriaBroadcastConsumer), 0)
self.assertEquals(self.get_count(pp, MyriaShuffleProducer), 1)
self.assertEquals(self.get_count(pp, MyriaShuffleConsumer), 1)
self.assertEquals(pp.partitioning().broadcasted,
RepresentationProperties().broadcasted)
def test_flatten_unionall(self):
"""Test flattening a chain of UnionAlls"""
query = """
X = scan({x});
a = (select $0 from X) + [from X emit $0] + [from X emit $1];
store(a, a);
""".format(x=self.x_key)
lp = self.get_logical_plan(query)
# should be UNIONAll([UNIONAll([expr_1, expr_2]), expr_3])
self.assertEquals(self.get_count(lp, UnionAll), 2)
pp = self.logical_to_physical(lp)
# should be UNIONALL([expr_1, expr_2, expr_3])
self.assertEquals(self.get_count(pp, MyriaUnionAll), 1)
def list_ops_in_json(self, plan, type):
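        # Collect every operator whose opType matches, across all fragments of
        # all subplans in the compiled JSON plan.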
ops = []
for p in plan['plan']['plans']:
for frag in p['fragments']:
for op in frag['operators']:
if op['opType'] == type:
ops.append(op)
return ops
def test_cc(self):
"""Test Connected Components"""
query = """
E = scan(public:adhoc:Z);
V = select distinct E.src as x from E;
do
CC = [nid, MIN(cid) as cid] <-
[from V emit V.x as nid, V.x as cid] +
[from E, CC where E.src = CC.nid emit E.dst as nid, CC.cid];
until convergence pull_idb;
store(CC, CC);
"""
lp = self.get_logical_plan(query, async_ft='REJOIN')
pp = self.logical_to_physical(lp, async_ft='REJOIN')
for op in pp.children():
for child in op.children():
if isinstance(child, MyriaIDBController):
# for checking rule RemoveSingleSplit
assert not isinstance(op, MyriaSplitProducer)
plan = compile_to_json(query, lp, pp, 'myrial', async_ft='REJOIN')
joins = [op for op in pp.walk()
if isinstance(op, MyriaSymmetricHashJoin)]
assert len(joins) == 1
assert joins[0].pull_order_policy == 'RIGHT'
self.assertEquals(plan['ftMode'], 'REJOIN')
idbs = self.list_ops_in_json(plan, 'IDBController')
self.assertEquals(len(idbs), 1)
self.assertEquals(idbs[0]['argState']['type'], 'KeepMinValue')
self.assertEquals(idbs[0]['sync'], False) # default value: async
sps = self.list_ops_in_json(plan, 'ShuffleProducer')
assert any(sp['argBufferStateType']['type'] == 'KeepMinValue'
for sp in sps if 'argBufferStateType' in sp and
sp['argBufferStateType'] is not None)
def test_lca(self):
"""Test LCA"""
query = """
Cite = scan(public:adhoc:X);
Paper = scan(public:adhoc:Y);
do
Ancestor = [a,b,MIN(dis) as dis] <- [from Cite emit a, b, 1 as dis] +
[from Ancestor, Cite
where Ancestor.b = Cite.a
emit Ancestor.a, Cite.b, Ancestor.dis+1];
LCA = [pid1,pid2,LEXMIN(dis,yr,anc)] <-
[from Ancestor as A1, Ancestor as A2, Paper
where A1.b = A2.b and A1.b = Paper.d and A1.a < A2.a
emit A1.a as pid1, A2.a as pid2,
greater(A1.dis, A2.dis) as dis,
Paper.e as yr, A1.b as anc];
until convergence sync;
store(LCA, LCA);
"""
lp = self.get_logical_plan(query, async_ft='REJOIN')
pp = self.logical_to_physical(lp, async_ft='REJOIN')
plan = compile_to_json(query, lp, pp, 'myrial', async_ft='REJOIN')
idbs = self.list_ops_in_json(plan, 'IDBController')
self.assertEquals(len(idbs), 2)
self.assertEquals(idbs[0]['argState']['type'], 'KeepMinValue')
self.assertEquals(idbs[1]['argState']['type'], 'KeepMinValue')
self.assertEquals(len(idbs[1]['argState']['valueColIndices']), 3)
self.assertEquals(idbs[0]['sync'], True)
self.assertEquals(idbs[1]['sync'], True)
def test_galaxy_evolution(self):
"""Test Galaxy Evolution"""
query = """
GoI = scan(public:adhoc:X);
Particles = scan(public:adhoc:Y);
do
Edges = [time,gid1,gid2,COUNT(*) as num] <-
[from Particles as P1, Particles as P2, Galaxies
where P1.d = P2.d and P1.f+1 = P2.f and
P1.f = Galaxies.time and Galaxies.gid = P1.e
emit P1.f as time, P1.e as gid1, P2.e as gid2];
Galaxies = [time, gid] <-
[from GoI emit 1 as time, GoI.a as gid] +
[from Galaxies, Edges
where Galaxies.time = Edges.time and
Galaxies.gid = Edges.gid1 and Edges.num >= 4
emit Galaxies.time+1, Edges.gid2 as gid];
until convergence async build_EDB;
store(Galaxies, Galaxies);
"""
lp = self.get_logical_plan(query, async_ft='REJOIN')
for op in lp.walk():
if isinstance(op, Select):
# for checking rule RemoveEmptyFilter
assert(op.condition is not None)
pp = self.logical_to_physical(lp, async_ft='REJOIN')
plan = compile_to_json(query, lp, pp, 'myrial', async_ft='REJOIN')
joins = [op for op in pp.walk()
if isinstance(op, MyriaSymmetricHashJoin)]
# The two joins for Edges
assert len(
[j for j in joins if j.pull_order_policy == 'LEFT_EOS']) == 2
idbs = self.list_ops_in_json(plan, 'IDBController')
self.assertEquals(len(idbs), 2)
self.assertEquals(idbs[0]['argState']['type'], 'CountFilter')
self.assertEquals(idbs[1]['argState']['type'], 'DupElim')
self.assertEquals(idbs[0]['sync'], False)
self.assertEquals(idbs[1]['sync'], False)
super(OptimizerTest, self).new_processor()
query = """
GoI = scan(public:adhoc:X);
Particles = scan(public:adhoc:Y);
do
Edges = [time,gid1,gid2,COUNT(*) as num] <-
[from Particles as P1, Particles as P2, Galaxies
where P1.d = P2.d and P1.f+1 = P2.f and
P1.f = Galaxies.time and Galaxies.gid = P1.e
emit P1.f as time, P1.e as gid1, P2.e as gid2];
Galaxies = [time, gid] <-
[from GoI emit 1 as time, GoI.a as gid] +
[from Galaxies, Edges
where Galaxies.time = Edges.time and
Galaxies.gid = Edges.gid1 and Edges.num > 3
emit Galaxies.time+1, Edges.gid2 as gid];
until convergence async build_EDB;
store(Galaxies, Galaxies);
"""
lp = self.get_logical_plan(query, async_ft='REJOIN')
pp = self.logical_to_physical(lp, async_ft='REJOIN')
plan_gt = compile_to_json(query, lp, pp, 'myrial', async_ft='REJOIN')
idbs_gt = self.list_ops_in_json(plan_gt, 'IDBController')
self.assertEquals(idbs_gt[0], idbs[0])
def test_push_select_below_shuffle(self):
"""Test pushing selections below shuffles."""
lp = StoreTemp('OUTPUT',
Select(expression.LTEQ(AttRef("a"), AttRef("b")),
Shuffle(
Scan(self.x_key, self.x_scheme),
[AttRef("a"), AttRef("b")], 'Hash'))) # noqa
self.assertEquals(self.get_count(lp, StoreTemp), 1)
self.assertEquals(self.get_count(lp, Select), 1)
self.assertEquals(self.get_count(lp, Shuffle), 1)
self.assertEquals(self.get_count(lp, Scan), 1)
pp = self.logical_to_physical(lp)
self.assertIsInstance(pp.input, MyriaShuffleConsumer)
self.assertIsInstance(pp.input.input, MyriaShuffleProducer)
self.assertIsInstance(pp.input.input.input, Select)
self.assertIsInstance(pp.input.input.input.input, Scan)
def test_insert_shuffle_after_filescan(self):
"""Test automatically inserting round-robin shuffle after FileScan."""
query = """
X = load('INPUT', csv(schema(a:int,b:int)));
store(X, 'OUTPUT');"""
lp = self.get_logical_plan(query)
self.assertEquals(self.get_count(lp, Store), 1)
self.assertEquals(self.get_count(lp, FileScan), 1)
pp = self.logical_to_physical(lp)
self.assertIsInstance(pp.input, MyriaShuffleConsumer)
self.assertIsInstance(pp.input.input, MyriaShuffleProducer)
self.assertEquals(pp.input.input.shuffle_type, 'RoundRobin')
self.assertIsInstance(pp.input.input.input, FileScan)
def test_elide_extra_shuffle_after_filescan(self):
"""Test eliding default round-robin shuffle after FileScan
if shuffle is already present.
"""
query = """
X = load('INPUT', csv(schema(a:int,b:int)));
store(X, 'OUTPUT', hash(a, b));"""
lp = self.get_logical_plan(query)
self.assertEquals(self.get_count(lp, Store), 1)
self.assertEquals(self.get_count(lp, Shuffle), 1)
self.assertEquals(self.get_count(lp, FileScan), 1)
pp = self.logical_to_physical(lp)
self.assertIsInstance(pp.input, MyriaShuffleConsumer)
self.assertIsInstance(pp.input.input, MyriaShuffleProducer)
self.assertEquals(pp.input.input.shuffle_type, 'Hash')
self.assertIsInstance(pp.input.input.input, FileScan)
def test_push_select_below_shuffle_inserted_for_filescan(self):
"""Test pushing selections below shuffles
automatically inserted after FileScan.
"""
query = """
X = load('INPUT', csv(schema(a:int,b:int)));
Y = select * from X where a > b;
store(Y, 'OUTPUT');"""
lp = self.get_logical_plan(query)
self.assertEquals(self.get_count(lp, Store), 1)
self.assertEquals(self.get_count(lp, Select), 1)
self.assertEquals(self.get_count(lp, FileScan), 1)
pp = self.logical_to_physical(lp)
self.assertIsInstance(pp.input, MyriaShuffleConsumer)
self.assertIsInstance(pp.input.input, MyriaShuffleProducer)
self.assertIsInstance(pp.input.input.input, Select)
self.assertIsInstance(pp.input.input.input.input, FileScan)
|
uwescience/raco
|
raco/myrial/optimizer_tests.py
|
Python
|
bsd-3-clause
| 58,294
|
from __future__ import absolute_import
import socket
import struct
from .consts import *
import six
def uint8(n):
return struct.pack("B", n)
def uint64(n):
return struct.pack("!Q", n)
class Error(Exception):
def __init__(self, code):
self.code = code
def __str__(self):
return strerror(self.code)
def pack(cmd, *args):
msg = []
for a in args:
if isinstance(a, six.integer_types):
msg.append(struct.pack("!I", a))
elif isinstance(a, bytes):
msg.append(a)
else:
raise TypeError(str(type(a))+str(a))
header = struct.pack("!II", cmd, sum(len(i) for i in msg))
return header + b''.join(msg)
def unpack(fmt, buf):
if not fmt.startswith("!"):
fmt = "!" + fmt
return struct.unpack(fmt, buf[:struct.calcsize(fmt)])
class FileInfo:
def __init__(self, inode, name, ftype, mode, uid, gid,
atime, mtime, ctime, nlink, length):
self.inode = inode
self.name = name
self.ftype = self._get_ftype(ftype)
if ftype == TYPE_DIRECTORY:
mode |= S_IFDIR
elif ftype == TYPE_SYMLINK:
mode |= S_IFLNK
elif ftype == TYPE_FILE:
mode |= S_IFREG
self.mode = mode
self.uid = uid
self.gid = gid
self.atime = atime
self.mtime = mtime
self.ctime = ctime
self.nlink = nlink
self.length = length
        self.blocks = (length + 511) // 512  # integer count of 512-byte blocks
def __repr__(self):
return ("FileInfo(%s, inode=%d, type=%s, length=%d)" %
(self.name, self.inode, self.ftype, self.length))
def is_symlink(self):
return self.ftype == TYPE_SYMLINK
def _get_ftype(self, ftype):
ftype_str_map = {1: TYPE_FILE, 2: TYPE_DIRECTORY, 3: TYPE_SYMLINK,
4: TYPE_FIFO, 5: TYPE_BLOCKDEV, 6: TYPE_CHARDEV,
7: TYPE_SOCKET, 8: TYPE_TRASH, 9: TYPE_SUSTAINED}
if ftype in ftype_str_map:
return ftype_str_map[ftype]
return '?'
def attrToFileInfo(inode, attrs, name='', version=(0, 0, 0)):
if len(attrs) != 35:
raise Exception('bad length')
if version < (1, 7, 32):
return FileInfo(inode, name, *struct.unpack("!BHIIIIIIQ", attrs))
else:
return _unpack_to_file_info(inode, attrs, name)
def _unpack_to_file_info(inode, attrs, name):
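    # Newer protocol versions pack the file type (high 4 bits) and the
    # permission mode (low 12 bits) into a single 16-bit field.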
tup = struct.unpack("!BHIIIIIIQ", attrs)
type_mode = tup[1]
ftype = (type_mode & 0xf000) >> 12
mode = type_mode & 0x0fff
return FileInfo(inode, name, ftype, mode, *(tup[2:]))
def read_chunk(host, port, chunkid, version, size, offset=0):
if offset + size > CHUNKSIZE:
raise ValueError("size too large %s > %s" %
(size, CHUNKSIZE-offset))
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.settimeout(10)
conn.connect((host, port))
msg = pack(CLTOCS_READ, uint64(chunkid), version, offset, size)
n = conn.send(msg)
while n < len(msg):
if not n:
raise IOError("write failed")
msg = msg[n:]
n = conn.send(msg)
def recv(n):
d = conn.recv(n)
while len(d) < n:
nd = conn.recv(n-len(d))
if not nd:
raise IOError("not enough data")
d += nd
return d
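    # Parse CSTOCL_* response messages until the requested byte range is fully
    # consumed; data payloads are yielded to the caller as they arrive.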
while size > 0:
cmd, l = unpack("II", recv(8))
if cmd == CSTOCL_READ_STATUS:
if l != 9:
raise Exception("readblock: READ_STATUS incorrect message size")
cid, code = unpack("QB", recv(l))
if cid != chunkid:
raise Exception("readblock; READ_STATUS incorrect chunkid")
conn.close()
return
elif cmd == CSTOCL_READ_DATA:
            if l < 20:
                raise Exception("readblock; READ_DATA incorrect message size")
            cid, bid, boff, bsize, crc = unpack("QHHII", recv(20))
            if cid != chunkid:
                raise Exception("readblock; READ_DATA incorrect chunkid")
            if l != 20 + bsize:
                raise Exception("readblock; READ_DATA incorrect message size")
            if bsize == 0:  # FIXME
raise Exception("readblock; empty block")
if bid != offset >> 16:
raise Exception("readblock; READ_DATA incorrect block number")
if boff != offset & 0xFFFF:
raise Exception("readblock; READ_DATA incorrect block offset")
breq = 65536 - boff
if size < breq:
breq = size
if bsize != breq:
raise Exception("readblock; READ_DATA incorrect block size")
while breq > 0:
data = conn.recv(breq)
if not data:
raise IOError("unexpected ending: need %d" % breq)
yield data
breq -= len(data)
offset += bsize
size -= bsize
else:
raise Exception("readblock; unknown message: %s" % cmd)
conn.close()
|
windreamer/dpark
|
dpark/file_manager/utils.py
|
Python
|
bsd-3-clause
| 5,115
|
# -*- coding: utf-8 -*-
"""Skunked terms.
---
layout: post
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
title: Skunked terms.
date: 2014-06-10 12:31:19
categories: writing
---
Archaism.
"""
from proselint.tools import existence_check, memoize
@memoize
def check_skunked(text):
"""Check the text."""
err = "garner.skunked_terms"
msg = u"""'{}' is a bit of a skunked term — impossible to use without issue.
Find some other way to say it."""
skunked_terms = [
"bona fides",
"deceptively",
"decimate",
"effete",
"fulsome",
"hopefully",
"impassionate",
"Thankfully,",
]
return existence_check(text, skunked_terms, err, msg)
|
jstewmon/proselint
|
proselint/checks/garner/skunked_terms.py
|
Python
|
bsd-3-clause
| 776
|
from django.forms import ModelForm
from .models import TaggedItem
class TagItForm(ModelForm):
class Meta:
model = TaggedItem
fields = ['tag']
|
acidjunk/django-tagger
|
tagger/forms.py
|
Python
|
bsd-3-clause
| 165
|
# -*- coding: utf-8 -*-
import pytest
from _utils import foo, spin
from profiling.profiler import Profiler, ProfilerWrapper
class NullProfiler(Profiler):
def run(self):
yield
class NullProfilerWrapper(ProfilerWrapper):
def run(self):
with self.profiler:
yield
@pytest.fixture
def profiler():
return NullProfiler()
def test_exclude_code(profiler):
foo_code = foo().f_code
with profiler:
assert foo_code not in profiler.stats
profiler.stats.ensure_child(foo_code)
assert foo_code in profiler.stats
profiler.exclude_code(foo_code)
assert foo_code not in profiler.stats
profiler.exclude_code(foo_code)
assert foo_code not in profiler.stats
def test_result(profiler):
__, cpu_time, wall_time = profiler.result()
assert cpu_time == wall_time == 0.0
with profiler:
spin(0.1)
__, cpu_time, wall_time = profiler.result()
assert cpu_time > 0.0
assert wall_time >= 0.1
def test_wrapper(profiler):
wrapper = NullProfilerWrapper(profiler)
assert isinstance(wrapper, Profiler)
assert wrapper.table_class is profiler.table_class
assert wrapper.stats is profiler.stats
__, cpu_time, wall_time = wrapper.result()
assert cpu_time == wall_time == 0.0
with wrapper:
assert wrapper.is_running()
assert profiler.is_running()
assert not wrapper.is_running()
assert not profiler.is_running()
|
JeanPaulShapo/profiling
|
test/test_profiler.py
|
Python
|
bsd-3-clause
| 1,471
|
import os
import pytest
if __name__ == '__main__':
os.environ['PYWEBVIEW_GUI'] = 'cef'
pytest.main()
|
r0x0r/pywebview
|
tests/run_cef.py
|
Python
|
bsd-3-clause
| 110
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 5, transform = "Logit", sigma = 0.0, exog_count = 0, ar_order = 12);
|
antoinecarme/pyaf
|
tests/artificial/transf_Logit/trend_LinearTrend/cycle_5/ar_12/test_artificial_1024_Logit_LinearTrend_5_12_0.py
|
Python
|
bsd-3-clause
| 263
|
"""
Mean log loss from 5-fold CV: 0.480065955962
"""
import copy
import itertools
import numpy as np
import lasagne
import math
import os
import theano
import theano.tensor as T
import time
from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params
from lasagne.nonlinearities import rectify, softmax
from lasagne.objectives import categorical_crossentropy, Objective
from lasagne.updates import adagrad
from sklearn import feature_extraction
from sklearn.base import BaseEstimator
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.utils import check_random_state
from otto_utils import consts, utils
MODEL_NAME = 'model_09_nn_adagrad'
MODE = 'cv' # cv|submission|holdout|tune
class NeuralNetwork(BaseEstimator):
def __init__(self, n_hidden=20, max_epochs=150, batch_size=200,
lr=0.01, epsilon=0.9, dropout=0.5, valid_ratio=0.0,
use_valid=False, verbose=0, random_state=None):
self.n_hidden = n_hidden
self.max_epochs = max_epochs
self.batch_size = batch_size
self.lr = lr
self.epsilon = epsilon
self.dropout = dropout
self.valid_ratio = valid_ratio
self.use_valid = use_valid
self.verbose = verbose
self.random_state = random_state
# State
self.score_ = None
self.classes_ = None
self.n_classes_ = None
self.model = None
def fit(self, data, targets, sample_weight=None):
self.classes_, indices = np.unique(targets, return_inverse=True)
self.n_classes_ = self.classes_.shape[0]
random_state = check_random_state(self.random_state)
# Shuffle data and eventually split on train and validation sets
if self.valid_ratio > 0:
strat_shuffled_split = StratifiedShuffleSplit(targets, test_size=self.valid_ratio,
n_iter=1, random_state=self.random_state)
train_index, valid_index = [s for s in strat_shuffled_split][0]
X_train, y_train = data[train_index], targets[train_index]
X_valid, y_valid = data[valid_index], targets[valid_index]
else:
X_train, y_train = data, targets
X_valid, y_valid = np.array([]), np.array([])
if self.verbose > 5:
print 'X_train: %s, y_train: %s' % (X_train.shape, y_train.shape)
if self.use_valid:
print 'X_valid: %s, y_valid: %s' % (X_valid.shape, y_valid.shape)
# Prepare theano variables
dataset = dict(
X_train=theano.shared(lasagne.utils.floatX(X_train)),
y_train=T.cast(theano.shared(y_train), 'int32'),
X_valid=theano.shared(lasagne.utils.floatX(X_valid)),
y_valid=T.cast(theano.shared(y_valid), 'int32'),
num_examples_train=X_train.shape[0],
num_examples_valid=X_valid.shape[0],
input_dim=X_train.shape[1],
output_dim=self.n_classes_,
)
if self.verbose > 0:
print "Building model and compiling functions..."
output_layer = self.build_model(dataset['input_dim'])
iter_funcs = self.create_iter_functions(dataset, output_layer)
if self.verbose > 0:
print "Starting training..."
now = time.time()
results = []
try:
for epoch in self.train(iter_funcs, dataset, output_layer):
if self.verbose > 1:
print "Epoch {} of {} took {:.3f}s".format(
epoch['number'], self.max_epochs, time.time() - now)
now = time.time()
results.append([epoch['number'], epoch['train_loss'], epoch['valid_loss']])
if self.verbose > 1:
print " training loss:\t\t{:.6f}".format(epoch['train_loss'])
print " validation loss:\t\t{:.6f}".format(epoch['valid_loss'])
print " validation accuracy:\t\t{:.2f} %%".format(
epoch['valid_accuracy'] * 100)
if epoch['number'] >= self.max_epochs:
break
if self.verbose > 0:
print 'Minimum validation error: %f (epoch %d)' % \
(epoch['best_val_error'], epoch['best_val_iter'])
except KeyboardInterrupt:
pass
return self
def predict(self, data):
preds, _ = self.make_predictions(data)
return preds
def predict_proba(self, data):
_, proba = self.make_predictions(data)
return proba
def score(self):
return self.score_
# Private methods
def build_model(self, input_dim):
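        # Three fully connected ReLU hidden layers, each followed by dropout,
        # with a softmax output layer over the target classes.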
l_in = InputLayer(shape=(self.batch_size, input_dim))
l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
return l_out
def create_iter_functions(self, dataset, output_layer, X_tensor_type=T.matrix):
batch_index = T.iscalar('batch_index')
X_batch = X_tensor_type('x')
y_batch = T.ivector('y')
batch_slice = slice(batch_index * self.batch_size, (batch_index + 1) * self.batch_size)
objective = Objective(output_layer, loss_function=categorical_crossentropy)
loss_train = objective.get_loss(X_batch, target=y_batch)
loss_eval = objective.get_loss(X_batch, target=y_batch, deterministic=True)
pred = T.argmax(output_layer.get_output(X_batch, deterministic=True), axis=1)
proba = output_layer.get_output(X_batch, deterministic=True)
accuracy = T.mean(T.eq(pred, y_batch), dtype=theano.config.floatX)
all_params = get_all_params(output_layer)
updates = adagrad(loss_train, all_params, self.lr, self.epsilon)
iter_train = theano.function(
[batch_index], loss_train,
updates=updates,
givens={
X_batch: dataset['X_train'][batch_slice],
y_batch: dataset['y_train'][batch_slice],
},
on_unused_input='ignore',
)
iter_valid = None
if self.use_valid:
iter_valid = theano.function(
[batch_index], [loss_eval, accuracy, proba],
givens={
X_batch: dataset['X_valid'][batch_slice],
y_batch: dataset['y_valid'][batch_slice],
},
)
return dict(train=iter_train, valid=iter_valid)
def create_test_function(self, dataset, output_layer, X_tensor_type=T.matrix):
batch_index = T.iscalar('batch_index')
X_batch = X_tensor_type('x')
batch_slice = slice(batch_index * self.batch_size, (batch_index + 1) * self.batch_size)
pred = T.argmax(output_layer.get_output(X_batch, deterministic=True), axis=1)
proba = output_layer.get_output(X_batch, deterministic=True)
iter_test = theano.function(
[batch_index], [pred, proba],
givens={
X_batch: dataset['X_test'][batch_slice],
},
)
return dict(test=iter_test)
def train(self, iter_funcs, dataset, output_layer):
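        # Generator: runs one pass over the training batches per iteration and
        # yields a dict of per-epoch statistics; the caller decides when to stop.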
num_batches_train = dataset['num_examples_train'] // self.batch_size
num_batches_valid = int(math.ceil(dataset['num_examples_valid'] / float(self.batch_size)))
best_val_err = 100
best_val_iter = -1
for epoch in itertools.count(1):
batch_train_losses = []
for b in range(num_batches_train):
batch_train_loss = iter_funcs['train'](b)
batch_train_losses.append(batch_train_loss)
avg_train_loss = np.mean(batch_train_losses)
batch_valid_losses = []
batch_valid_accuracies = []
batch_valid_probas = []
if self.use_valid:
for b in range(num_batches_valid):
batch_valid_loss, batch_valid_accuracy, batch_valid_proba = iter_funcs['valid'](b)
batch_valid_losses.append(batch_valid_loss)
batch_valid_accuracies.append(batch_valid_accuracy)
batch_valid_probas.append(batch_valid_proba)
avg_valid_loss = np.mean(batch_valid_losses)
avg_valid_accuracy = np.mean(batch_valid_accuracies)
if (best_val_err > avg_valid_loss and self.use_valid) or\
(epoch == self.max_epochs and not self.use_valid):
best_val_err = avg_valid_loss
best_val_iter = epoch
# Save model
self.score_ = best_val_err
self.model = copy.deepcopy(output_layer)
yield {
'number': epoch,
'train_loss': avg_train_loss,
'valid_loss': avg_valid_loss,
'valid_accuracy': avg_valid_accuracy,
'best_val_error': best_val_err,
'best_val_iter': best_val_iter,
}
def make_predictions(self, data):
dataset = dict(
X_test=theano.shared(lasagne.utils.floatX(data)),
num_examples_test=data.shape[0],
input_dim=data.shape[1],
output_dim=self.n_classes_,
)
iter_funcs = self.create_test_function(dataset, self.model)
num_batches_test = int(math.ceil(dataset['num_examples_test'] / float(self.batch_size)))
test_preds, test_probas = np.array([]), None
for b in range(num_batches_test):
batch_test_pred, batch_test_proba = iter_funcs['test'](b)
test_preds = np.append(test_preds, batch_test_pred)
test_probas = np.append(test_probas, batch_test_proba, axis=0) if test_probas is not None else batch_test_proba
return test_preds, test_probas
if __name__ == '__main__':
train, labels, test, _, _ = utils.load_data()
# Preprocess data - transform counts to TFIDF features
tfidf = feature_extraction.text.TfidfTransformer(smooth_idf=False)
train = np.append(train, tfidf.fit_transform(train).toarray(), axis=1)
test = np.append(test, tfidf.transform(test).toarray(), axis=1)
clf = NeuralNetwork(512, 110, 128, 0.004438538808932511, 1.6674644616533133e-14, 0.2137591043893735,
.02, True, 10, random_state=23)
if MODE == 'cv':
scores, predictions = utils.make_blender_cv(clf, train, labels, calibrate=False)
print 'CV:', scores, 'Mean log loss:', np.mean(scores)
utils.write_blender_data(consts.BLEND_PATH, MODEL_NAME + '.csv', predictions)
elif MODE == 'submission':
clf.fit(train, labels)
predictions = clf.predict_proba(test)
utils.save_submission(consts.DATA_SAMPLE_SUBMISSION_PATH,
os.path.join(consts.ENSEMBLE_PATH, MODEL_NAME + '.csv'),
predictions)
elif MODE == 'holdout':
score = utils.hold_out_evaluation(clf, train, labels, calibrate=False)
print 'Log loss:', score
else:
print 'Unknown mode'
|
ahara/kaggle_otto
|
otto/model/model_09_nn_adagrad/nn_adagrad.py
|
Python
|
bsd-3-clause
| 11,589
|
import os
import shutil
import tempfile
import re
from StringIO import StringIO
# import zipfile
import custom_zip as zipfile
from xml.etree import ElementTree
try:
# Python 3
from urllib.parse import urlparse, urljoin
except ImportError:
# Python 2
from urlparse import urlparse, urljoin
import metadata
import utils
import exceptions
# XML names
_XML_ROOT_ELEM = 'omex:omexManifest'
_XML_ROOT_NS = 'http://identifiers.org/combine.specifications/omex-manifest'
_XML_CONTENT_TAG = 'omex:content'
_XML_CONTENT_LOCATION = 'omex:location'
_XML_CONTENT_FORMAT = 'omex:format'
_XML_CONTENT_MASTER = 'omex:master'
_XML_CONTENT_ARCHIVE_TYPE = 'http://identifiers.org/combine.specifications/omex'
_XML_CONTENT_METADATA_TYPE = 'http://identifiers.org/combine.specifications/omex-metadata'
_XML_NS = {
'omex': _XML_ROOT_NS,
metadata.Namespace.RDF: metadata.Namespace.RDF_URI,
metadata.Namespace.DC: metadata.Namespace.DC_URI,
metadata.Namespace.VCARD: metadata.Namespace.VCARD_URI,
metadata.Namespace.BQMODEL: metadata.Namespace.BQMODEL_URI,
}
# register namespaces to ElementTree
for prefix, url in _XML_NS.items():
ElementTree.register_namespace(prefix, url)
class CombineArchive(metadata.MetaDataHolder):
"""
base class for reading, creating and modifying COMBINE Archives
"""
# location of manifest and metadata
MANIFEST_LOCATION = 'manifest.xml'
METADATA_LOCATION = 'metadata.rdf'
# paths used in the manifest to assign meta data to the archive itself
ARCHIVE_REFERENCE = ('.', '/')
def __init__(self, archive):
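        # open the underlying ZIP in append mode, then load the manifest
        # entries and any RDF metadata it references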
super(CombineArchive, self).__init__()
self._archive = archive
self._zip = zipfile.ZipFile(archive, mode='a')
self.entries = dict()
self._read_manifest()
self._read_metadata()
    def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _read_manifest(self):
"""
internal function.
Reads the manifest file of a COMBINE Archive
"""
try:
with self._zip.open(self.MANIFEST_LOCATION) as manifest_file:
manifest = ElementTree.fromstring(manifest_file.read())
except KeyError:
            # manifest does not exist, probably an empty/new archive
return False
except ElementTree.ParseError as e:
raise exceptions.CombineArchiveException('Cannot parse xml manifest. {}'.format(e.msg))
# check for correct root element and namespace
if manifest.tag != utils.extend_tag_name(_XML_ROOT_ELEM, _XML_NS):
raise exceptions.CombineArchiveException('manifest has no valid omex root element')
# check entries
for entry in manifest.findall(_XML_CONTENT_TAG, _XML_NS):
try:
location = utils.get_attribute(entry, _XML_CONTENT_LOCATION, _XML_NS)
entry_format = utils.check_format(utils.get_attribute(entry, _XML_CONTENT_FORMAT, _XML_NS), convert=False)
master = True if entry.attrib.get(_XML_CONTENT_MASTER, False) in ('True', 'true', True) else False
except KeyError:
raise exceptions.CombineArchiveException('location and format field are required. Corrupt manifest.xml')
# clean location
location = utils.clean_pathname(location)
# check if file is in zip, if it's not the root element
zipinfo = None
if location not in self.ARCHIVE_REFERENCE:
try:
zipinfo = self._zip.getinfo(location)
except KeyError:
raise exceptions.CombineArchiveException(
'{location} is specified by the manifest, but not contained by the ZIP file'.format(location=location))
archive_entry = ArchiveEntry(location, format=entry_format, master=master, archive=self, zipinfo=zipinfo)
self.entries[location] = archive_entry
def _read_metadata(self):
        # go over all possible metadata files
for meta_file in self.filter_format(_XML_CONTENT_METADATA_TYPE):
try:
# parse the xml
meta = ElementTree.fromstring(meta_file.read())
except ElementTree.ParseError as e:
raise exceptions.CombineArchiveException(
'Cannot parse xml metadata {file}. {msg}'.format(file=meta_file.location, msg=e.msg))
# find every rdf:Description
for description in meta.findall(metadata.Namespace.rdf_terms.description, _XML_NS):
try:
about_url = urlparse(utils.get_attribute(description, metadata.Namespace.rdf_terms.about, _XML_NS))
about_str = about_url.path
fragment_str = about_url.fragment
except KeyError:
raise exceptions.CombineArchiveException('A metadata description tag has to have an about field')
if about_str in self.ARCHIVE_REFERENCE:
# meta data is about the archive (root element)
about = self
else:
# meta data is about normal file
about = self.get_entry(about_str)
# start parsing
try:
data = metadata.OmexMetaDataObject(xml_element=description)
except ValueError as e:
data = metadata.DefaultMetaDataObject(xml_element=description)
about.add_description(data, fragment=fragment_str)
def _write_manifest(self, zip_file=None):
"""
internal function.
Writes the manifest file of a COMBINE Archive
"""
if zip_file is None:
zip_file = self._zip
# create new DOM object
manifest = ElementTree.Element(utils.extend_tag_name(_XML_ROOT_ELEM, _XML_NS))
# write first entry for archive itself
content = ElementTree.SubElement(manifest, utils.extend_tag_name(_XML_CONTENT_TAG, _XML_NS))
content.attrib.update({
utils.extend_tag_name(_XML_CONTENT_LOCATION, _XML_NS): '.',
utils.extend_tag_name(_XML_CONTENT_FORMAT, _XML_NS): _XML_CONTENT_ARCHIVE_TYPE,
})
for (location, entry) in self.entries.items():
entry_format = utils.check_format(entry.format)
content = ElementTree.SubElement(manifest, utils.extend_tag_name(_XML_CONTENT_TAG, _XML_NS))
content.attrib.update({
utils.extend_tag_name(_XML_CONTENT_LOCATION, _XML_NS): location,
utils.extend_tag_name(_XML_CONTENT_FORMAT, _XML_NS): entry_format,
})
if entry.master:
content.attrib[utils.extend_tag_name(_XML_CONTENT_MASTER, _XML_NS)] = 'true'
# prettify xml
utils.indent(manifest)
# write xml to zip
io = StringIO()
ElementTree.ElementTree(manifest).write(io, xml_declaration=True, default_namespace=_XML_ROOT_NS, encoding='utf-8')
try:
zip_file.remove(self.MANIFEST_LOCATION)
except KeyError:
pass # Manifest does not exist yet, so removing it will fail
zip_file.writestr(self.MANIFEST_LOCATION, io.getvalue())
io.close()
def _write_metadata(self):
# create new Element object for RDF
rdf = ElementTree.Element(utils.extend_tag_name(metadata.Namespace.rdf_terms.rdf, _XML_NS))
# iterate over archive metadata
for description in self.description:
desc_elem = description._rebuild_xml()
desc_elem.attrib[utils.extend_tag_name(metadata.Namespace.rdf_terms.about, _XML_NS)] = self.ARCHIVE_REFERENCE[0]
rdf.append(desc_elem)
# iterate over all metadata for each entry
for (location, entry) in self.entries.items():
if not isinstance(entry, metadata.MetaDataHolder):
continue
for description in entry.description:
desc_elem = description._rebuild_xml()
desc_elem.attrib[utils.extend_tag_name(metadata.Namespace.rdf_terms.about, _XML_NS)] = location
rdf.append(desc_elem)
# prettify xml
utils.indent(rdf)
# write xml to zip
io = StringIO()
ElementTree.ElementTree(rdf).write(io, xml_declaration=True, encoding='utf-8')
self.add_entry(io.getvalue(), _XML_CONTENT_METADATA_TYPE, location=self.METADATA_LOCATION, replace=True)
io.close()
def close(self):
"""
closes the COMBINE Archive.
        Does not write any changes to the manifest; call pack() first to persist them
"""
self._zip.close()
def repack(self, output_file=None):
"""
        rewrites the COMBINE archive with all changes and metadata into a temp file and then attempts
        to replace the original archive. Works only with archives that really exist on the filesystem (no StringIO)
"""
if output_file is None:
try:
new_file = tempfile.NamedTemporaryFile(
dir=os.path.dirname(self._archive), delete=False )
except:
new_file = tempfile.NamedTemporaryFile(delete=False)
else:
new_file = output_file
new_zip = zipfile.ZipFile(new_file, mode='a')
# add main entries
self._write_metadata() # write metadata first, so the ArchiveEntry is updated
self._write_manifest(zip_file=new_zip)
# add all entries
for (location, entry) in self.entries.items():
if location in self.ARCHIVE_REFERENCE or location == self.MANIFEST_LOCATION:
# skip root entry (representing the archive itself) and the two main entries (manifest and metadata)
continue
if entry.zipinfo is None:
entry.zipinfo = self._zip.getinfo(location)
buffer = self._zip.read(entry.zipinfo)
new_zip.writestr(entry.zipinfo, buffer)
# close both zip files
new_zip.close()
self._zip.close()
if output_file is None:
# remove old file and move new one
os.remove(self._archive)
shutil.move(new_file.name, self._archive)
else:
if not isinstance(self._archive, (str, unicode)):
# is a file descriptor
self._archive.close()
self._archive = new_file
# open new zip file
self._zip = zipfile.ZipFile(self._archive, mode='a')
def pack(self):
"""
Writes any changes to the manifest or metadata into the COMBINE archive.
"""
# add main entries
self._write_metadata() # write metadata first, so the ArchiveEntry is updated
self._write_manifest()
# close and reopen the zipfile, so the zip central directory gets written to disk
self._zip.close()
self._zip = zipfile.ZipFile(self._archive, mode='a')
def add_entry(self, file, format, location=None, master=False, replace=False):
"""
Adds a file to the COMBINE archive and creates the corresponding manifest entry.
If file is an instance of unicode or str, its value is written directly as the entry content; otherwise it is passed to ZipFile.write().
Returns:
ArchiveEntry
"""
if not file or not format:
raise exceptions.CombineArchiveException('both a file and the corresponding format must be provided')
# check format schema
format = utils.check_format(format)
# no location provided. Guess it
if location is None or not location:
location = os.path.basename(file)
# clean location
location = utils.clean_pathname(location)
if location == self.MANIFEST_LOCATION or location in self.ARCHIVE_REFERENCE:
raise exceptions.CombineArchiveException('it is not allowed to name a file {loc}'.format(loc=location))
if location in self._zip.namelist():
if replace is False:
raise exceptions.CombineArchiveException('{loc} already exists in the COMBINE archive. Set replace=True to overwrite it'.format(loc=location))
else:
self.remove_entry(location)
# write file to zip
if isinstance(file, (str, unicode)):
# file is actually string
zipinfo = self._zip.writestr(location, file)
else:
zipinfo = self._zip.write(file, location)
entry = ArchiveEntry(location, format=format, master=master, zipinfo=zipinfo, archive=self)
self.entries[entry.location] = entry
return entry
def remove_entry(self, location):
"""
Removes an entry from the COMBINE archive. The file will remain in the
zip archive, until pack() is called.
"""
location = utils.clean_pathname(location)
if location in self.entries:
self._zip.remove(location)
del self.entries[location]
else:
raise KeyError('Could not find {loc} in COMBINE archive'.format(loc=location))
def get_entry(self, location):
"""
Returns the archive entry at the given location or raises a KeyError
if it is not found.
"""
location = utils.clean_pathname(location)
if location in self.entries:
return self.entries[location]
else:
raise KeyError('Could not find {loc} in COMBINE archive'.format(loc=location))
def filter_format(self, format, regex=False):
"""
Generator yielding all archive entries with a given format.
If regex=True, format is compiled to a regular expression and
matched against the entry formats.
"""
if not format:
raise KeyError('You need to provide a format')
if regex is True:
pattern = re.compile(format)
else:
pattern = None
# check format argument against spec
try:
utils.check_format(format)
except exceptions.CombineArchiveFormatException as e:
raise KeyError(
'{format} is not a valid format according to the OMEX specification. {cause}'.format(format=format,
cause=e.message))
for (location, entry) in self.entries.items():
if pattern is not None and pattern.match(entry.format):
yield entry
elif pattern is None and format == entry.format:
yield entry
def get_master_entries(self):
"""
Returns a list of all entries that have the master flag set.
"""
return [entry for entry in self.entries.values() if entry.master is True]
class ArchiveEntry(metadata.MetaDataHolder):
"""
represents a single entry in a COMBINE archive
"""
def __init__(self, location, format=None, master=False, zipinfo=None, archive=None):
super(ArchiveEntry, self).__init__()
self.location = location
self.format = format
self.master = master
self.archive = archive
self.zipinfo = zipinfo
def read(self):
if self.zipinfo is not None and self.archive is not None:
return self.archive._zip.read(self.zipinfo)
elif self.zipinfo is None and self.archive is not None:
return self.archive._zip.read(self.location)
else:
raise exceptions.CombineArchiveException('There is no reference back to the Combine archive')
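# Minimal usage sketch (illustrative only; the file names and the exact CombineArchive
# constructor signature are assumptions, not part of the code above):
#
#   archive = CombineArchive('example.omex')
#   archive.add_entry('model.xml',
#                     'http://identifiers.org/combine.specifications/sbml',
#                     master=True)
#   archive.pack()    # persist manifest and metadata changes into the zip
#   archive.repack()  # or rewrite the whole archive, dropping removed entries
#   archive.close()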
|
FreakyBytes/pyCombineArchive
|
combinearchive/combinearchive.py
|
Python
|
bsd-3-clause
| 15,637
|
from __future__ import absolute_import, print_function
__all__ = ['JavaScriptStacktraceProcessor']
import logging
import re
import base64
import six
import zlib
from django.conf import settings
from os.path import splitext
from requests.utils import get_encoding_from_headers
from six.moves.urllib.parse import urlparse, urljoin, urlsplit
from libsourcemap import from_json as view_from_json
# In case SSL is unavailable (light builds) we can't import this here.
try:
from OpenSSL.SSL import ZeroReturnError
except ImportError:
class ZeroReturnError(Exception):
pass
from sentry import http
from sentry.interfaces.stacktrace import Stacktrace
from sentry.models import EventError, ReleaseFile
from sentry.utils.cache import cache
from sentry.utils.files import compress_file
from sentry.utils.hashlib import md5_text
from sentry.utils.http import is_valid_origin
from sentry.utils import metrics
from sentry.stacktraces import StacktraceProcessor
from .cache import SourceCache, SourceMapCache
# number of surrounding lines (on each side) to fetch
LINES_OF_CONTEXT = 5
BASE64_SOURCEMAP_PREAMBLE = 'data:application/json;base64,'
BASE64_PREAMBLE_LENGTH = len(BASE64_SOURCEMAP_PREAMBLE)
UNKNOWN_MODULE = '<unknown module>'
CLEAN_MODULE_RE = re.compile(
r"""^
(?:/| # Leading slashes
(?:
(?:java)?scripts?|js|build|static|node_modules|bower_components|[_\.~].*?| # common folder prefixes
v?(?:\d+\.)*\d+| # version numbers, v1, 1.0.0
[a-f0-9]{7,8}| # short sha
[a-f0-9]{32}| # md5
[a-f0-9]{40} # sha1
)/)+|
(?:[-\.][a-f0-9]{7,}$) # Ending in a commitish
""", re.X | re.I
)
VERSION_RE = re.compile(r'^[a-f0-9]{32}|[a-f0-9]{40}$', re.I)
NODE_MODULES_RE = re.compile(r'\bnode_modules/')
SOURCE_MAPPING_URL_RE = re.compile(r'\/\/# sourceMappingURL=(.*)$')
# the maximum number of remote resources (i.e. source files) that should be
# fetched
MAX_RESOURCE_FETCHES = 100
logger = logging.getLogger(__name__)
class UnparseableSourcemap(http.BadSource):
error_type = EventError.JS_INVALID_SOURCEMAP
def trim_line(line, column=0):
"""
Trims a line down to a goal of 140 characters, with a little
wiggle room to be sensible and tries to trim around the given
`column`. So it tries to extract 60 characters before and after
the provided `column` and yield a better context.
"""
line = line.strip(u'\n')
ll = len(line)
if ll <= 150:
return line
if column > ll:
column = ll
start = max(column - 60, 0)
# Round down if it brings us close to the edge
if start < 5:
start = 0
end = min(start + 140, ll)
# Round up to the end if it's close
if end > ll - 5:
end = ll
# If we are bumped all the way to the end,
# make sure we still get a full 140 characters in the line
if end == ll:
start = max(end - 140, 0)
line = line[start:end]
if end < ll:
# we've snipped from the end
line += u' {snip}'
if start > 0:
# we've snipped from the beginning
line = u'{snip} ' + line
return line
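# Rough illustration (approximate values, not doctest output): for a 1000-character
# line with column=500, trim_line() keeps a ~140-character window starting near
# column 440 and marks both cut ends, i.e. u'{snip} ...text around the column... {snip}'.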
def get_source_context(source, lineno, colno, context=LINES_OF_CONTEXT):
if not source:
return None, None, None
# lineno's in JS are 1-indexed
# just in case. sometimes math is hard
if lineno > 0:
lineno -= 1
lower_bound = max(0, lineno - context)
upper_bound = min(lineno + 1 + context, len(source))
try:
pre_context = [trim_line(x) for x in source[lower_bound:lineno]]
except IndexError:
pre_context = []
try:
context_line = trim_line(source[lineno], colno)
except IndexError:
context_line = ''
try:
post_context = [trim_line(x) for x in source[(lineno + 1):upper_bound]]
except IndexError:
post_context = []
return pre_context or None, context_line, post_context or None
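# Example with a made-up source list: source=['a', 'b', 'c', 'd'] and lineno=2
# (1-indexed) yields pre_context=['a'], context_line='b', post_context=['c', 'd'],
# each side bounded by LINES_OF_CONTEXT.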
def discover_sourcemap(result):
"""
Given a UrlResult object, attempt to discover a sourcemap.
"""
# When coercing the headers returned by urllib to a dict
# all keys become lowercase so they're normalized
sourcemap = result.headers.get('sourcemap', result.headers.get('x-sourcemap'))
if not sourcemap:
parsed_body = result.body.split('\n')
# Source maps are only going to exist at either the top or bottom of the document.
# Technically, there isn't anything indicating *where* it should exist, so we
# are generous and assume it's somewhere either in the first or last 5 lines.
# If it's somewhere else in the document, you're probably doing it wrong.
if len(parsed_body) > 10:
possibilities = parsed_body[:5] + parsed_body[-5:]
else:
possibilities = parsed_body
# We want to scan each line sequentially, and the last one found wins
# This behavior is undocumented, but matches what Chrome and Firefox do.
for line in possibilities:
if line[:21] in ('//# sourceMappingURL=', '//@ sourceMappingURL='):
# We want everything AFTER the indicator, which is 21 chars long
sourcemap = line[21:].rstrip()
# If we still haven't found anything, check end of last line AFTER source code.
# This is not the literal interpretation of the spec, but browsers support it.
# e.g. {code}//# sourceMappingURL={url}
if not sourcemap:
# Only look at last 300 characters to keep search space reasonable (minified
# JS on a single line could be tens of thousands of chars). This is a totally
# arbitrary number / best guess; most sourceMappingURLs are relative and
# not very long.
search_space = possibilities[-1][-300:].rstrip()
match = SOURCE_MAPPING_URL_RE.search(search_space)
if match:
sourcemap = match.group(1)
if sourcemap:
# react-native shoves a comment at the end of the
# sourceMappingURL line.
# For example:
# sourceMappingURL=app.js.map/*ascii:...*/
# This comment is completely out of spec and no browser
# would support this, but we need to strip it to make
# people happy.
if '/*' in sourcemap and sourcemap[-2:] == '*/':
index = sourcemap.index('/*')
# comment definitely shouldn't be the first character,
# so let's just make sure of that.
if index == 0:
raise AssertionError(
'react-native comment found at bad location: %d, %r' % (index, sourcemap)
)
sourcemap = sourcemap[:index]
# fix url so it's absolute
sourcemap = urljoin(result.url, sourcemap)
return sourcemap
def fetch_release_file(filename, release, dist=None):
cache_key = 'releasefile:v1:%s:%s' % (release.id, md5_text(filename).hexdigest(), )
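# The cache holds either -1 (a negative "not found / failed to read" marker) or a
# (headers, compressed_body, status, encoding) tuple; both shapes are unpacked below.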
filename_path = None
if filename is not None:
# Reconstruct url without protocol + host
# e.g. http://example.com/foo?bar => ~/foo?bar
parsed_url = urlparse(filename)
filename_path = '~' + parsed_url.path
if parsed_url.query:
filename_path += '?' + parsed_url.query
logger.debug('Checking cache for release artifact %r (release_id=%s)', filename, release.id)
result = cache.get(cache_key)
dist_name = dist and dist.name or None
if result is None:
logger.debug(
'Checking database for release artifact %r (release_id=%s)', filename, release.id
)
filename_idents = [ReleaseFile.get_ident(filename, dist_name)]
if filename_path is not None and filename_path != filename:
filename_idents.append(ReleaseFile.get_ident(filename_path, dist_name))
possible_files = list(
ReleaseFile.objects.filter(
release=release,
dist=dist,
ident__in=filename_idents,
).select_related('file')
)
if len(possible_files) == 0:
logger.debug(
'Release artifact %r not found in database (release_id=%s)', filename, release.id
)
cache.set(cache_key, -1, 60)
return None
elif len(possible_files) == 1:
releasefile = possible_files[0]
else:
# Prioritize releasefile that matches full url (w/ host)
# over hostless releasefile
target_ident = filename_idents[0]
releasefile = next((f for f in possible_files if f.ident == target_ident))
logger.debug(
'Found release artifact %r (id=%s, release_id=%s)', filename, releasefile.id, release.id
)
try:
with metrics.timer('sourcemaps.release_file_read'):
with releasefile.file.getfile() as fp:
z_body, body = compress_file(fp)
except Exception as e:
logger.exception(six.text_type(e))
cache.set(cache_key, -1, 3600)
result = None
else:
headers = {k.lower(): v for k, v in releasefile.file.headers.items()}
encoding = get_encoding_from_headers(headers)
result = http.UrlResult(filename, headers, body, 200, encoding)
cache.set(cache_key, (headers, z_body, 200, encoding), 3600)
elif result == -1:
# We cached an error, so normalize
# it down to None
result = None
else:
# Previous caches would be a 3-tuple instead of a 4-tuple,
# so this is being maintained for backwards compatibility
try:
encoding = result[3]
except IndexError:
encoding = None
result = http.UrlResult(
filename, result[0], zlib.decompress(result[1]), result[2], encoding
)
return result
def fetch_file(url, project=None, release=None, dist=None, allow_scraping=True):
"""
Pull down a URL, returning a UrlResult object.
Attempts to fetch from the cache.
"""
# If our url has been truncated, it'd be impossible to fetch
# so we check for this early and bail
if url[-3:] == '...':
raise http.CannotFetch(
{
'type': EventError.JS_MISSING_SOURCE,
'url': http.expose_url(url),
}
)
if release:
with metrics.timer('sourcemaps.release_file'):
result = fetch_release_file(url, release, dist)
else:
result = None
cache_key = 'source:cache:v4:%s' % (md5_text(url).hexdigest(), )
if result is None:
if not allow_scraping or not url.startswith(('http:', 'https:')):
error = {
'type': EventError.JS_MISSING_SOURCE,
'url': http.expose_url(url),
}
raise http.CannotFetch(error)
logger.debug('Checking cache for url %r', url)
result = cache.get(cache_key)
if result is not None:
# Previous caches would be a 4-tuple instead of a 5-tuple,
# so this is being maintained for backwards compatibility
try:
encoding = result[4]
except IndexError:
encoding = None
# We got a cache hit, but the body is compressed, so we
# need to decompress it before handing it off
result = http.UrlResult(
result[0], result[1], zlib.decompress(result[2]), result[3], encoding
)
if result is None:
headers = {}
verify_ssl = False
if project and is_valid_origin(url, project=project):
verify_ssl = bool(project.get_option('sentry:verify_ssl', False))
token = project.get_option('sentry:token')
if token:
token_header = project.get_option(
'sentry:token_header',
'X-Sentry-Token',
)
headers[token_header] = token
with metrics.timer('sourcemaps.fetch'):
result = http.fetch_file(url, headers=headers, verify_ssl=verify_ssl)
z_body = zlib.compress(result.body)
cache.set(cache_key, (url, result.headers, z_body, result.status, result.encoding), 60)
# Make sure the file we're getting back is six.binary_type. The only
# reason it'd not be binary would be from old cached blobs, so
# for compatibility with current cached files, let's coerce back to
# binary and say utf8 encoding.
if not isinstance(result.body, six.binary_type):
try:
result = http.UrlResult(
result.url, result.headers,
result.body.encode('utf8'), result.status, result.encoding
)
except UnicodeEncodeError:
error = {
'type': EventError.FETCH_INVALID_ENCODING,
'value': 'utf8',
'url': http.expose_url(url),
}
raise http.CannotFetch(error)
# For JavaScript files, check if content is something other than JavaScript/JSON (i.e. HTML)
# NOTE: possible to have JS files that don't actually end w/ ".js", but
# this should catch 99% of cases
if url.endswith('.js'):
# Check if response is HTML by looking if the first non-whitespace character is an open tag ('<').
# This cannot parse as valid JS/JSON.
# NOTE: not relying on Content-Type header because apps often don't set this correctly
# Discard leading whitespace (often found before doctype)
body_start = result.body[:20].lstrip()
if body_start[:1] == u'<':
error = {
'type': EventError.JS_INVALID_CONTENT,
'url': url,
}
raise http.CannotFetch(error)
return result
def fetch_sourcemap(url, project=None, release=None, dist=None, allow_scraping=True):
if is_data_uri(url):
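# Inline sourcemaps arrive as data URIs: strip the preamble and re-add any '='
# padding that tools commonly drop, so the payload length is a multiple of 4 as
# base64.b64decode requires (e.g. a 10-character payload gets '==' appended).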
try:
body = base64.b64decode(
url[BASE64_PREAMBLE_LENGTH:] + (b'=' * (-(len(url) - BASE64_PREAMBLE_LENGTH) % 4))
)
except TypeError as e:
raise UnparseableSourcemap({
'url': '<base64>',
'reason': e.message,
})
else:
result = fetch_file(
url, project=project, release=release, dist=dist, allow_scraping=allow_scraping
)
body = result.body
try:
return view_from_json(body)
except Exception as exc:
# This is in debug because the product shows an error already.
logger.debug(six.text_type(exc), exc_info=True)
raise UnparseableSourcemap({
'url': http.expose_url(url),
})
def is_data_uri(url):
return url[:BASE64_PREAMBLE_LENGTH] == BASE64_SOURCEMAP_PREAMBLE
def generate_module(src):
"""
Converts a url into a made-up module name by doing the following:
* Extract just the path name ignoring querystrings
* Trimming off the initial /
* Trimming off the file extension
* Removes useless folder prefixes
e.g. http://google.com/js/v1.0/foo/bar/baz.js -> foo/bar/baz
"""
if not src:
return UNKNOWN_MODULE
filename, ext = splitext(urlsplit(src).path)
if ext not in ('.js', '.jsx', '.coffee'):
return UNKNOWN_MODULE
if filename.endswith('.min'):
filename = filename[:-4]
# TODO(dcramer): replace CLEAN_MODULE_RE with tokenizer completely
tokens = filename.split('/')
for idx, token in enumerate(tokens):
# a SHA
if VERSION_RE.match(token):
return '/'.join(tokens[idx + 1:])
return CLEAN_MODULE_RE.sub('', filename) or UNKNOWN_MODULE
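# A couple of made-up examples: http://example.com/a1b2c3d4/js/app.min.js collapses
# to 'app' (short-sha and js/ prefixes stripped), while
# http://example.com/assets/0cc175b9c0f1b6a831c399e269772661/vendor/lib.js
# (an md5-looking path token) yields 'vendor/lib'.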
class JavaScriptStacktraceProcessor(StacktraceProcessor):
"""
Attempts to fetch source code for javascript frames.
Frames must match the following requirements:
- lineno >= 0
- colno >= 0
- abs_path is the HTTP URI to the source
- context_line is empty
Mutates the input ``data`` with expanded context if available.
"""
def __init__(self, *args, **kwargs):
StacktraceProcessor.__init__(self, *args, **kwargs)
self.max_fetches = MAX_RESOURCE_FETCHES
self.allow_scraping = self.project.get_option('sentry:scrape_javascript', True)
self.fetch_count = 0
self.sourcemaps_touched = set()
self.cache = SourceCache()
self.sourcemaps = SourceMapCache()
self.release = None
self.dist = None
def get_stacktraces(self, data):
try:
stacktraces = [
e['stacktrace'] for e in data['sentry.interfaces.Exception']['values']
if e.get('stacktrace')
]
except KeyError:
stacktraces = []
if 'sentry.interfaces.Stacktrace' in data:
stacktraces.append(data['sentry.interfaces.Stacktrace'])
return [(s, Stacktrace.to_python(s)) for s in stacktraces]
def get_valid_frames(self):
# build list of frames that we can actually grab source for
frames = []
for info in self.stacktrace_infos:
frames.extend([f for f in info.stacktrace['frames'] if f.get('lineno') is not None])
return frames
def preprocess_step(self, processing_task):
frames = self.get_valid_frames()
if not frames:
logger.debug(
'Event %r has no frames with enough context to '
'fetch remote source', self.data['event_id']
)
return False
self.release = self.get_release(create=True)
if self.data.get('dist') and self.release:
self.dist = self.release.get_dist(self.data['dist'])
self.populate_source_cache(frames)
return True
def handles_frame(self, frame, stacktrace_info):
platform = frame.get('platform') or self.data.get('platform')
return (settings.SENTRY_SCRAPE_JAVASCRIPT_CONTEXT and platform == 'javascript')
def process_frame(self, processable_frame, processing_task):
frame = processable_frame.frame
last_token = None
token = None
cache = self.cache
sourcemaps = self.sourcemaps
all_errors = []
sourcemap_applied = False
# can't fetch source if there's no filename present
if not frame.get('abs_path'):
return
errors = cache.get_errors(frame['abs_path'])
if errors:
all_errors.extend(errors)
# This might fail but that's okay, we try with a different path a
# bit later down the road.
source = self.get_source(frame['abs_path'])
in_app = None
new_frame = dict(frame)
raw_frame = dict(frame)
sourcemap_url, sourcemap_view = sourcemaps.get_link(frame['abs_path'])
self.sourcemaps_touched.add(sourcemap_url)
if sourcemap_view and frame.get('colno') is None:
all_errors.append(
{
'type': EventError.JS_NO_COLUMN,
'url': http.expose_url(frame['abs_path']),
}
)
elif sourcemap_view:
last_token = token
if is_data_uri(sourcemap_url):
sourcemap_label = frame['abs_path']
else:
sourcemap_label = sourcemap_url
sourcemap_label = http.expose_url(sourcemap_label)
try:
# Errors are 1-indexed in the frames, so we need to -1 to get
# zero-indexed value from tokens.
assert frame['lineno'] > 0, "line numbers are 1-indexed"
token = sourcemap_view.lookup_token(frame['lineno'] - 1, frame['colno'])
except Exception:
token = None
all_errors.append(
{
'type': EventError.JS_INVALID_SOURCEMAP_LOCATION,
'column': frame.get('colno'),
'row': frame.get('lineno'),
'source': frame['abs_path'],
'sourcemap': sourcemap_label,
}
)
# Store original data in annotation
new_frame['data'] = dict(frame.get('data') or {}, sourcemap=sourcemap_label)
sourcemap_applied = True
if token is not None:
abs_path = urljoin(sourcemap_url, token.src)
logger.debug(
'Mapping compressed source %r to mapping in %r', frame['abs_path'], abs_path
)
source = self.get_source(abs_path)
if not source:
errors = cache.get_errors(abs_path)
if errors:
all_errors.extend(errors)
else:
all_errors.append(
{
'type': EventError.JS_MISSING_SOURCE,
'url': http.expose_url(abs_path),
}
)
if token is not None:
# Tokens return zero-indexed linenos
new_frame['lineno'] = token.src_line + 1
new_frame['colno'] = token.src_col
# The offending function is always the previous function in the stack
# Honestly, no idea what the bottom most frame is, so we're ignoring that atm
if last_token:
new_frame['function'] = last_token.name or frame.get('function')
else:
new_frame['function'] = token.name or frame.get('function')
filename = token.src
# special case webpack support
# abs_path will always be the full path with webpack:/// prefix.
# filename will be relative to that
if abs_path.startswith('webpack:'):
filename = abs_path
# webpack seems to use ~ to imply "relative to resolver root"
# which is generally seen for third party deps
# (i.e. node_modules)
if '/~/' in filename:
filename = '~/' + abs_path.split('/~/', 1)[-1]
else:
filename = filename.split('webpack:///', 1)[-1]
# As noted above, '~/' means they're coming from node_modules,
# so these are not app dependencies
if filename.startswith('~/'):
in_app = False
# And conversely, local dependencies start with './'
elif filename.startswith('./'):
in_app = True
# We want to explicitly generate a webpack module name
new_frame['module'] = generate_module(filename)
if abs_path.startswith('app:'):
if NODE_MODULES_RE.search(filename):
in_app = False
else:
in_app = True
new_frame['abs_path'] = abs_path
new_frame['filename'] = filename
if not frame.get('module') and abs_path.startswith(
('http:', 'https:', 'webpack:', 'app:')
):
new_frame['module'] = generate_module(abs_path)
elif sourcemap_url:
new_frame['data'] = dict(
new_frame.get('data') or {}, sourcemap=http.expose_url(sourcemap_url)
)
# TODO: theoretically a minified source could point to
# another mapped, minified source
changed_frame = self.expand_frame(new_frame, source=source)
if not new_frame.get('context_line') and source:
all_errors.append(
{
'type': EventError.JS_INVALID_SOURCEMAP_LOCATION,
# Column might be missing here
'column': new_frame.get('colno'),
# Line might be missing here
'row': new_frame.get('lineno'),
'source': new_frame['abs_path'],
}
)
changed_raw = sourcemap_applied and self.expand_frame(raw_frame)
if sourcemap_applied or all_errors or changed_frame or \
changed_raw:
if in_app is not None:
new_frame['in_app'] = in_app
raw_frame['in_app'] = in_app
return [new_frame], [raw_frame] if changed_raw else None, all_errors
def expand_frame(self, frame, source=None):
if frame.get('lineno') is not None:
if source is None:
source = self.get_source(frame['abs_path'])
if source is None:
logger.debug('No source found for %s', frame['abs_path'])
return False
frame['pre_context'], frame['context_line'], frame['post_context'] \
= get_source_context(source=source, lineno=frame['lineno'],
colno=frame.get('colno') or 0)
return True
return False
def get_source(self, filename):
if filename not in self.cache:
self.cache_source(filename)
return self.cache.get(filename)
def cache_source(self, filename):
sourcemaps = self.sourcemaps
cache = self.cache
self.fetch_count += 1
if self.fetch_count > self.max_fetches:
cache.add_error(filename, {
'type': EventError.JS_TOO_MANY_REMOTE_SOURCES,
})
return
# TODO: respect cache-control/max-age headers to some extent
logger.debug('Fetching remote source %r', filename)
try:
result = fetch_file(
filename,
project=self.project,
release=self.release,
dist=self.dist,
allow_scraping=self.allow_scraping
)
except http.BadSource as exc:
cache.add_error(filename, exc.data)
return
cache.add(filename, result.body, result.encoding)
cache.alias(result.url, filename)
sourcemap_url = discover_sourcemap(result)
if not sourcemap_url:
return
logger.debug('Found sourcemap %r for minified script %r', sourcemap_url[:256], result.url)
sourcemaps.link(filename, sourcemap_url)
if sourcemap_url in sourcemaps:
return
# pull down sourcemap
try:
sourcemap_view = fetch_sourcemap(
sourcemap_url,
project=self.project,
release=self.release,
dist=self.dist,
allow_scraping=self.allow_scraping,
)
except http.BadSource as exc:
cache.add_error(filename, exc.data)
return
sourcemaps.add(sourcemap_url, sourcemap_view)
# cache any inlined sources
for src_id, source in sourcemap_view.iter_sources():
if sourcemap_view.has_source_contents(src_id):
self.cache.add(
urljoin(sourcemap_url, source),
lambda view=sourcemap_view, id=src_id: view.get_source_contents(id),
None,
)
def populate_source_cache(self, frames):
"""
Fetch all sources that we know are required (being referenced directly
in frames).
"""
pending_file_list = set()
for f in frames:
# We can't even attempt to fetch source if abs_path is None
if f.get('abs_path') is None:
continue
# tbh not entirely sure how this happens, but raven-js allows this
# to be caught. I think this comes from dev consoles and whatnot
# where there is no page. This just bails early instead of exposing
# a fetch error that may be confusing.
if f['abs_path'] == '<anonymous>':
continue
pending_file_list.add(f['abs_path'])
for idx, filename in enumerate(pending_file_list):
self.cache_source(
filename=filename,
)
def close(self):
StacktraceProcessor.close(self)
if self.sourcemaps_touched:
metrics.incr(
'sourcemaps.processed',
amount=len(self.sourcemaps_touched),
instance=self.project.id
)
|
jean/sentry
|
src/sentry/lang/javascript/processor.py
|
Python
|
bsd-3-clause
| 28,607
|
"""Tests for UIAWrapper"""
from __future__ import print_function
from __future__ import unicode_literals
import time
import os
import sys
import collections
import unittest
import mock
import six
sys.path.append(".")
from pywinauto.windows.application import Application # noqa: E402
from pywinauto.base_application import WindowSpecification # noqa: E402
from pywinauto.sysinfo import is_x64_Python, UIA_support # noqa: E402
from pywinauto.timings import Timings, wait_until # noqa: E402
from pywinauto.actionlogger import ActionLogger # noqa: E402
from pywinauto import Desktop
from pywinauto import mouse # noqa: E402
from pywinauto import WindowNotFoundError # noqa: E402
if UIA_support:
import comtypes
import pywinauto.windows.uia_defines as uia_defs
import pywinauto.controls.uia_controls as uia_ctls
from pywinauto.controls.uiawrapper import UIAWrapper
from pywinauto.windows.uia_element_info import UIAElementInfo
wpf_samples_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\WPF_samples")
if is_x64_Python():
wpf_samples_folder = os.path.join(wpf_samples_folder, 'x64')
wpf_app_1 = os.path.join(wpf_samples_folder, u"WpfApplication1.exe")
mfc_samples_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\MFC_samples")
if is_x64_Python():
mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
mfc_app_rebar_test = os.path.join(mfc_samples_folder, u"RebarTest.exe")
winforms_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\WinForms_samples")
if is_x64_Python():
winforms_folder = os.path.join(winforms_folder, 'x64')
winfoms_app_grid = os.path.join(winforms_folder, u"DataGridView_TestApp.exe")
if sys.version_info[:2] >= (3, 6):
qt_python_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\Qt_Python_samples")
qt_py_combobox_app = sys.executable + ' ' + os.path.join(qt_python_folder, u"qt5_combobox.py")
if UIA_support:
def _set_timings():
"""Setup timings for UIA related tests"""
Timings.defaults()
Timings.window_find_timeout = 20
class UIAWrapperTests(unittest.TestCase):
"""Unit tests for the UIAWrapper class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
mouse.move((-500, 500)) # remove the mouse from the screen to avoid side effects
# start the application
self.app = Application(backend='uia')
self.app = self.app.start(wpf_app_1)
self.dlg = self.app.WPFSampleApplication
def test_get_active_uia(self):
focused_element = self.dlg.get_active()
self.assertTrue(type(focused_element) is UIAWrapper or issubclass(type(focused_element), UIAWrapper))
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_issue_296(self):
"""Test handling of disappeared descendants"""
wrp = self.dlg.find()
with mock.patch.object(wrp.element_info._element, 'FindAll') as mock_findall:
mock_findall.side_effect = ValueError("Mocked value error")
self.assertEqual([], wrp.descendants())
mock_findall.side_effect = comtypes.COMError(-2147220991, "Mocked COM error", ())
self.assertEqual([], wrp.descendants())
def test_issue_278(self):
"""Test that statement menu = app.MainWindow.Menu works for 'uia' backend"""
menu_spec = self.dlg.Menu
self.assertTrue(isinstance(menu_spec, WindowSpecification))
# Also check the app binding
self.assertEqual(menu_spec.app, self.app)
def test_find_nontop_ctl_by_class_name_and_title(self):
"""Test getting a non-top control by a class name and a title"""
# Look up for a non-top button control with 'Apply' caption
self.dlg.wait('ready')
caption = 'Apply'
wins = self.app.windows(top_level_only=False,
class_name='Button',
name=caption)
# Verify the number of found wrappers
self.assertEqual(len(wins), 1)
# Verify the caption of the found wrapper
self.assertEqual(wins[0].texts()[0], caption)
def test_find_top_win_by_class_name_and_title(self):
"""Test getting a top window by a class name and a title"""
# Since the top_level_only is True by default
# we don't specify it as a criteria argument
self.dlg.wait('ready')
caption = 'WPF Sample Application'
wins = self.app.windows(class_name='Window', name=caption)
# Verify the number of found wrappers
self.assertEqual(len(wins), 1)
# Verify the caption of the found wrapper
self.assertEqual(wins[0].texts()[0], caption)
def test_class(self):
"""Test getting the classname of the dialog"""
button = self.dlg.by(class_name="Button",
name="OK").find()
self.assertEqual(button.class_name(), "Button")
def test_window_text(self):
"""Test getting the window Text of the dialog"""
label = self.dlg.TestLabel.find()
self.assertEqual(label.window_text(), u"TestLabel")
self.assertEqual(label.can_be_label, True)
def test_control_id(self):
"""Test getting control ID"""
button = self.dlg.by(class_name="Button",
name="OK").find()
self.assertEqual(button.control_id(), None)
def test_runtime_id(self):
"""Test getting runtime ID"""
button = self.dlg.by(class_name="Button",
name="OK").find()
self.assertNotEqual(button.__hash__(), 0)
orig = button.element_info._element.GetRuntimeId
exception_err = comtypes.COMError(-2147220991, 'An event was unable to invoke any of the subscribers', ())
button.element_info._element.GetRuntimeId = mock.Mock(side_effect=exception_err)
self.assertEqual(button.__hash__(), 0)
button.element_info._element.GetRuntimeId = orig # restore the original method
def test_automation_id(self):
"""Test getting automation ID"""
alpha_toolbar = self.dlg.by(name="Alpha", control_type="ToolBar")
button = alpha_toolbar.by(control_type="Button",
auto_id="OverflowButton").find()
self.assertEqual(button.automation_id(), "OverflowButton")
def test_access_key(self):
"""Test find element by access key"""
file_menu = self.dlg.by(access_key="Alt+F").find()
self.assertEqual("MenuItem", file_menu.element_info.control_type)
self.assertEqual("File", file_menu.element_info.name)
def test_legacy_shortcut(self):
"""Test find element by keyboard shortcut value from legacy pattern"""
file_menu = self.dlg.by(legacy_shortcut="Alt+F").find()
self.assertEqual("MenuItem", file_menu.element_info.control_type)
self.assertEqual("File", file_menu.element_info.name)
def test_value(self):
"""Test find element by value"""
edit = self.dlg.by(auto_id="edit1").find()
edit.set_edit_text("Test string")
edit_by_value = self.dlg.by(value="Test string").find()
self.assertEqual("edit1", edit_by_value.element_info.auto_id)
def test_legacy_value(self):
"""Test find element by value from legacy pattern"""
edit = self.dlg.by(auto_id="edit1").find()
edit.set_edit_text("Test string")
edit_by_value = self.dlg.by(legacy_value="Test string").find()
self.assertEqual("edit1", edit_by_value.element_info.auto_id)
def test_legacy_action(self):
"""Test find element by default action name from legacy pattern"""
combobox = self.dlg.by(legacy_action="Expand").find()
self.assertEqual("ComboBox", combobox.element_info.control_type)
self.assertEqual(2, combobox.item_count())
def test_legacy_descr(self):
"""Test find element by description from legacy pattern"""
close_button = self.dlg.by(legacy_descr="Closes the window").find()
self.assertEqual("Button", close_button.element_info.control_type)
self.assertEqual("Close", close_button.element_info.legacy_name)
def test_legacy_help_not_available(self):
"""Test return empty string if LegacyIAccessible.Help value is not available"""
close_button = self.dlg.by(control_type="TitleBar").find()
self.assertEqual("", close_button.element_info.legacy_help)
def test_is_visible(self):
"""Test is_visible method of a control"""
button = self.dlg.by(class_name="Button",
name="OK").find()
self.assertEqual(button.is_visible(), True)
def test_is_enabled(self):
"""Test is_enabled method of a control"""
button = self.dlg.by(class_name="Button",
name="OK").find()
self.assertEqual(button.is_enabled(), True)
def test_process_id(self):
"""Test process_id method of a control"""
button = self.dlg.by(class_name="Button",
name="OK").find()
self.assertEqual(button.process_id(), self.dlg.process_id())
self.assertNotEqual(button.process_id(), 0)
def test_is_dialog(self):
"""Test is_dialog method of a control"""
button = self.dlg.by(class_name="Button",
name="OK").find()
self.assertEqual(button.is_dialog(), False)
self.assertEqual(self.dlg.is_dialog(), True)
def test_move_window(self):
"""Test move_window without any parameters"""
# move_window with default parameters
prevRect = self.dlg.rectangle()
self.dlg.move_window()
self.assertEqual(prevRect, self.dlg.rectangle())
# move_window call for a not supported control
button = self.dlg.by(class_name="Button", name="OK")
self.assertRaises(AttributeError, button.move_window)
# Make RECT stub to avoid import win32structures
Rect = collections.namedtuple('Rect', 'left top right bottom')
prev_rect = self.dlg.rectangle()
new_rect = Rect._make([i + 5 for i in prev_rect])
self.dlg.move_window(
new_rect.left,
new_rect.top,
new_rect.right - new_rect.left,
new_rect.bottom - new_rect.top
)
time.sleep(0.1)
logger = ActionLogger()
logger.log("prev_rect = %s", prev_rect)
logger.log("new_rect = %s", new_rect)
logger.log("self.dlg.rectangle() = %s", self.dlg.rectangle())
self.assertEqual(self.dlg.rectangle(), new_rect)
self.dlg.move_window(prev_rect)
self.assertEqual(self.dlg.rectangle(), prev_rect)
def test_close(self):
"""Test close method of a control"""
wrp = self.dlg.find()
# mock a failure in get_elem_interface() method only for 'Window' param
orig_get_elem_interface = uia_defs.get_elem_interface
with mock.patch.object(uia_defs, 'get_elem_interface') as mock_get_iface:
def side_effect(elm_info, ptrn_name):
if ptrn_name == "Window":
raise uia_defs.NoPatternInterfaceError()
else:
return orig_get_elem_interface(elm_info, ptrn_name)
mock_get_iface.side_effect=side_effect
# also mock a failure in type_keys() method
with mock.patch.object(UIAWrapper, 'type_keys') as mock_type_keys:
exception_err = comtypes.COMError(-2147220991, 'An event was unable to invoke any of the subscribers', ())
mock_type_keys.side_effect = exception_err
self.assertRaises(WindowNotFoundError, self.dlg.close)
self.dlg.close()
self.assertEqual(self.dlg.exists(), False)
def test_parent(self):
"""Test getting a parent of a control"""
button = self.dlg.Alpha.find()
self.assertEqual(button.parent(), self.dlg.find())
def test_top_level_parent(self):
"""Test getting a top-level parent of a control"""
button = self.dlg.by(class_name="Button",
name="OK").find()
self.assertEqual(button.top_level_parent(), self.dlg.find())
def test_texts(self):
"""Test getting texts of a control"""
self.assertEqual(self.dlg.texts(), ['WPF Sample Application'])
def test_children(self):
"""Test getting children of a control"""
button = self.dlg.by(class_name="Button",
name="OK").find()
self.assertEqual(len(button.children()), 1)
self.assertEqual(button.children()[0].class_name(), "TextBlock")
def test_children_generator(self):
"""Test iterating children of a control"""
button = self.dlg.by(class_name="Button", name="OK").find()
children = [child for child in button.iter_children()]
self.assertEqual(len(children), 1)
self.assertEqual(children[0].class_name(), "TextBlock")
def test_descendants(self):
"""Test iterating descendants of a control"""
toolbar = self.dlg.by(name="Alpha", control_type="ToolBar").find()
descendants = toolbar.descendants()
self.assertEqual(len(descendants), 7)
def test_descendants_generator(self):
toolbar = self.dlg.by(name="Alpha", control_type="ToolBar").find()
descendants = [desc for desc in toolbar.iter_descendants()]
self.assertSequenceEqual(toolbar.descendants(), descendants)
def test_is_child(self):
"""Test is_child method of a control"""
button = self.dlg.Alpha.find()
self.assertEqual(button.is_child(self.dlg.find()), True)
def test_equals(self):
"""Test controls comparisons"""
button = self.dlg.by(class_name="Button",
name="OK").find()
self.assertNotEqual(button, self.dlg.find())
self.assertEqual(button, button.element_info)
self.assertEqual(button, button)
@unittest.skip("To be solved with issue #790")
def test_scroll(self):
"""Test scroll"""
# Check an exception on a non-scrollable control
button = self.dlg.by(class_name="Button",
name="OK").find()
six.assertRaisesRegex(self, AttributeError, "not scrollable",
button.scroll, "left", "page")
# Check an exception on a control without horizontal scroll bar
tab = self.dlg.Tree_and_List_Views.set_focus()
listview = tab.children(class_name=u"ListView")[0]
six.assertRaisesRegex(self, AttributeError, "not horizontally scrollable",
listview.scroll, "right", "line")
# Check exceptions on wrong arguments
self.assertRaises(ValueError, listview.scroll, "bbbb", "line")
self.assertRaises(ValueError, listview.scroll, "up", "aaaa")
# Store a cell position
cell = listview.cell(3, 0)
orig_rect = cell.rectangle()
self.assertEqual(orig_rect.left > 0, True)
# Trigger a horizontal scroll bar on the control
hdr = listview.get_header_control()
hdr_itm = hdr.children()[1]
trf = hdr_itm.iface_transform
trf.resize(1000, 20)
listview.scroll("right", "page", 2)
self.assertEqual(cell.rectangle().left < 0, True)
# Check an exception on a control without vertical scroll bar
tab = self.dlg.ListBox_and_Grid.set_focus()
datagrid = tab.children(class_name=u"DataGrid")[0]
six.assertRaisesRegex(self, AttributeError, "not vertically scrollable",
datagrid.scroll, "down", "page")
# def testVerifyActionable(self):
# self.assertRaises()
# def testVerifyEnabled(self):
# self.assertRaises()
# def testVerifyVisible(self):
# self.assertRaises()
def test_is_keyboard_focusable(self):
"""Test is_keyboard focusable method of several controls"""
edit = self.dlg.TestLabelEdit.find()
label = self.dlg.TestLabel.find()
button = self.dlg.by(class_name="Button",
name="OK").find()
self.assertEqual(button.is_keyboard_focusable(), True)
self.assertEqual(edit.is_keyboard_focusable(), True)
self.assertEqual(label.is_keyboard_focusable(), False)
def test_set_focus(self):
"""Test setting a keyboard focus on a control"""
edit = self.dlg.TestLabelEdit.find()
edit.set_focus()
self.assertEqual(edit.has_keyboard_focus(), True)
def test_get_active_desktop_uia(self):
focused_element = Desktop(backend="uia").get_active()
self.assertTrue(type(focused_element) is UIAWrapper or issubclass(type(focused_element), UIAWrapper))
def test_type_keys(self):
"""Test sending key types to a control"""
edit = self.dlg.TestLabelEdit.find()
edit.type_keys("t")
self.assertEqual(edit.window_text(), "t")
edit.type_keys("e")
self.assertEqual(edit.window_text(), "te")
edit.type_keys("s")
self.assertEqual(edit.window_text(), "tes")
edit.type_keys("t")
self.assertEqual(edit.window_text(), "test")
edit.type_keys("T")
self.assertEqual(edit.window_text(), "testT")
edit.type_keys("y")
self.assertEqual(edit.window_text(), "testTy")
def test_no_pattern_interface_error(self):
"""Test a query interface exception handling"""
button = self.dlg.by(class_name="Button",
name="OK").find()
elem = button.element_info.element
self.assertRaises(
uia_defs.NoPatternInterfaceError,
uia_defs.get_elem_interface,
elem,
"Selection",
)
def test_minimize_maximize(self):
"""Test window minimize/maximize operations"""
wrp = self.dlg.minimize()
self.dlg.wait_not('active')
self.assertEqual(wrp.is_minimized(), True)
wrp.maximize()
self.dlg.wait('active')
self.assertEqual(wrp.is_maximized(), True)
wrp.minimize()
self.dlg.wait_not('active')
wrp.restore()
self.dlg.wait('active')
self.assertEqual(wrp.is_normal(), True)
def test_get_properties(self):
"""Test getting writeble properties of a control"""
uia_props = set(['class_name',
'friendly_class_name',
'texts',
'control_id',
'rectangle',
'is_visible',
'is_enabled',
'control_count',
'is_keyboard_focusable',
'has_keyboard_focus',
'selection_indices',
'automation_id',
])
edit = self.dlg.TestLabelEdit.find()
props = set(edit.get_properties().keys())
self.assertEqual(props, uia_props)
# def test_draw_outline(self):
# """Test the outline was drawn."""
# # not sure why, but this extra call makes the test stable
# self.dlg.draw_outline()
#
# # outline control
# button = self.dlg.OK.wrapper_object()
# button.draw_outline()
# img1 = button.capture_as_image()
# self.assertEqual(img1.getpixel((0, 0)), (0, 255, 0)) # green
#
# # outline window
# self.dlg.draw_outline(colour="blue")
# img2 = self.dlg.capture_as_image()
# self.assertEqual(img2.getpixel((0, 0)), (0, 0, 255)) # blue
def test_get_legacy_properties(self):
"""Test getting legacy properties of a control"""
expected_properties = {'Value': '',
'DefaultAction': 'Press',
'Description': '',
'Name': 'OK',
'Help': '',
'ChildId': 0,
'KeyboardShortcut': '',
'State': 1048576,
'Role': 43}
button_wrp = self.dlg.by(class_name="Button",
name="OK").find()
actual_properties = button_wrp.legacy_properties()
self.assertEqual(actual_properties, expected_properties)
def test_capture_as_image_multi_monitor(self):
with mock.patch('win32api.EnumDisplayMonitors') as mon_device:
mon_device.return_value = (1, 2)
rect = self.dlg.rectangle()
expected = (rect.width(), rect.height())
result = self.dlg.capture_as_image().size
self.assertEqual(expected, result)
def test_set_value(self):
"""Test for UIAWrapper.set_value"""
edit = self.dlg.by(control_type='Edit', auto_id='edit1').find()
self.assertEqual(edit.get_value(), '')
edit.set_value('test')
self.assertEqual(edit.get_value(), 'test')
class UIAWrapperRawViewWalkerTests(UIAWrapperTests):
"""Unit tests for the UIAWrapper class with enabled RawViewWalker"""
def setUp(self):
self.default_use_raw_view_walker = UIAElementInfo.use_raw_view_walker
UIAElementInfo.use_raw_view_walker = True
super(UIAWrapperRawViewWalkerTests, self).setUp()
def tearDown(self):
UIAElementInfo.use_raw_view_walker = self.default_use_raw_view_walker
super(UIAWrapperRawViewWalkerTests, self).tearDown()
def test_issue_296(self):
"""Test handling of disappeared descendants"""
wrp = self.dlg.wrapper_object()
with mock.patch.object(uia_defs.IUIA().raw_tree_walker, 'GetFirstChildElement') as mock_get_first_child:
mock_get_first_child.side_effect = ValueError("Mocked value error")
self.assertEqual([], wrp.descendants())
mock_get_first_child.side_effect = comtypes.COMError(-2147220991, "Mocked COM error", ())
self.assertEqual([], wrp.descendants())
class UIAWrapperMouseTests(unittest.TestCase):
"""Unit tests for mouse actions of the UIAWrapper class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
self.app = Application(backend='uia')
self.app = self.app.start(wpf_app_1)
dlg = self.app.WPFSampleApplication
self.button = dlg.by(class_name="Button",
name="OK").find()
self.label = dlg.by(class_name="Text", name="TestLabel").find()
self.app.wait_cpu_usage_lower(threshold=1.5, timeout=30, usage_interval=1.0)
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
# def test_click(self):
# pass
def test_click_input(self):
"""Test click_input method of a control"""
self.button.click_input()
self.assertEqual(self.label.window_text(), "LeftClick")
# def test_double_click(self):
# pass
def test_double_click_input(self):
"""Test double_click_input method of a control"""
self.button.double_click_input()
self.assertEqual(self.label.window_text(), "DoubleClick")
# def test_right_click(self):
# pass
def test_right_click_input(self):
"""Test right_click_input method of a control"""
self.button.right_click_input()
self.assertEqual(self.label.window_text(), "RightClick")
# def test_press_move_release(self):
# pass
class UiaControlsTests(unittest.TestCase):
"""Unit tests for the UIA control wrappers"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
app = Application(backend='uia')
self.app = app.start(wpf_app_1)
self.dlg = self.app.WPFSampleApplication
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_pretty_print(self):
"""Test __str__ and __repr__ methods for UIA based controls"""
if six.PY3:
assert_regex = self.assertRegex
else:
assert_regex = self.assertRegexpMatches
wrp = self.dlg.OK.find()
assert_regex(wrp.__str__(), "^uia_controls\.ButtonWrapper - 'OK', Button$")
assert_regex(wrp.__repr__(), "^<uia_controls\.ButtonWrapper - 'OK', Button, [0-9-]+>$")
wrp = self.dlg.CheckBox.find()
assert_regex(wrp.__str__(), "^uia_controls\.ButtonWrapper - 'CheckBox', CheckBox$", )
assert_regex(wrp.__repr__(), "^<uia_controls\.ButtonWrapper - 'CheckBox', CheckBox, [0-9-]+>$", )
wrp = self.dlg.by(class_name="TextBox").find()
assert_regex(wrp.__str__(), "^uia_controls\.EditWrapper - '', Edit$")
assert_regex(wrp.__repr__(), "^<uia_controls\.EditWrapper - '', Edit, [0-9-]+>$")
assert_regex(wrp.element_info.__str__(), "^uia_element_info.UIAElementInfo - '', TextBox$")
assert_regex(wrp.element_info.__repr__(), "^<uia_element_info.UIAElementInfo - '', TextBox, None>$")
wrp = self.dlg.TabControl.find()
assert_regex(wrp.__str__(), "^uia_controls\.TabControlWrapper - '', TabControl$")
assert_regex(wrp.__repr__(), "^<uia_controls\.TabControlWrapper - '', TabControl, [0-9-]+>$")
wrp = self.dlg.MenuBar.find()
assert_regex(wrp.__str__(), "^uia_controls\.MenuWrapper - 'System', Menu$")
assert_regex(wrp.__repr__(), "^<uia_controls\.MenuWrapper - 'System', Menu, [0-9-]+>$")
wrp = self.dlg.Slider.find()
assert_regex(wrp.__str__(), "^uia_controls\.SliderWrapper - '', Slider$")
assert_regex(wrp.__repr__(), "^<uia_controls\.SliderWrapper - '', Slider, [0-9-]+>$")
wrp = self.dlg.TestLabel.find()
assert_regex(wrp.__str__(),
"^uia_controls.StaticWrapper - 'TestLabel', Static$")
assert_regex(wrp.__repr__(),
"^<uia_controls.StaticWrapper - 'TestLabel', Static, [0-9-]+>$")
wrp = self.dlg.find()
assert_regex(wrp.__str__(), "^uia_controls\.WindowWrapper - 'WPF Sample Application', Dialog$")
assert_regex(wrp.__repr__(), "^<uia_controls\.WindowWrapper - 'WPF Sample Application', Dialog, [0-9-]+>$")
# ElementInfo.__str__
assert_regex(wrp.element_info.__str__(),
"^uia_element_info.UIAElementInfo - 'WPF Sample Application', Window$")
assert_regex(wrp.element_info.__repr__(),
"^<uia_element_info.UIAElementInfo - 'WPF Sample Application', Window, [0-9-]+>$")
# mock a failure in window_text() method
orig = wrp.window_text
wrp.window_text = mock.Mock(return_value="") # empty text
assert_regex(wrp.__str__(), "^uia_controls\.WindowWrapper - '', Dialog$")
assert_regex(wrp.__repr__(), "^<uia_controls\.WindowWrapper - '', Dialog, [0-9-]+>$")
wrp.window_text.return_value = u'\xd1\xc1\\\xa1\xb1\ua000' # unicode string
assert_regex(wrp.__str__(), "^uia_controls\.WindowWrapper - '.+', Dialog$")
wrp.window_text = orig # restore the original method
# mock a failure in element_info.name property (it's based on _get_name())
orig = wrp.element_info._get_name
wrp.element_info._get_name = mock.Mock(return_value=None)
assert_regex(wrp.element_info.__str__(), "^uia_element_info\.UIAElementInfo - 'None', Window$")
assert_regex(wrp.element_info.__repr__(), "^<uia_element_info\.UIAElementInfo - 'None', Window, [0-9-]+>$")
wrp.element_info._get_name = orig
def test_pretty_print_encode_error(self):
"""Test __repr__ method for BaseWrapper with specific Unicode text (issue #594)"""
wrp = self.dlg.find()
wrp.window_text = mock.Mock(return_value=u'\xb7')
print(wrp)
print(repr(wrp))
def test_friendly_class_names(self):
"""Test getting friendly class names of common controls"""
button = self.dlg.OK.find()
self.assertEqual(button.friendly_class_name(), "Button")
friendly_name = self.dlg.CheckBox.friendly_class_name()
self.assertEqual(friendly_name, "CheckBox")
friendly_name = self.dlg.Apply.friendly_class_name()
self.assertEqual(friendly_name, "Button")
friendly_name = self.dlg.ToggleMe.friendly_class_name()
self.assertEqual(friendly_name, "Button")
friendly_name = self.dlg.Yes.friendly_class_name()
self.assertEqual(friendly_name, "RadioButton")
friendly_name = self.dlg.TabControl.friendly_class_name()
self.assertEqual(friendly_name, "TabControl")
edit = self.dlg.by(class_name="TextBox").find()
self.assertEqual(edit.friendly_class_name(), "Edit")
slider = self.dlg.Slider.find()
self.assertEqual(slider.friendly_class_name(), "Slider")
self.assertEqual(self.dlg.MenuBar.friendly_class_name(), "Menu")
self.assertEqual(self.dlg.Toolbar.friendly_class_name(), "Toolbar")
# Switch tab view
tab_item_wrp = self.dlg.TreeAndListViews.set_focus()
ctrl = tab_item_wrp.children(control_type="DataGrid")[0]
self.assertEqual(ctrl.friendly_class_name(), "ListView")
i = ctrl.get_item(1)
self.assertEqual(i.friendly_class_name(), "DataItem")
ctrl = tab_item_wrp.children(control_type="Tree")[0]
self.assertEqual(ctrl.friendly_class_name(), "TreeView")
ti = self.dlg.Tree_and_List_ViewsTabItem.DateElements
self.assertEqual(ti.friendly_class_name(), "TreeItem")
def test_check_box(self):
"""Test 'toggle' and 'toggle_state' for the check box control"""
# Get a current state of the check box control
check_box = self.dlg.CheckBox.find()
cur_state = check_box.get_toggle_state()
self.assertEqual(cur_state, uia_defs.toggle_state_inderteminate)
# Toggle the next state
cur_state = check_box.toggle().get_toggle_state()
# Get a new state of the check box control
self.assertEqual(cur_state, uia_defs.toggle_state_off)
def test_toggle_button(self):
"""Test 'toggle' and 'toggle_state' for the toggle button control"""
# Get a current state of the check box control
button = self.dlg.ToggleMe.find()
cur_state = button.get_toggle_state()
self.assertEqual(cur_state, uia_defs.toggle_state_on)
# Toggle the next state
cur_state = button.toggle().get_toggle_state()
# Get a new state of the check box control
self.assertEqual(cur_state, uia_defs.toggle_state_off)
# Toggle the next state
cur_state = button.toggle().get_toggle_state()
self.assertEqual(cur_state, uia_defs.toggle_state_on)
def test_button_click(self):
"""Test the click method for the Button control"""
label = self.dlg.by(class_name="Text",
name="TestLabel").find()
self.dlg.Apply.click()
self.assertEqual(label.window_text(), "ApplyClick")
def test_radio_button(self):
"""Test 'select' and 'is_selected' for the radio button control"""
yes = self.dlg.Yes.find()
cur_state = yes.is_selected()
self.assertEqual(cur_state, False)
cur_state = yes.select().is_selected()
self.assertEqual(cur_state, True)
no = self.dlg.No.find()
cur_state = no.click().is_selected()
self.assertEqual(cur_state, True)
def test_combobox_texts(self):
"""Test items texts for the combo box control"""
# The ComboBox on the sample app has following items:
# 0. Combo Item 1
# 1. Combo Item 2
ref_texts = ['Combo Item 1', 'Combo Item 2']
combo_box = self.dlg.ComboBox.find()
self.assertEqual(combo_box.item_count(), len(ref_texts))
for t in combo_box.texts():
self.assertEqual((t in ref_texts), True)
# Mock a 0 pointer to COM element
combo_box.iface_item_container.FindItemByProperty = mock.Mock(return_value=0)
self.assertEqual(combo_box.texts(), ref_texts)
# Mock a combobox without "ItemContainer" pattern
combo_box.iface_item_container.FindItemByProperty = mock.Mock(side_effect=uia_defs.NoPatternInterfaceError())
self.assertEqual(combo_box.texts(), ref_texts)
# Mock a combobox without "ExpandCollapse" pattern
# Expect empty texts
combo_box.iface_expand_collapse.Expand = mock.Mock(side_effect=uia_defs.NoPatternInterfaceError())
self.assertEqual(combo_box.texts(), [])
def test_combobox_select(self):
"""Test select related methods for the combo box control"""
combo_box = self.dlg.ComboBox.find()
# Verify combobox properties and an initial state
self.assertEqual(combo_box.can_select_multiple(), 0)
self.assertEqual(combo_box.is_selection_required(), False)
self.assertEqual(len(combo_box.get_selection()), 0)
# The ComboBox on the sample app has following items:
# 0. Combo Item 1
# 1. Combo Item 2
combo_box.select(0)
self.assertEqual(combo_box.selected_text(), 'Combo Item 1')
self.assertEqual(combo_box.selected_index(), 0)
collapsed = combo_box.is_collapsed()
self.assertEqual(collapsed, True)
combo_box.select(1)
self.assertEqual(combo_box.selected_text(), 'Combo Item 2')
self.assertEqual(combo_box.selected_index(), 1)
combo_box.select('Combo Item 1')
self.assertEqual(combo_box.selected_text(), 'Combo Item 1')
# Try to use unsupported item type as a parameter for select
self.assertRaises(ValueError, combo_box.select, 1.2)
# Try to select a non-existing item,
# verify the selected item didn't change
self.assertRaises(IndexError, combo_box.select, 'Combo Item 23455')
self.assertEqual(combo_box.selected_text(), 'Combo Item 1')
def test_combobox_expand_collapse(self):
"""Test 'expand' and 'collapse' for the combo box control"""
combo_box = self.dlg.ComboBox.find()
collapsed = combo_box.is_collapsed()
self.assertEqual(collapsed, True)
expanded = combo_box.expand().is_expanded()
self.assertEqual(expanded, True)
collapsed = combo_box.collapse().is_collapsed()
self.assertEqual(collapsed, True)
class TabControlWrapperTests(unittest.TestCase):
"""Unit tests for the TabControlWrapper class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
app = Application(backend='uia')
app = app.start(wpf_app_1)
dlg = app.WPFSampleApplication
self.app = app
self.ctrl = dlg.by(class_name="TabControl").find()
self.texts = [u"General", u"Tree and List Views", u"ListBox and Grid"]
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_tab_count(self):
"""Test the tab count in the Tab control"""
self.assertEqual(self.ctrl.tab_count(), len(self.texts))
def test_get_selected_tab(self):
"""Test selecting a tab by index or by name and getting an index of the selected tab"""
# Select a tab by name, use chaining to get the index of the selected tab
idx = self.ctrl.select(u"Tree and List Views").get_selected_tab()
self.assertEqual(idx, 1)
# Select a tab by index
self.ctrl.select(0)
self.assertEqual(self.ctrl.get_selected_tab(), 0)
def test_texts(self):
"""Make sure the tabs captions are read correctly"""
self.assertEqual(self.ctrl.texts(), self.texts)
class EditWrapperTests(unittest.TestCase):
"""Unit tests for the EditWrapper class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
app = Application(backend='uia')
app = app.start(wpf_app_1)
self.app = app
self.dlg = app.WPFSampleApplication
self.edit = self.dlg.by(class_name="TextBox").find()
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_set_window_text(self):
"""Test setting text value of control (the text in textbox itself)"""
text_to_set = "This test"
self.assertRaises(UserWarning, self.edit.set_window_text, text_to_set)
self.assertEqual(self.edit.text_block(), text_to_set)
self.assertRaises(UserWarning, self.edit.set_window_text, " is done", True)
self.assertEqual(self.edit.text_block(), text_to_set + " is done")
def test_set_text(self):
"""Test setting the text of the edit control"""
self.edit.set_edit_text("Some text")
self.assertEqual(self.edit.text_block(), "Some text")
self.edit.set_edit_text(579)
self.assertEqual(self.edit.text_block(), "579")
self.edit.set_edit_text(333, pos_start=1, pos_end=2)
self.assertEqual(self.edit.text_block(), "53339")
def test_line_count(self):
"""Test getting the line count of the edit control"""
self.edit.set_edit_text("Here is some text")
self.assertEqual(self.edit.line_count(), 1)
def test_get_line(self):
"""Test getting each line of the edit control"""
test_data = "Here is some text"
self.edit.set_edit_text(test_data)
self.assertEqual(self.edit.get_line(0), test_data)
def test_get_value(self):
"""Test getting value of the edit control"""
test_data = "Some value"
self.edit.set_edit_text(test_data)
self.assertEqual(self.edit.get_value(), test_data)
def test_text_block(self):
"""Test getting the text block of the edit control"""
test_data = "Here is some text"
self.edit.set_edit_text(test_data)
self.assertEqual(self.edit.text_block(), test_data)
def test_select(self):
"""Test selecting text in the edit control in various ways"""
self.edit.set_edit_text("Some text")
self.edit.select(0, 0)
self.assertEqual((0, 0), self.edit.selection_indices())
self.edit.select()
self.assertEqual((0, 9), self.edit.selection_indices())
self.edit.select(1, 7)
self.assertEqual((1, 7), self.edit.selection_indices())
self.edit.select(5, 2)
self.assertEqual((2, 5), self.edit.selection_indices())
self.edit.select("me t")
self.assertEqual((2, 6), self.edit.selection_indices())
self.assertRaises(RuntimeError, self.edit.select, "123")
class SliderWrapperTests(unittest.TestCase):
"""Unit tests for the EditWrapper class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
app = Application(backend='uia')
app = app.start(wpf_app_1)
self.app = app
self.dlg = app.WPFSampleApplication
self.slider = self.dlg.by(class_name="Slider").find()
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_min_value(self):
"""Test getting minimum value of the Slider"""
self.assertEqual(self.slider.min_value(), 0.0)
def test_max_value(self):
"""Test getting maximum value of the Slider"""
self.assertEqual(self.slider.max_value(), 100.0)
def test_small_change(self):
"""Test Getting small change of slider's thumb"""
self.assertEqual(self.slider.small_change(), 0.1)
def test_large_change(self):
"""Test Getting large change of slider's thumb"""
self.assertEqual(self.slider.large_change(), 1.0)
def test_value(self):
"""Test getting current position of slider's thumb"""
self.assertEqual(self.slider.value(), 70.0)
def test_set_value(self):
"""Test setting position of slider's thumb"""
self.slider.set_value(24)
self.assertEqual(self.slider.value(), 24.0)
self.slider.set_value(33.3)
self.assertEqual(self.slider.value(), 33.3)
self.slider.set_value("75.4")
self.assertEqual(self.slider.value(), 75.4)
self.assertRaises(ValueError, self.slider.set_value, -1)
self.assertRaises(ValueError, self.slider.set_value, 102)
self.assertRaises(ValueError, self.slider.set_value, [50, ])
class ListViewWrapperTests(unittest.TestCase):
"""Unit tests for the ListViewWrapper class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
app = Application(backend='uia')
app = app.start(wpf_app_1)
dlg = app.WPFSampleApplication
self.app = app
self.listview_tab = dlg.Tree_and_List_Views
self.listbox_datagrid_tab = dlg.ListBox_and_Grid
self.listview_texts = [
[u"1", u"Tomatoe", u"Red"],
[u"2", u"Cucumber", u"Green", ],
[u"3", u"Reddish", u"Purple", ],
[u"4", u"Cauliflower", u"White", ],
[u"5", u"Cupsicum", u"Yellow", ],
[u"6", u"Cupsicum", u"Red", ],
[u"7", u"Cupsicum", u"Green", ],
]
self.listbox_texts = [
[u"TextItem 1", ],
[u"TextItem 2", ],
[u"ButtonItem", ],
[u"CheckItem", ],
[u"TextItem 3", ],
[u"TextItem 4", ],
[u"TextItem 5", ],
[u"TextItem 6", ],
[u"TextItem 7", ],
[u"TextItem 8", ],
]
self.datagrid_texts = [
[u"0", u"A0", u"B0", u"C0", u"D0", u"E0", u"", ],
[u"1", u"A1", u"B1", u"C1", u"D1", u"E1", u"", ],
[u"2", u"A2", u"B2", u"C2", u"D2", u"E2", u"", ],
[u"3", u"A3", u"B3", u"C3", u"D3", u"E3", u"", ],
]
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_friendly_class_name(self):
"""Test friendly class name of the ListView controls"""
# ListView
self.listview_tab.set_focus()
listview = self.listview_tab.children(class_name=u"ListView")[0]
self.assertEqual(listview.friendly_class_name(), u"ListView")
# ListBox
self.listbox_datagrid_tab.set_focus()
listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0]
self.assertEqual(listbox.friendly_class_name(), u"ListBox")
# DataGrid
datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0]
self.assertEqual(datagrid.friendly_class_name(), u"ListView")
def test_item_count(self):
"""Test the items count in the ListView controls"""
# ListView
self.listview_tab.set_focus()
listview = self.listview_tab.children(class_name=u"ListView")[0]
self.assertEqual(listview.item_count(), len(self.listview_texts))
# ListBox
self.listbox_datagrid_tab.set_focus()
#listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0]
# self.assertEqual(listbox.item_count(), len(self.listbox_texts))
# DataGrid
datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0]
self.assertEqual(datagrid.item_count(), len(self.datagrid_texts))
def test_column_count(self):
"""Test the columns count in the ListView controls"""
# ListView
self.listview_tab.set_focus()
listview = self.listview_tab.children(class_name=u"ListView")[0]
self.assertEqual(listview.column_count(), len(self.listview_texts[0]))
# ListBox
self.listbox_datagrid_tab.set_focus()
listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0]
self.assertEqual(listbox.column_count(), 0)
# DataGrid
datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0]
self.assertEqual(datagrid.column_count(), len(self.datagrid_texts[0]) - 1)
def test_get_header_control(self):
"""Test getting a Header control and Header Item control of ListView controls"""
# ListView
self.listview_tab.set_focus()
listview = self.listview_tab.children(class_name=u"ListView")[0]
hdr_ctl = listview.get_header_control()
self.assertTrue(isinstance(hdr_ctl, uia_ctls.HeaderWrapper))
# HeaderItem of ListView
hdr_itm = hdr_ctl.children()[2]
self.assertTrue(isinstance(hdr_itm, uia_ctls.HeaderItemWrapper))
self.assertEqual(hdr_itm.iface_transform.CurrentCanResize, True)
# ListBox
self.listbox_datagrid_tab.set_focus()
listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0]
self.assertEqual(listbox.get_header_control(), None)
# DataGrid
datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0]
self.assertTrue(isinstance(datagrid.get_header_control(), uia_ctls.HeaderWrapper))
def test_get_column(self):
"""Test get_column() method for the ListView controls"""
# ListView
self.listview_tab.set_focus()
listview = self.listview_tab.children(class_name=u"ListView")[0]
listview_col = listview.get_column(1)
self.assertEqual(listview_col.texts()[0], u"Name")
# ListBox
self.listbox_datagrid_tab.set_focus()
listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0]
self.assertRaises(IndexError, listbox.get_column, 0)
# DataGrid
datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0]
datagrid_col = datagrid.get_column(2)
self.assertEqual(datagrid_col.texts()[0], u"B")
self.assertRaises(IndexError, datagrid.get_column, 10)
def test_cell(self):
"""Test getting a cell of the ListView controls"""
# ListView
self.listview_tab.set_focus()
listview = self.listview_tab.children(class_name=u"ListView")[0]
cell = listview.cell(3, 2)
self.assertEqual(cell.window_text(), self.listview_texts[3][2])
# ListBox
self.listbox_datagrid_tab.set_focus()
listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0]
cell = listbox.cell(10, 10)
self.assertEqual(cell, None)
# DataGrid
datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0]
cell = datagrid.cell(2, 0)
self.assertEqual(cell.window_text(), self.datagrid_texts[2][0])
self.assertRaises(TypeError, datagrid.cell, 1.5, 1)
self.assertRaises(IndexError, datagrid.cell, 10, 10)
def test_cells(self):
"""Test getting a cells of the ListView controls"""
def compare_cells(cells, control):
for i in range(0, control.item_count()):
for j in range(0, control.column_count()):
self.assertEqual(cells[i][j], control.cell(i, j))
# ListView
self.listview_tab.set_focus()
listview = self.listview_tab.children(class_name=u"ListView")[0]
compare_cells(listview.cells(), listview)
# DataGrid
self.listbox_datagrid_tab.set_focus()
datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0]
compare_cells(datagrid.cells(), datagrid)
# ListBox
self.listbox_datagrid_tab.set_focus()
listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0]
cells = listbox.cells()
self.assertEqual(cells[listbox.item_count() - 1][0].window_text(), "TextItem 7")
self.assertEqual(cells[3][0].window_text(), "CheckItem")
def test_get_item(self):
"""Test getting an item of ListView controls"""
# ListView
self.listview_tab.set_focus()
listview = self.listview_tab.children(class_name=u"ListView")[0]
item = listview.get_item(u"Reddish")
self.assertEqual(item.texts(), self.listview_texts[2])
self.assertRaises(ValueError, listview.get_item, u"Apple")
# ListBox
self.listbox_datagrid_tab.set_focus()
listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0]
item = listbox.get_item(u"TextItem 2")
self.assertEqual(item.texts(), self.listbox_texts[1])
item = listbox.get_item(3)
self.assertEqual(item.texts(), self.listbox_texts[3])
item = listbox.get_item(u"TextItem 8")
self.assertEqual(item.texts(), self.listbox_texts[9])
# DataGrid
datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0]
item = datagrid.get_item(u"B2")
self.assertEqual(item.texts(), self.datagrid_texts[2])
item = datagrid.get_item(3)
self.assertEqual(item.texts(), self.datagrid_texts[3])
self.assertRaises(TypeError, datagrid.get_item, 12.3)
def test_get_items(self):
"""Test getting all items of ListView controls"""
self.listview_tab.set_focus()
listview = self.listview_tab.children(class_name=u"ListView")[0]
content = [item.texts() for item in listview.get_items()]
self.assertEqual(content, self.listview_texts)
# ListBox
self.listbox_datagrid_tab.set_focus()
listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0]
content = [item.texts() for item in listbox.get_items()]
# self.assertEqual(content, self.listbox_texts)
# DataGrid
datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0]
content = [item.texts() for item in datagrid.get_items()]
self.assertEqual(content, self.datagrid_texts)
def test_texts(self):
"""Test getting all items of ListView controls"""
self.listview_tab.set_focus()
listview = self.listview_tab.children(class_name=u"ListView")[0]
self.assertEqual(listview.texts(), self.listview_texts)
# ListBox
self.listbox_datagrid_tab.set_focus()
#listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0]
# self.assertEqual(listbox.texts(), self.listbox_texts)
# DataGrid
datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0]
self.assertEqual(datagrid.texts(), self.datagrid_texts)
def test_select_and_get_item(self):
"""Test selecting an item of the ListView control"""
self.listview_tab.set_focus()
self.ctrl = self.listview_tab.children(class_name=u"ListView")[0]
# Verify get_selected_count
self.assertEqual(self.ctrl.get_selected_count(), 0)
# Select by an index
row = 1
i = self.ctrl.get_item(row)
self.assertEqual(i.is_selected(), False)
self.assertRaises(uia_defs.NoPatternInterfaceError, i.is_checked)
i.select()
self.assertEqual(i.is_selected(), True)
cnt = self.ctrl.get_selected_count()
self.assertEqual(cnt, 1)
rect = self.ctrl.get_item_rect(row)
self.assertEqual(rect, i.rectangle())
# Select by text
row = '3'
i = self.ctrl.get_item(row)
i.select()
self.assertEqual(i.is_selected(), True)
row = 'White'
i = self.ctrl.get_item(row)
i.select()
i = self.ctrl.get_item(3) # re-get the item by a row index
self.assertEqual(i.is_selected(), True)
row = None
self.assertRaises(TypeError, self.ctrl.get_item, row)
class ListViewWrapperTestsWinForms(unittest.TestCase):
"""Unit tests for the ListViewWrapper class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
self.app = Application(backend='uia').start(winfoms_app_grid)
self.dlg = self.app.Dialog
self.add_col_button = self.dlg.AddCol
self.add_row_button = self.dlg.AddRow
self.row_header_button = self.dlg.RowHeader
self.col_header_button = self.dlg.ColHeader
self.list_box = self.dlg.ListBox
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_list_box_item_selection(self):
"""Test get_item method"""
self.list_box.set_focus()
list_box_item = self.list_box.get_item('item (2)')
self.assertFalse(list_box_item.is_selected())
list_box_item.select()
self.assertTrue(list_box_item.is_selected())
def test_list_box_getitem_overload(self):
"""Test __getitem__ method"""
self.list_box.set_focus()
list_box_item = self.list_box['item (2)']
self.assertFalse(list_box_item.is_selected())
list_box_item.select()
self.assertTrue(list_box_item.is_selected())
def test_empty_grid(self):
"""Test some error cases handling"""
self.dlg.set_focus()
table = self.dlg.Table
self.assertEqual(len(table.cells()), 0)
self.assertRaises(IndexError, table.cell, 0, 0)
self.assertRaises(IndexError, table.get_item, 0)
def test_skip_headers(self):
"""Test some error cases handling"""
self.dlg.set_focus()
self.add_col_button.click()
table = self.dlg.Table
cells = table.cells()
self.assertEqual(len(cells), 1)
self.assertEqual(len(cells[0]), 1)
self.assertFalse(isinstance(cells[0][0], uia_ctls.HeaderWrapper))
def test_cell_and_cells_equals(self):
"""Test equivalence of cell and cells methods"""
def compare_cells():
table = self.dlg.Table
cells = table.cells()
self.assertEqual(len(cells), 3)
self.assertEqual(len(cells[0]), 2)
for row_ind in range(0, 3):
for col_ind in range(0, 2):
self.assertEqual(cells[row_ind][col_ind], table.cell(row_ind, col_ind))
self.add_col_button.click()
self.add_col_button.click()
self.add_row_button.click()
self.add_row_button.click()
compare_cells()
self.row_header_button.click()
compare_cells()
self.row_header_button.click()
self.col_header_button.click()
compare_cells()
def test_unsupported_columns(self):
"""Test raise NotImplemented errors for columns methods"""
self.dlg.set_focus()
table = self.dlg.Table
self.assertRaises(NotImplementedError, table.column_count)
self.assertRaises(NotImplementedError, table.get_column, 0)
def test_get_header_controls(self):
"""Test get header controls method"""
self.add_col_button.click()
table = self.dlg.Table
headers = table.get_header_controls()
self.assertEqual(len(headers), 3)
self.col_header_button.click()
headers = table.get_header_controls()
self.assertEqual(len(headers), 1)
self.row_header_button.click()
headers = table.get_header_controls()
self.assertEqual(len(headers), 0)
class MenuBarTestsWinForms(unittest.TestCase):
"""Unit tests for the MenuBar class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
self.app = Application(backend='uia').start(winfoms_app_grid)
self.dlg = self.app.Dialog
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_can_select_multiple_items(self):
"""Test menu_select multimple items with action"""
table = self.dlg.Table
cells = table.cells()
self.assertEqual(len(table.cells()), 0)
self.dlg.menu_select('#0 -> #1 -> #1 -> #0 -> #0 -> #4 ->#0')
cells = table.cells()
self.assertEqual(len(cells), 1)
self.assertEqual(len(cells[0]), 1)
def test_can_select_top_menu(self):
"""Test menu_select with single item"""
first_menu_item = self.dlg['menuStrip1'].children()[0]
point = first_menu_item.rectangle().mid_point()
child_from_point = self.dlg.from_point(point.x, point.y + 20)
self.assertEqual(child_from_point.element_info.name, 'Form1')
self.dlg.menu_select('tem1')
time.sleep(0.1)
child_from_point = self.dlg.from_point(point.x, point.y + 20)
self.assertEqual(child_from_point.element_info.name, 'tem1DropDown')
class EditTestsWinForms(unittest.TestCase):
"""Unit tests for the WinFormEdit class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
self.app = Application(backend='uia').start(winfoms_app_grid)
self.dlg = self.app.Dialog
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_readonly_and_editable_edits(self):
"""Test editable method for editable edit"""
self.assertEqual(self.dlg.Edit2.get_value(), "Editable")
self.assertTrue(self.dlg.Edit2.is_editable())
self.assertEqual(self.dlg.Edit1.get_value(), "ReadOnly")
self.assertFalse(self.dlg.Edit1.is_editable())
class ComboBoxTestsWinForms(unittest.TestCase):
"""Unit tests for the ComboBoxWrapper class with WinForms app"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
app = Application(backend='uia')
self.app = app.start(winfoms_app_grid)
self.dlg = dlg = app.Form1
self.combo_editable = dlg.by(auto_id="comboRowType", control_type="ComboBox").find()
self.combo_fixed = dlg.by(auto_id="comboBoxReadOnly", control_type="ComboBox").find()
self.combo_simple = dlg.by(auto_id="comboBoxSimple", control_type="ComboBox").find()
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_expand_collapse(self):
"""Test methods .expand() and .collapse() for WinForms combo box"""
self.dlg.set_focus()
test_data = [(self.combo_editable, 'editable'), (self.combo_fixed, 'fixed'), (self.combo_simple, 'simple')]
for combo, combo_name in test_data:
if combo != self.combo_simple:
self.assertFalse(combo.is_expanded(),
msg='{} combo box must be collapsed initially'.format(combo_name))
# test that method allows chaining
self.assertEqual(combo.expand(), combo,
msg='Method .expand() for {} combo box must return self'.format(combo_name))
self.assertTrue(combo.is_expanded(),
msg='{} combo box has not been expanded!'.format(combo_name))
# .expand() keeps already expanded state (and still allows chaining)
self.assertEqual(combo.expand(), combo,
msg='Method .expand() for {} combo box must return self, always!'.format(combo_name))
self.assertTrue(combo.is_expanded(),
msg='{} combo box does NOT keep expanded state!'.format(combo_name))
# collapse
self.assertEqual(combo.collapse(), combo,
msg='Method .collapse() for {} combo box must return self'.format(combo_name))
if combo != self.combo_simple:
self.assertFalse(combo.is_expanded(),
msg='{} combo box has not been collapsed!'.format(combo_name))
# collapse already collapsed should keep collapsed state
self.assertEqual(combo.collapse(), combo,
msg='Method .collapse() for {} combo box must return self, always!'.format(combo_name))
if combo != self.combo_simple:
self.assertFalse(combo.is_expanded(),
msg='{} combo box does NOT keep collapsed state!'.format(combo_name))
def test_texts(self):
"""Test method .texts() for WinForms combo box"""
self.dlg.set_focus()
editable_texts = [u'Numbers', u'Letters', u'Special symbols']
fixed_texts = [u'Item 1', u'Item 2', u'Last Item']
simple_texts = [u'Simple 1', u'Simple Two', u'The Simplest']
self.assertEqual(self.combo_editable.texts(), editable_texts)
self.assertEqual(self.combo_editable.expand().texts(), editable_texts)
self.assertTrue(self.combo_editable.is_expanded())
self.combo_editable.collapse()
self.assertEqual(self.combo_fixed.texts(), fixed_texts)
self.assertEqual(self.combo_fixed.expand().texts(), fixed_texts)
self.assertTrue(self.combo_fixed.is_expanded())
self.combo_fixed.collapse()
self.assertEqual(self.combo_simple.texts(), simple_texts)
self.assertEqual(self.combo_simple.expand().texts(), simple_texts)
self.assertTrue(self.combo_simple.is_expanded())
self.combo_simple.collapse()
def test_select(self):
"""Test method .select() for WinForms combo box"""
self.dlg.set_focus()
self.combo_editable.select(u'Letters')
self.assertEqual(self.combo_editable.selected_text(), u'Letters')
self.assertEqual(self.combo_editable.selected_index(), 1)
self.combo_editable.select(2)
self.assertEqual(self.combo_editable.selected_text(), u'Special symbols')
self.assertEqual(self.combo_editable.selected_index(), 2)
self.combo_fixed.select(u'Last Item')
self.assertEqual(self.combo_fixed.selected_text(), u'Last Item')
self.assertEqual(self.combo_fixed.selected_index(), 2)
self.combo_fixed.select(1)
self.assertEqual(self.combo_fixed.selected_text(), u'Item 2')
self.assertEqual(self.combo_fixed.selected_index(), 1)
self.combo_simple.select(u'The Simplest')
self.assertEqual(self.combo_simple.selected_text(), u'The Simplest')
self.assertEqual(self.combo_simple.selected_index(), 2)
self.combo_simple.select(0)
self.assertEqual(self.combo_simple.selected_text(), u'Simple 1')
self.assertEqual(self.combo_simple.selected_index(), 0)
def test_select_errors(self):
"""Test errors in method .select() for WinForms combo box"""
self.dlg.set_focus()
for combo in [self.combo_editable, self.combo_fixed, self.combo_simple]:
self.assertRaises(ValueError, combo.select, u'FFFF')
self.assertRaises(IndexError, combo.select, 50)
def test_item_count(self):
"""Test method .item_count() for WinForms combo box"""
self.dlg.set_focus()
self.assertEqual(self.combo_editable.item_count(), 3)
self.assertEqual(self.combo_fixed.item_count(), 3)
self.assertEqual(self.combo_simple.item_count(), 3)
def test_from_point(self):
"""Test method .from_point() for WinForms combo box"""
self.dlg.set_focus()
x, y = self.combo_fixed.rectangle().mid_point()
combo_from_point = self.dlg.from_point(x, y)
self.assertEqual(combo_from_point, self.combo_fixed)
combo2_from_point = Desktop(backend="uia").from_point(x, y)
self.assertEqual(combo2_from_point, self.combo_fixed)
def test_top_from_point(self):
"""Test method .top_from_point() for WinForms combo box"""
dlg_wrapper = self.dlg.set_focus()
x, y = self.combo_fixed.rectangle().mid_point()
dlg_from_point = self.dlg.top_from_point(x, y)
self.assertEqual(dlg_from_point, dlg_wrapper)
dlg2_from_point = Desktop(backend="uia").top_from_point(x, y)
self.assertEqual(dlg2_from_point, dlg_wrapper)
if sys.version_info[:2] >= (3, 6):
class ComboBoxTestsQt(unittest.TestCase):
"""Unit tests for the ComboBoxWrapper class with PyQt5 app"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
app = Application(backend='uia').start(qt_py_combobox_app, wait_for_idle=False)
self.app = Application(backend='uia').connect(pid=app.process)
self.dlg = dlg = self.app.window(name='QTRV')
self.combo1 = dlg.by(name="Q1", control_type="ComboBox").find()
self.combo2 = dlg.by(name="Q2", control_type="ComboBox").find()
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_select(self):
"""Test method .select() for Qt combo box"""
self.dlg.set_focus()
self.combo1.select(u'Image on right')
self.assertEqual(self.combo1.selected_text(), u'Image on right')
self.assertEqual(self.combo1.selected_index(), 1)
self.combo1.select(2)
self.assertEqual(self.combo1.selected_text(), u'Image on top')
self.assertEqual(self.combo1.selected_index(), 2)
self.combo2.select(u'Image and Text')
self.assertEqual(self.combo2.selected_text(), u'Image and Text')
self.assertEqual(self.combo2.selected_index(), 2)
self.combo2.select(0)
self.assertEqual(self.combo2.selected_text(), u'Image')
self.assertEqual(self.combo2.selected_index(), 0)
class ListItemWrapperTests(unittest.TestCase):
"""Unit tests for the ListItemWrapper class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
app = Application(backend='uia')
app = app.start(wpf_app_1)
dlg = app.WPFSampleApplication
self.app = app
self.listview_tab = dlg.Tree_and_List_Views
self.listbox_datagrid_tab = dlg.ListBox_and_Grid
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_friendly_class_name(self):
"""Test getting friendly class name"""
# DataItem
self.listview_tab.set_focus()
listview_item = self.listview_tab.children(class_name=u"ListView")[0].get_item(2)
self.assertEqual(listview_item.friendly_class_name(), u"DataItem")
# ListBoxItem
self.listbox_datagrid_tab.set_focus()
listbox_item = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0].get_item(3)
self.assertEqual(listbox_item.friendly_class_name(), u"ListItem")
# DataGridRow
datagrid_row = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0].get_item(1)
self.assertEqual(datagrid_row.friendly_class_name(), u"DataItem")
def test_selection(self):
"""Test selection of ListItem"""
self.listview_tab.set_focus()
listview_item = self.listview_tab.children(class_name=u"ListView")[0].get_item(2)
self.assertFalse(listview_item.is_selected())
listview_item.select()
self.assertTrue(listview_item.is_selected())
def test_is_checked(self):
"""Test is_checked() method of ListItemWrapper"""
self.listbox_datagrid_tab.set_focus()
listbox_item = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0].get_item(u"CheckItem")
self.assertRaises(uia_defs.NoPatternInterfaceError, listbox_item.is_checked)
def test_texts(self):
"""Test getting texts of ListItem"""
self.listview_tab.set_focus()
listview_item = self.listview_tab.children(class_name=u"ListView")[0].get_item(1)
texts = [u"2", u"Cucumber", u"Green"]
self.assertEqual(listview_item.texts(), texts)
class MenuWrapperWpfTests(unittest.TestCase):
"""Unit tests for the MenuWrapper class on WPF demo"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
self.app = Application(backend='uia')
self.app = self.app.start(wpf_app_1)
self.dlg = self.app.WPFSampleApplication
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_menu_by_index(self):
"""Test selecting a WPF menu item by index"""
path = "#0->#1->#1" # "File->Close->Later"
self.dlg.menu_select(path)
label = self.dlg.MenuLaterClickStatic.find()
self.assertEqual(label.window_text(), u"MenuLaterClick")
# Non-existing paths
path = "#5->#1"
self.assertRaises(IndexError, self.dlg.menu_select, path)
path = "#0->#1->#1->#2->#3"
self.assertRaises(IndexError, self.dlg.menu_select, path)
def test_menu_by_exact_text(self):
"""Test selecting a WPF menu item by exact text match"""
path = "File->Close->Later"
self.dlg.menu_select(path, True)
label = self.dlg.MenuLaterClickStatic.find()
self.assertEqual(label.window_text(), u"MenuLaterClick")
# A non-exact menu name
path = "File->About"
self.assertRaises(IndexError, self.dlg.menu_select, path, True)
def test_menu_by_best_match_text(self):
"""Test selecting a WPF menu item by best match text"""
path = "file-> close -> later"
self.dlg.menu_select(path, False)
label = self.dlg.MenuLaterClickStatic.find()
self.assertEqual(label.window_text(), u"MenuLaterClick")
def test_menu_by_mixed_match(self):
"""Test selecting a WPF menu item by a path with mixed specifiers"""
path = "file-> #1 -> later"
self.dlg.menu_select(path, False)
label = self.dlg.MenuLaterClickStatic.find()
self.assertEqual(label.window_text(), u"MenuLaterClick")
# Bad specifiers
path = "file-> 1 -> later"
self.assertRaises(IndexError, self.dlg.menu_select, path)
path = "#0->#1->1"
self.assertRaises(IndexError, self.dlg.menu_select, path)
path = "0->#1->1"
self.assertRaises(IndexError, self.dlg.menu_select, path)
class MenuWrapperNotepadTests(unittest.TestCase):
"""Unit tests for the MenuWrapper class on Notepad"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
# start the application
self.app = Application(backend='uia')
self.app = self.app.start("notepad.exe")
self.dlg = self.app.UntitledNotepad
ActionLogger().log("MenuWrapperNotepadTests::setUp, wait till Notepad dialog is ready")
self.dlg.wait("ready")
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_friendly_class_name(self):
"""Test getting the friendly class name of the menu"""
menu = self.dlg.descendants(control_type="MenuBar")[0]
self.assertEqual(menu.friendly_class_name(), "Menu")
def test_menu_by_index(self):
"""Test selecting a menu item by index"""
path = "#4->#1" # "Help->About Notepad"
self.dlg.menu_select(path)
# 'About Notepad' dialog showed upon execution of menu_select
self.assertEqual(self.dlg.AboutNotepad.is_active(), True)
# menu_select raises an AttributeError when a dialog doesn't have menus
self.assertRaises(AttributeError, self.dlg.AboutNotepad.menu_select, "#10->#2")
self.dlg.AboutNotepad.close()
# A non-existing path
path = "#5->#1"
self.assertRaises(IndexError, self.dlg.menu_select, path)
# Get a menu item by index
menu = self.dlg.children(control_type="MenuBar")[0]
item = menu.item_by_index(4)
self.assertEqual(isinstance(item, uia_ctls.MenuItemWrapper), True)
self.assertEqual(item.window_text(), 'Help')
item.select()
item.close()
def test_is_dialog(self):
"""Test that method is_dialog() works as expected"""
self.assertEqual(self.dlg.is_dialog(), True)
self.assertEqual(self.dlg.Edit.is_dialog(), False)
def test_issue_532(self):
"""Test selecting a combobox item when it's wrapped in ListView"""
path = "Format -> Font"
self.dlg.menu_select(path)
combo_box = self.app.top_window().Font.ScriptComboBox.find()
combo_box.select('Greek')
self.assertEqual(combo_box.selected_text(), 'Greek')
self.assertRaises(ValueError, combo_box.select, 'NonExistingScript')
def test_menu_by_exact_text(self):
"""Test selecting a menu item by exact text match"""
path = "Help->About Notepad"
self.dlg.menu_select(path, True)
self.assertEqual(self.dlg.AboutNotepad.is_dialog(), True)
self.dlg.AboutNotepad.close()
# A non-exact menu name
path = "help ->About Notepad"
self.assertRaises(IndexError, self.dlg.menu_select, path, True)
def test_menu_by_best_match_text(self):
"""Test selecting a Win32 menu item by best match text"""
path = "help->aboutnotepad"
self.dlg.menu_select(path, False)
self.dlg.AboutNotepad.close()
path = "Help ->about notepad "
self.dlg.menu_select(path, False)
self.dlg.AboutNotepad.close()
# Bad match
path = "HELP -> About Notepad"
self.assertRaises(IndexError, self.dlg.menu_select, path)
path = "help -> ABOUT NOTEPAD"
self.assertRaises(IndexError, self.dlg.menu_select, path)
path = "help -> # 2"
self.assertRaises(IndexError, self.dlg.menu_select, path)
def test_menu_by_mixed_match(self):
"""Test selecting a menu item by a path with mixed specifiers"""
path = "#4->aboutnotepad"
self.dlg.menu_select(path, False)
self.dlg.AboutNotepad.close()
# An index and the exact text match
path = "Help->#1"
self.dlg.menu_select(path, True)
self.dlg.AboutNotepad.close()
# An index and non-exact text match
path = "#4 -> about notepad "
self.dlg.menu_select(path, False)
self.dlg.AboutNotepad.close()
# Bad specifiers
path = "#0->#1->1"
self.assertRaises(IndexError, self.dlg.menu_select, path)
path = "0->#1->1"
self.assertRaises(IndexError, self.dlg.menu_select, path)
path = " -> #1 -> #2"
self.assertRaises(IndexError, self.dlg.menu_select, path)
class ToolbarWpfTests(unittest.TestCase):
"""Unit tests for ToolbarWrapper class on WPF demo"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
self.app = Application(backend='uia')
self.app = self.app.start(wpf_app_1)
self.dlg = self.app.WPFSampleApplication
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_button_access_wpf(self):
"""Test getting access to buttons on Toolbar of WPF demo"""
# Read a second toolbar with buttons: "button1, button2"
tb = self.dlg.Toolbar2.find()
self.assertEqual(tb.button_count(), 5)
self.assertEqual(len(tb.texts()), 5)
# Test if it's in writable properties
props = set(tb.get_properties().keys())
self.assertEqual('button_count' in props, True)
expect_txt = "button 1"
self.assertEqual(tb.button(3).window_text(), expect_txt)
found_txt = tb.button(expect_txt, exact=True).window_text()
self.assertEqual(found_txt, expect_txt)
found_txt = tb.button("b 1", exact=False).window_text()
self.assertEqual(found_txt, expect_txt)
expect_txt = "button 2"
found_txt = tb.button(expect_txt, exact=True).window_text()
self.assertEqual(found_txt, expect_txt)
expect_txt = ""
btn = tb.button(expect_txt, exact=True)
found_txt = btn.window_text()
self.assertEqual(found_txt, expect_txt)
# Notice that findbestmatch.MatchError is subclassed from IndexError
self.assertRaises(IndexError, tb.button, "BaD n_$E ", exact=False)
class ToolbarNativeTests(unittest.TestCase):
"""Unit tests for ToolbarWrapper class on a native application"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend='uia')
self.app.start(os.path.join(mfc_samples_folder, u"RowList.exe"))
self.dlg = self.app.RowListSampleApplication
self.ctrl = self.dlg.ToolBar.find()
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_tooltips(self):
"""Test working with tooltips"""
self.ctrl.set_focus()
self.ctrl.move_mouse_input(coords=(10, 10), absolute=False)
# Find a tooltip by class name
tt = self.app.window(top_level_only=False,
class_name="tooltips_class32").wait('visible')
self.assertEqual(isinstance(tt, uia_ctls.TooltipWrapper), True)
self.assertEqual(tt.window_text(), "Large Icons")
# Find a tooltip window by control type
tt = self.app.top_window().children(control_type='ToolTip')[0]
self.assertEqual(isinstance(tt, uia_ctls.TooltipWrapper), True)
self.assertEqual(tt.window_text(), "Large Icons")
def test_button_click(self):
"""Test button click"""
# Check the "Full Row Details" button
self.ctrl.check_button("Full Row Details", True)
lst_ctl = self.dlg.ListBox
itm = lst_ctl.children()[1]
self.assertEqual(itm.texts()[0], u'Yellow')
# Check the second time it shouldn't change
self.ctrl.check_button("Full Row Details", True)
self.assertEqual(itm.texts()[0], u'Yellow')
# Switch to another view
self.ctrl.check_button("Small Icons", True)
itm = lst_ctl.children()[1]
self.assertEqual(itm.texts()[0], u'Red')
class ToolbarMfcTests(unittest.TestCase):
"""Unit tests for ToolbarWrapper class on MFC demo"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
self.app = Application(backend='uia').start(mfc_app_rebar_test)
self.dlg = self.app.RebarTest
self.menu_bar = self.dlg.MenuBar.find()
self.toolbar = self.dlg.StandardToolbar.find()
self.window_edge_point = (self.dlg.rectangle().width() + 50, self.dlg.rectangle().height() + 50)
def tearDown(self):
"""Close the application after tests"""
self.menu_bar.move_mouse_input(coords=self.window_edge_point, absolute=False)
self.app.kill()
def test_button_access_mfc(self):
"""Test getting access to buttons on Toolbar for MFC demo"""
# Read a first toolbar with buttons: "File, View, Help"
self.assertEqual(self.menu_bar.button_count(), 4)
self.assertEqual(self.toolbar.button_count(), 11)
# Test if it's in writable properties
props = set(self.menu_bar.get_properties().keys())
self.assertEqual('button_count' in props, True)
self.assertEqual("File", self.menu_bar.button(0).window_text())
self.assertEqual("View", self.menu_bar.button(1).window_text())
self.assertEqual("Help", self.menu_bar.button(2).window_text())
found_txt = self.menu_bar.button("File", exact=True).window_text()
self.assertEqual("File", found_txt)
found_txt = self.menu_bar.button("File", exact=False).window_text()
self.assertEqual("File", found_txt)
def test_texts(self):
"""Test method .texts() for MFC Toolbar"""
self.assertEqual(self.menu_bar.texts(), [u'File', u'View', u'Help', u'Help'])
self.assertEqual(self.toolbar.texts(), [u'New', u'Open', u'Save', u'Save',
u'Cut', u'Copy', u'Paste', u'Paste', u'Print', u'About', u'About'])
class TreeViewWpfTests(unittest.TestCase):
"""Unit tests for TreeViewWrapper class on WPF demo"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
# start the application
self.app = Application(backend='uia')
self.app = self.app.start(wpf_app_1)
self.dlg = self.app.WPFSampleApplication
tab_itm = self.dlg.TreeAndListViews.set_focus()
self.ctrl = tab_itm.children(control_type="Tree")[0]
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_tv_item_count_and_roots(self):
"""Test getting roots and a total number of items in TreeView"""
# By default the tree view on WPF demo is partially expanded
# with only 12 visible nodes
self.assertEqual(self.ctrl.item_count(), 12)
# Test if it's in writable properties
props = set(self.ctrl.get_properties().keys())
self.assertEqual('item_count' in props, True)
roots = self.ctrl.roots()
self.assertEqual(len(roots), 1)
self.assertEqual(roots[0].texts()[0], u'Date Elements')
sub_items = roots[0].sub_elements()
self.assertEqual(len(sub_items), 11)
self.assertEqual(sub_items[0].window_text(), u'Empty Date')
self.assertEqual(sub_items[-1].window_text(), u'Years')
expected_str = "Date Elements\n Empty Date\n Week\n Monday\n Tuesday\n Wednsday\n"
expected_str += " Thursday\n Friday\n Saturday\n Sunday\n Months\n Years\n"
self.assertEqual(self.ctrl.print_items(), expected_str)
def test_tv_item_select(self):
"""Test selecting an item from TreeView"""
# Find by a path with indexes
itm = self.ctrl.get_item((0, 2, 3))
self.assertEqual(itm.is_selected(), False)
# Select
itm.select()
self.assertEqual(itm.is_selected(), True)
# A second call to Select doesn't remove selection
itm.select()
self.assertEqual(itm.is_selected(), True)
itm = self.ctrl.get_item((0, 3, 2))
itm.ensure_visible()
self.assertEqual(itm.is_selected(), False)
coords = itm.children(control_type='Text')[0].rectangle().mid_point()
itm.click_input(coords=coords, absolute=True)
self.assertEqual(itm.is_selected(), True)
def test_tv_get_item(self):
"""Test getting an item from TreeView"""
# Find by a path with indexes
itm = self.ctrl.get_item((0, 2, 3))
self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True)
self.assertEqual(itm.window_text(), u'April')
# Find by a path with strings
itm = self.ctrl.get_item('\\Date Elements\\Months\\April', exact=True)
self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True)
self.assertEqual(itm.window_text(), u'April')
itm = self.ctrl.get_item('\\ Date Elements \\ months \\ april', exact=False)
self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True)
self.assertEqual(itm.window_text(), u'April')
itm = self.ctrl.get_item('\\Date Elements', exact=False)
self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True)
self.assertEqual(itm.window_text(), u'Date Elements')
# Try to find the last item in the tree hierarchy
itm = self.ctrl.get_item('\\Date Elements\\Years\\2018', exact=False)
self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True)
self.assertEqual(itm.window_text(), u'2018')
itm = self.ctrl.get_item((0, 3, 3))
self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True)
self.assertEqual(itm.window_text(), u'2018')
# Verify errors handling
self.assertRaises(uia_defs.NoPatternInterfaceError, itm.is_checked)
self.assertRaises(RuntimeError,
self.ctrl.get_item,
'Date Elements\\months',
exact=False)
self.assertRaises(IndexError,
self.ctrl.get_item,
'\\_X_- \\months',
exact=False)
self.assertRaises(IndexError,
self.ctrl.get_item,
'\\_X_- \\ months',
exact=True)
self.assertRaises(IndexError,
self.ctrl.get_item,
'\\Date Elements\\ months \\ aprel',
exact=False)
self.assertRaises(IndexError,
self.ctrl.get_item,
'\\Date Elements\\ months \\ april\\',
exact=False)
self.assertRaises(IndexError,
self.ctrl.get_item,
'\\Date Elements\\ months \\ aprel',
exact=True)
self.assertRaises(IndexError, self.ctrl.get_item, (0, 200, 1))
self.assertRaises(IndexError, self.ctrl.get_item, (130, 2, 1))
def test_tv_drag_n_drop(self):
"""Test moving an item with mouse over TreeView"""
# Make sure both nodes are visible
self.ctrl.get_item('\\Date Elements\\weeks').collapse()
itm_from = self.ctrl.get_item('\\Date Elements\\Years')
itm_to = self.ctrl.get_item('\\Date Elements\\Empty Date')
itm_from.drag_mouse_input(itm_to)
# Verify that the item and its sub-items are attached to the new node
itm = self.ctrl.get_item('\\Date Elements\\Empty Date\\Years')
self.assertEqual(itm.window_text(), 'Years')
itm = self.ctrl.get_item((0, 0, 0, 0))
self.assertEqual(itm.window_text(), '2015')
itm = self.ctrl.get_item('\\Date Elements\\Empty Date\\Years')
itm.collapse()
itm_from = self.ctrl.get_item('\\Date Elements\\Empty Date\\Years')
itm_to = self.ctrl.get_item(r'\Date Elements\Months')
self.ctrl.drag_mouse_input(itm_to, itm_from)
itm = self.ctrl.get_item(r'\Date Elements\Months\Years')
self.assertEqual(itm.window_text(), 'Years')
# Error handling: drop on itself
self.assertRaises(AttributeError,
self.ctrl.drag_mouse_input,
itm_from, itm_from)
# Drag-n-drop by manually calculated absolute coordinates
itm_from = self.ctrl.get_item(r'\Date Elements\Months')
itm_from.collapse()
r = itm_from.rectangle()
coords_from = (int(r.left + (r.width() / 4.0)),
int(r.top + (r.height() / 2.0)))
r = self.ctrl.get_item(r'\Date Elements\Weeks').rectangle()
coords_to = (int(r.left + (r.width() / 4.0)),
int(r.top + (r.height() / 2.0)))
self.ctrl.drag_mouse_input(coords_to, coords_from)
itm = self.ctrl.get_item(r'\Date Elements\Weeks\Months')
self.assertEqual(itm.window_text(), 'Months')
class WindowWrapperTests(unittest.TestCase):
"""Unit tests for the UIAWrapper class for Window elements"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
test_folder = os.path.join(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))), r"apps/MouseTester")
self.qt5_app = os.path.join(test_folder, "mousebuttons.exe")
# start the application
self.app = Application(backend='uia')
self.app = self.app.start(self.qt5_app)
self.dlg = self.app.MouseButtonTester.find()
self.another_app = None
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
if self.another_app:
self.another_app.kill()
self.another_app = None
def test_issue_443(self):
"""Test .set_focus() for window that is not keyboard focusable"""
self.dlg.minimize()
wait_until(1, 0.2, self.dlg.is_minimized)
self.dlg.set_focus()
wait_until(1, 0.2, self.dlg.is_minimized, value=False)
self.assertEqual(self.dlg.is_normal(), True)
# run another app instance (in focus now)
self.another_app = Application(backend="win32").start(self.qt5_app)
# eliminate clickable point at original app by maximizing second window
self.another_app.MouseButtonTester.maximize()
self.another_app.MouseButtonTester.set_focus()
self.assertEqual(self.another_app.MouseButtonTester.has_focus(), True)
self.dlg.set_focus()
# another app instance has lost focus
self.assertEqual(self.another_app.MouseButtonTester.has_focus(), False)
# our window has been brought to the focus (clickable point exists)
self.assertEqual(self.dlg.element_info.element.GetClickablePoint()[-1], 1)
if __name__ == "__main__":
if UIA_support:
unittest.main()
|
pywinauto/pywinauto
|
pywinauto/unittests/test_uiawrapper.py
|
Python
|
bsd-3-clause
| 96,696
|
{% block meta %}
name: SleepState
description:
SMACH state that pauses state machine execution for a specified length of
time.
language: Python
framework: SMACH
type: State
tags: [core]
includes:
- State
extends: []
variables:
- time:
description: The length of time to sleep for in seconds.
type: float
input_keys: []
output_keys: []
outcomes:
- succeeded
{% endblock meta %}
{% from "Utils.tpl.py" import import_module, render_transitions %}
{% extends "State.tpl.py" %}
{% block imports %}
{{ super() }}
{{ import_module(defined_headers, 'rospy') }}
{% endblock imports %}
{% block class_defs %}
{{ super() }}
{% if 'class_SleepState' not in defined_headers %}
class SleepState(smach.State):
def __init__(self, time, input_keys = [], output_keys = [], callbacks = [], outcomes=['succeeded']):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=outcomes)
self._time = time
def execute(self, userdata):
rospy.sleep(self._time)
return 'succeeded'
{% do defined_headers.append('class_SleepState') %}{% endif %}
{% endblock class_defs %}
{% block body %}
smach.{{ parent_type }}.add('{{ name }}',
{{ '' | indent(23, true) }}SleepState({{ time }}){% if transitions is defined %},
{{ render_transitions(transitions) }}{% endif %})
{% endblock body %}
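{# A minimal rendering sketch (illustrative assumption, not part of the original template):
   with name='WAIT', time=2.0 and parent_type='StateMachine', the body block above would
   render roughly as:
       smach.StateMachine.add('WAIT',
                              SleepState(2.0),
                              transitions={'succeeded': 'NEXT'})
   assuming render_transitions emits a transitions={...} mapping for the given transitions. #}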
|
ReconCell/smacha
|
smacha_ros/src/smacha_ros/templates/SleepState.tpl.py
|
Python
|
bsd-3-clause
| 1,351
|
from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import Release
@register(Release)
class ReleaseSerializer(Serializer):
def serialize(self, obj, attrs, user):
d = {
'id': str(obj.id),
'version': obj.version,
'dateCreated': obj.date_added,
}
return d
|
camilonova/sentry
|
src/sentry/api/serializers/models/release.py
|
Python
|
bsd-3-clause
| 382
|
# -*- coding: utf-8 -*-
"""Tests of misc type utility functions."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal as ae
from pytest import raises, mark
from ..kwik.mock import artificial_spike_clusters
from ..utils import (Bunch, _is_integer, captured_output,
_load_arrays, _save_arrays,
_pad,
_spikes_per_cluster,
get_excerpts,
chunk_bounds,
data_chunk,
excerpts,
)
#------------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------------
def test_bunch():
obj = Bunch()
obj['a'] = 1
assert obj.a == 1
obj.b = 2
assert obj['b'] == 2
def test_integer():
assert _is_integer(3)
assert _is_integer(np.arange(1)[0])
assert not _is_integer(3.)
def test_captured_output():
with captured_output() as (out, err):
print('Hello world!')
assert out.getvalue().strip() == 'Hello world!'
@mark.parametrize('n', [20, 0])
def test_load_save_arrays(tempdir, n):
path = op.join(tempdir, 'test.npy')
# Random arrays.
arrays = []
for i in range(n):
size = np.random.randint(low=3, high=50)
assert size > 0
arr = np.random.rand(size, 3).astype(np.float32)
arrays.append(arr)
_save_arrays(path, arrays)
arrays_loaded = _load_arrays(path)
assert len(arrays) == len(arrays_loaded)
for arr, arr_loaded in zip(arrays, arrays_loaded):
assert arr.shape == arr_loaded.shape
assert arr.dtype == arr_loaded.dtype
ae(arr, arr_loaded)
def test_pad():
arr = np.random.rand(10, 3)
ae(_pad(arr, 0, 'right'), arr[:0, :])
ae(_pad(arr, 3, 'right'), arr[:3, :])
ae(_pad(arr, 9), arr[:9, :])
ae(_pad(arr, 10), arr)
ae(_pad(arr, 12, 'right')[:10, :], arr)
ae(_pad(arr, 12)[10:, :], np.zeros((2, 3)))
ae(_pad(arr, 0, 'left'), arr[:0, :])
ae(_pad(arr, 3, 'left'), arr[7:, :])
ae(_pad(arr, 9, 'left'), arr[1:, :])
ae(_pad(arr, 10, 'left'), arr)
ae(_pad(arr, 12, 'left')[2:, :], arr)
ae(_pad(arr, 12, 'left')[:2, :], np.zeros((2, 3)))
with raises(ValueError):
_pad(arr, -1)
def test_spikes_per_cluster():
"""Test _spikes_per_cluster()."""
n_spikes = 1000
spike_ids = np.arange(n_spikes).astype(np.int64)
n_clusters = 10
spike_clusters = artificial_spike_clusters(n_spikes, n_clusters)
spikes_per_cluster = _spikes_per_cluster(spike_ids, spike_clusters)
assert list(spikes_per_cluster.keys()) == list(range(n_clusters))
for i in range(10):
ae(spikes_per_cluster[i], np.sort(spikes_per_cluster[i]))
assert np.all(spike_clusters[spikes_per_cluster[i]] == i)
#------------------------------------------------------------------------------
# Test chunking
#------------------------------------------------------------------------------
def test_chunk_bounds():
chunks = chunk_bounds(200, 100, overlap=20)
assert next(chunks) == (0, 100, 0, 90)
assert next(chunks) == (80, 180, 90, 170)
assert next(chunks) == (160, 200, 170, 200)
def test_chunk():
data = np.random.randn(200, 4)
chunks = chunk_bounds(data.shape[0], 100, overlap=20)
with raises(ValueError):
data_chunk(data, (0, 0, 0))
assert data_chunk(data, (0, 0)).shape == (0, 4)
# Chunk 1.
ch = next(chunks)
d = data_chunk(data, ch)
d_o = data_chunk(data, ch, with_overlap=True)
ae(d_o, data[0:100])
ae(d, data[0:90])
# Chunk 2.
ch = next(chunks)
d = data_chunk(data, ch)
d_o = data_chunk(data, ch, with_overlap=True)
ae(d_o, data[80:180])
ae(d, data[90:170])
def test_excerpts_1():
bounds = [(start, end) for (start, end) in excerpts(100,
n_excerpts=3,
excerpt_size=10)]
assert bounds == [(0, 10), (45, 55), (90, 100)]
def test_excerpts_2():
bounds = [(start, end) for (start, end) in excerpts(10,
n_excerpts=3,
excerpt_size=10)]
assert bounds == [(0, 10)]
def test_get_excerpts():
data = np.random.rand(100, 2)
subdata = get_excerpts(data, n_excerpts=10, excerpt_size=5)
assert subdata.shape == (50, 2)
ae(subdata[:5, :], data[:5, :])
ae(subdata[-5:, :], data[-10:-5, :])
data = np.random.rand(10, 2)
subdata = get_excerpts(data, n_excerpts=10, excerpt_size=5)
ae(subdata, data)
|
kwikteam/klusta
|
klusta/tests/test_utils.py
|
Python
|
bsd-3-clause
| 4,913
|
from django.template import Library
from django.template.defaultfilters import stringfilter
from math import ceil
register = Library()
@register.filter
def columns(thelist, n):
"""
Split a list into `n` columns
"""
try:
n = int(n)
thelist = list(thelist)
except (ValueError, TypeError):
return [thelist]
list_len = len(thelist)
split = float(list_len) / n
return [thelist[int(ceil(i * split)):int(ceil((i + 1) * split))]
for i in xrange(n)]
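# Hypothetical usage sketch (not part of the original file): object_list|columns:3 applied
# to a 7-item list returns three sub-lists of near-equal size, [[1, 2, 3], [4, 5], [6, 7]],
# which a template can then iterate with {% for column in object_list|columns:3 %}.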
@register.filter()
def truncate(s, max_len):
"""
Truncates a string after a certain number of letters
"""
try:
length = int(max_len)
except ValueError:
return s
if len(s) > length:
return s[:length] + '...'
else:
return s[:length]
truncate.is_safe = True
truncate = stringfilter(truncate)
|
babsey/django-forums-bootstrap
|
webapp/templatetags/webapp_filters.py
|
Python
|
bsd-3-clause
| 867
|
# -*- coding: utf-8 -*-
from django.utils.datastructures import SortedDict
from django.db.models.sql.where import ExtraWhere
from .sql.tree import AND, OR
from .sql.utils import _setup_joins_for_fields
class StatementMixIn(object):
def annotate_functions(self, **kwargs):
extra_select, params = SortedDict(), []
clone = self._clone()
for alias, node in kwargs.iteritems():
_sql, _params = node.as_sql(self.quote_name, self)
extra_select[alias] = _sql
params.extend(_params)
clone.query.add_extra(extra_select, params, None, None, None, None)
return clone
def where(self, *args):
clone = self._clone()
statement = AND(*args)
_sql, _params = statement.as_sql(self.quote_name, clone)
if hasattr(_sql, 'to_str'):
_sql = _sql.to_str()
clone.query.where.add(ExtraWhere([_sql], _params), "AND")
return clone
|
cr8ivecodesmith/django-orm-extensions-save22
|
django_orm/core/queryset.py
|
Python
|
bsd-3-clause
| 955
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import include
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls import url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Local application / specific library imports
from cross_site_urls.conf import settings as cross_site_settings
admin.autodiscover()
urlpatterns = i18n_patterns(
'',
url((r'^{}').format(cross_site_settings.DEFAULT_API_URL), include('cross_site_urls.urls')),
)
urlpatterns += staticfiles_urlpatterns()
|
kapt-labs/django-cross-site-urls
|
tests/_testsite/urls.py
|
Python
|
bsd-3-clause
| 598
|
from __future__ import division, absolute_import, print_function
import copy
import pickle
import sys
import platform
import gc
import copy
import warnings
import tempfile
from os import path
from io import BytesIO
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, assert_warns, dec
)
from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
"""Ticket #3"""
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
def test_mem_empty(self,level=rlevel):
"""Ticket #7"""
np.empty((1,), dtype=[('x', np.int64)])
def test_pickle_transposed(self,level=rlevel):
"""Ticket #16"""
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
f = BytesIO()
pickle.dump(a, f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a, b)
def test_typeNA(self,level=rlevel):
"""Ticket #31"""
assert_equal(np.typeNA[np.int64], 'Int64')
assert_equal(np.typeNA[np.uint64], 'UInt64')
def test_dtype_names(self,level=rlevel):
"""Ticket #35"""
dt = np.dtype([(('name', 'label'), np.int32, 3)])
def test_reduce(self,level=rlevel):
"""Ticket #40"""
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
def test_zeros_order(self,level=rlevel):
"""Ticket #43"""
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_asarray_with_order(self,level=rlevel):
"""Check that nothing is done when order='F' and array C/F-contiguous"""
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
def test_ravel_with_order(self,level=rlevel):
"""Check that ravel works when order='F' and array C/F-contiguous"""
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
def test_sort_bigendian(self,level=rlevel):
"""Ticket #47"""
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self,level=rlevel):
"""Ticket #49"""
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
"""Ticket #50"""
f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
def test_noncontiguous_fill(self,level=rlevel):
"""Ticket #58."""
a = np.zeros((5, 3))
b = a[:, :2,]
def rs():
b.shape = (10,)
self.assertRaises(AttributeError, rs)
def test_bool(self,level=rlevel):
"""Ticket #60"""
x = np.bool_(1)
def test_indexing1(self,level=rlevel):
"""Ticket #64"""
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
"""Ticket #65"""
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
"""Ticket #67"""
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
"""Ticket #72"""
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
self.assertTrue(a[1] == 'auto')
self.assertTrue(a[0] != 'auto')
b = np.linspace(0, 10, 11)
self.assertTrue(b != 'auto')
self.assertTrue(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
"""Ticket #79"""
ulen = 1
ucs_value = sixu('\U0010FFFF')
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua2 = ua.newbyteorder()
def test_object_array_fill(self,level=rlevel):
"""Ticket #86"""
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self,level=rlevel):
"""Ticket #93"""
self.assertRaises(TypeError, np.dtype,
{'names':['a'],'formats':['foo']}, align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
(sys.platform == "win32" and
platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
def test_intp(self,level=rlevel):
"""Ticket #99"""
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width, 16)
self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
self.assertRaises(ValueError, np.intp, '0x1', 32)
assert_equal(255, np.intp('0xFF', 16))
assert_equal(1024, np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
"""Ticket #105"""
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
xa = np.where((a>2) & (a<6))
xb = np.where((b>2) & (b<6))
ya = ((a>2) & (a<6))
yb = ((b>2) & (b<6))
assert_array_almost_equal(xa, ya.nonzero())
assert_array_almost_equal(xb, yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_endian_where(self,level=rlevel):
"""GitHub issue #369"""
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
max_net = net.max()
test = np.where(net <= 0., max_net, net)
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
def test_endian_recarray(self,level=rlevel):
"""Ticket #2185"""
dt = np.dtype([
('head', '>u4'),
('data', '>u4', 2),
])
buf = np.recarray(1, dtype=dt)
buf[0]['head'] = 1
buf[0]['data'][:] = [1, 1]
h = buf[0]['head']
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
def test_mem_dot(self,level=rlevel):
"""Ticket #106"""
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
# Dummy array to detect bad memory access:
_z = np.ones(10)
_dummy = np.empty((0, 10))
z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
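        # Note: z is an empty (0, 10) view sharing _z's buffer; if dot() wrote
        # anything out of bounds it would clobber _z, which the asserts below detect.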
np.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
# Do the same for the built-in dot:
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
def test_arange_endian(self,level=rlevel):
"""Ticket #111"""
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
assert_array_equal(ref, x)
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
# Longfloat support is not consistent enough across
# platforms for this test to be meaningful.
# def test_longfloat_repr(self,level=rlevel):
# """Ticket #112"""
# if np.longfloat(0).itemsize > 8:
# a = np.exp(np.array([1000],dtype=np.longfloat))
# assert_(str(a)[1:9] == str(a[0])[:8])
def test_argmax(self,level=rlevel):
"""Ticket #119"""
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
aargmax = a.argmax(i)
def test_mem_divmod(self,level=rlevel):
"""Ticket #126"""
for i in range(10):
divmod(np.array([i])[0], 10)
def test_hstack_invalid_dims(self,level=rlevel):
"""Ticket #128"""
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
self.assertRaises(ValueError, np.hstack, (x, y))
def test_squeeze_type(self,level=rlevel):
"""Ticket #133"""
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self,level=rlevel):
"""Ticket #143"""
assert_equal(0, np.add.identity)
def test_numpy_float_python_long_addition(self):
# Check that numpy float and python longs can be added correctly.
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
def test_binary_repr_0(self,level=rlevel):
"""Ticket #151"""
assert_equal('0', np.binary_repr(0))
def test_rec_iterate(self,level=rlevel):
"""Ticket #160"""
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
(2, 2.2, '2.0')], dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self,level=rlevel):
"""Ticket #190"""
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel):
"""Fix in r2836"""
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))
def test_flat_assignment(self,level=rlevel):
"""Correct behaviour of ticket #194"""
x = np.empty((3, 1))
x.flat = np.arange(3)
assert_array_almost_equal(x, [[0], [1], [2]])
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
def test_broadcast_flat_assignment(self,level=rlevel):
"""Ticket #194"""
x = np.empty((3, 1))
def bfa(): x[:] = np.arange(3)
def bfb(): x[:] = np.arange(3, dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
# and equivalent unsafe casted array assignment
a = np.arange(10)
b = np.ones(10, dtype=bool)
r = np.arange(10)
def assign(a, b, c):
a[b] = c
assert_raises(ValueError, assign, a, b, np.nan)
a[b] = np.array(np.nan) # but not this.
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
def test_unpickle_dtype_with_object(self,level=rlevel):
"""Implemented in r2840"""
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
f = BytesIO()
pickle.dump(dt, f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self,level=rlevel):
"""Ticket #196"""
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
self.assertRaises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
def test_recarray_single_element(self,level=rlevel):
"""Ticket #202"""
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
assert_array_equal(a, b)
assert_equal(a, r[0][0])
def test_zero_sized_array_indexing(self,level=rlevel):
"""Ticket #205"""
tmp = np.array([])
def index_tmp(): tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self,level=rlevel):
"""Ticket #222"""
x = np.chararray((1,), 5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self,level=rlevel):
"""Ticket #239"""
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
assert_equal(np.array([], dtype=object).shape, (0,))
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
def test_mem_around(self,level=rlevel):
"""Ticket #243"""
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
def test_character_array_strip(self,level=rlevel):
"""Ticket #246"""
x = np.char.array(("x", "x ", "x "))
for c in x: assert_equal(c, "x")
def test_lexsort(self,level=rlevel):
"""Lexsort memory error"""
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
def test_lexsort_invalid_sequence(self):
# Issue gh-4123
class BuggySequence(object):
def __len__(self):
return 4
def __getitem__(self, key):
raise KeyError
assert_raises(KeyError, np.lexsort, BuggySequence())
def test_pickle_py2_bytes_encoding(self):
# Check that arrays and scalars pickled on Py2 are
# unpickleable on Py3 using encoding='bytes'
test_data = [
# (original, py2_pickle)
(np.unicode_('\u6f2c'),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n.")),
(np.array([9e123], dtype=np.float64),
asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb.")),
(np.array([(9e123,)], dtype=[('name', float)]),
asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb.")),
]
if sys.version_info[:2] >= (3, 4):
# encoding='bytes' was added in Py3.4
for original, data in test_data:
result = pickle.loads(data, encoding='bytes')
assert_equal(result, original)
if isinstance(result, np.ndarray) and result.dtype.names:
for name in result.dtype.names:
assert_(isinstance(name, str))
def test_pickle_dtype(self,level=rlevel):
"""Ticket #251"""
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
"""Ticket #265"""
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
def test_object_array_from_list(self, level=rlevel):
"""Ticket #270"""
a = np.array([1, 'A', None])
def test_multiple_assign(self, level=rlevel):
"""Ticket #273"""
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self, level=rlevel):
        # Make sure methods and functions have the same default axis
        # keyword and arguments
funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8, 7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
res1 = arr
if res1.dtype.kind in 'uib':
assert_((res1 == res2).all(), func)
else:
assert_(abs(res1-res2).max() < 1e-8, func)
for func in funcs2:
arr1 = np.random.rand(8, 7)
arr2 = np.random.rand(8, 7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self, level=rlevel):
"""Ticket #298"""
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
"""Ticket #302"""
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
def test_recarray_copy(self, level=rlevel):
"""Ticket #312"""
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self, level=rlevel):
"""Ticket #322"""
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
def test_ndmin_float64(self, level=rlevel):
"""Ticket #324"""
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
def test_ndmin_order(self, level=rlevel):
"""Issue #465 and related checks"""
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
def test_mem_axis_minimization(self, level=rlevel):
"""Ticket #327"""
data = np.arange(5)
data = np.add.outer(data, data)
def test_mem_float_imag(self, level=rlevel):
"""Ticket #330"""
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
"""Ticket #334"""
assert_(np.dtype('i4') == np.dtype(('i4', ())))
def test_dtype_posttuple(self, level=rlevel):
"""Ticket #335"""
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
"""Ticket #341"""
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
"""Ticket #342"""
self.assertRaises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
def test_dtype_repr(self, level=rlevel):
"""Ticket #344"""
        dt1 = np.dtype(('uint32', 2))
        dt2 = np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self, level=rlevel):
"""Make sure reshape order works."""
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
def test_reshape_zero_strides(self, level=rlevel):
"""Issue #380, test reshaping of zero strided arrays"""
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
def test_reshape_zero_size(self, level=rlevel):
"""GitHub Issue #2700, setting shape failed for 0-sized arrays"""
a = np.ones((0, 2))
a.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
strides_c = (16, 8, 8, 8)
strides_f = (8, 24, 48, 48)
assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
def test_repeat_discont(self, level=rlevel):
"""Ticket #352"""
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
def test_array_index(self, level=rlevel):
"""Make sure optimization is not called in this case."""
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a==3)], a2[np.where(a2==3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self, level=rlevel):
"""Ticket #372"""
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)]),
np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
def test_random_shuffle(self, level=rlevel):
"""Ticket #374"""
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
@dec.skipif(not hasattr(sys, 'getrefcount'))
def test_refcount_vdot(self, level=rlevel):
"""Changeset #3443"""
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
"""Ticket #413"""
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
"""Convolve should raise an error for empty input array."""
self.assertRaises(ValueError, np.convolve, [], [1])
self.assertRaises(ValueError, np.convolve, [1], [])
def test_multidim_byteswap(self, level=rlevel):
"""Ticket #449"""
        r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
def test_string_NULL(self, level=rlevel):
"""Changeset 3557"""
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
"""Ticket #483"""
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))
def test_take_output(self, level=rlevel):
"""Ensure that 'take' honours output parameter."""
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
b = np.zeros_like(a)
np.take(x, [0, 2], axis=1, out=b)
assert_array_equal(a, b)
def test_take_object_fail(self):
# Issue gh-3001
d = 123.
a = np.array([d, 1], dtype=object)
ref_d = sys.getrefcount(d)
try:
a.take([0, 100])
except IndexError:
pass
assert_(ref_d == sys.getrefcount(d))
def test_array_str_64bit(self, level=rlevel):
"""Ticket #501"""
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
sstr = np.array_str(s)
def test_frompyfunc_endian(self, level=rlevel):
"""Ticket #503"""
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
"""Ticket #514"""
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s ))
def test_arr_transpose(self, level=rlevel):
"""Ticket #516"""
x = np.random.rand(*(2,)*16)
y = x.transpose(list(range(16)))
@dec.skipif('__pypy__' in sys.builtin_module_names)
def test_string_mergesort(self, level=rlevel):
"""Ticket #540"""
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
"""Ticket #546"""
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self, level=rlevel):
"""Ticket #555"""
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
"""Ticket #562"""
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
def test_dot_negative_stride(self, level=rlevel):
"""Ticket #588"""
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
def test_object_casting(self, level=rlevel):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484, 286])
y = np.zeros([484, 286])
x |= y
self.assertRaises(TypeError, rs)
def test_unicode_scalar(self, level=rlevel):
"""Ticket #600"""
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = pickle.loads(pickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
"""Ticket #616"""
for T in ('>f4', '<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0, dtype=dt).dtype, dt)
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
def test_bool_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v): x[(s>0)]=v
        # Once the deprecation is removed, the following will raise ValueErrors.
        # This might seem odd compared to the ValueError below, but the new
        # code always uses "nonzero" logic and never takes the boolean
        # special case.
self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))
# Old special case (different code path):
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
def test_mem_scalar_indexing(self, level=rlevel):
"""Ticket #603"""
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
assert_equal(np.binary_repr(0, width=3), '000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
def test_string_argsort_with_zeros(self, level=rlevel):
"""Check argsort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
def test_string_sort_with_zeros(self, level=rlevel):
"""Check sort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
"""Ticket #658"""
np.indices((0, 3, 4)).T.reshape(-1, 3)
def test_flat_byteorder(self, level=rlevel):
"""Ticket #657"""
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel) :
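        # -2 wraps modulo 2**64, giving 2**64 - 2 = 18446744073709551614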
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
x = np.array([0, -0.0, 0])
assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
"""Ticket #658"""
np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_copy_detection_corner_case2(self, level=rlevel):
"""Ticket #771: strides are not set correctly when reshaping 0-sized
arrays"""
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
"""Ticket #633"""
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1-d broadcast slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
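        # overwriting arr[:2] with c releases two of the five references to b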
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1-d -> 2-d broadcast slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:,:] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:, 0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2-d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:, 0] = a
arr[:, 1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:, 0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr3 = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
arr3 = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
arr3 = arr1.take([1, 2, 3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0], [1], [0], [1], [1]], int)
arr3 = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
def test_mem_custom_float_to_array(self, level=rlevel):
"""Ticket 702"""
class MyFloat(object):
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp2 = tmp.astype(float)
def test_object_array_refcount_self_assign(self, level=rlevel):
"""Ticket #711"""
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1, 2, 3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
"""Ticket #713"""
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
"""Ticket #714"""
np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
"""Ticket #640, floats from string"""
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_nonnative_endian_fill(self, level=rlevel):
""" Non-native endian arrays were incorrectly filled with scalars before
r5034.
"""
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
"""Test for ticket #551, changeset r5140"""
x = np.zeros((30, 40))
y = pickle.loads(pickle.dumps(x))
        # y is now typically not aligned on an 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
"""Ticket #788, changeset r5155"""
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
f = open(filename, 'rb')
xp = pickle.load(f, encoding='latin1')
f.close()
else:
f = open(filename)
xp = pickle.load(f)
f.close()
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self, level=rlevel):
"""Ticket #789, changeset 5217.
"""
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype = np.single)
try:
a.compress([True, False], axis = 1, out = b)
raise AssertionError("compress with an out which cannot be "
"safely casted should not return "
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
"""Ticket #791
"""
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
assert_(dat.info == 'jubba')
dat.resize((4, 2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2, 3, 4], [6, 3, 4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32, 0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2, 7).info == 'jubba')
assert_(dat.compress([0, 1, 1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0], 'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32, 0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.nonzero()[0].info == 'jubba')
assert_(dat.nonzero()[1].info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2, 4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0, 1).info == 'jubba')
assert_(dat.take([2, 3, 5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
def test_recarray_tolist(self, level=rlevel):
"""Ticket #793, changeset r5215
"""
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_nonscalar_item_method(self):
        # Make sure that .item() fails gracefully when it should
a = np.arange(5)
assert_raises(ValueError, a.item)
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1', '2', '3']))
assert_equal(a, b)
def test_unaligned_unicode_access(self, level=rlevel) :
"""Ticket #825"""
for i in range(1, 9) :
msg = 'unicode offset: %d chars'%i
t = np.dtype([('a', 'S%d'%i), ('b', 'U2')])
x = np.array([(asbytes('a'), sixu('b'))], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
"""Ticket 794."""
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self, level=rlevel):
"""Ticket #674"""
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self, level=rlevel):
"""Ticket #816"""
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4, 5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
"""Ticket #840 - order should be an invalid keyword."""
x = np.array([[0, 1], [2, 3]])
self.assertRaises(TypeError, x.resize, (2, 2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
"""Ticket #955"""
with np.errstate(all="ignore"):
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert_(n_before >= n_after, (n_before, n_after))
def test_void_scalar_with_titles(self, level=rlevel):
"""No ticket"""
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
@dec.skipif('__pypy__' in sys.builtin_module_names)
def test_void_scalar_constructor(self):
#Issue #1550
#Create test string data, construct void scalar from data and assert
#that void scalar contains original data.
test_string = np.array("test")
test_string_void_scalar = np.core.multiarray.scalar(
np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
#Create record scalar, construct from data and assert that
#reconstructed scalar is correct.
test_record = np.ones((), "i,i")
test_record_void_scalar = np.core.multiarray.scalar(
test_record.dtype, test_record.tobytes())
assert_(test_record_void_scalar == test_record)
#Test pickle and unpickle of void and record scalars
assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
def test_blasdot_uninitialized_memory(self):
"""Ticket #950"""
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
"""Regression test for #1061."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
"""Regression test for #1062."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
a = np.arange(sz)
            self.assertTrue(a.size == sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
"""Ticket #1058"""
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
"""Ticket #1078: segfaults when creating an array with a sequence of 0d
arrays."""
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
"""Ticket #1081: weird array with strange input..."""
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
"""Ticket #1080."""
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
"""Ticket #1106."""
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
"""Ticket #1217, don't segfault."""
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
"""Ticket #1235."""
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
"""Ticket #1240."""
a = np.array(
[ [sixu('abc'), sixu('\u03a3')],
[sixu('asdf'), sixu('erw')]
], dtype='U')
def fail():
b = np.array(a, 'S4')
self.assertRaises(UnicodeEncodeError, fail)
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', sixu('123')])
assert_(a.itemsize == 16)
a = np.array([sixu('123'), '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', sixu('123'), '12345'])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('12345')])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('1234')])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
"""Ticket #1198 and #1267"""
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
r = repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
a3 = np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
a4 = copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
"""Ticket #1267"""
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
"""Ticket #1267"""
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
"""Ticket #1259 and gh-441"""
for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
z = np.array([2.2-1.1j], dtype)
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder()))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype))
# double check real and imaginary parts:
assert_equal(x.real, y.real.byteswap())
assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
"""Ticket #1299"""
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
def test_structured_arrays_with_objects2(self):
"""Ticket #1299 second test"""
stra = 'aaaa'
strb = 'bbbb'
if hasattr(sys, 'getrefcount'):
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
if hasattr(sys, 'getrefcount'):
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
"""Ticket #1254"""
def func():
x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')])
self.assertRaises(ValueError, func)
def test_signed_integer_division_overflow(self):
"""Ticket #1317."""
def test_type(t):
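            # np.iinfo(t).min // -1 overflows: -min is not representable in t,
            # so we only check that the operation does not crash.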
min = np.array([np.iinfo(t).min])
min //= -1
with np.errstate(divide="ignore"):
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], dtypes[0])
failures = []
# ignore complex warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError as e:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls, i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1, 2, 3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=np.float)
a.shape = (4, 4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
if hasattr(sys, 'getrefcount'):
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
if hasattr(sys, 'getrefcount'):
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
# On Python 3, tofile/fromfile used to get (#1610) the Python
# file handle out of sync
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tobytes())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_complex_boolean_cast(self):
"""Ticket #2218"""
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
assert_(np.any(x))
assert_(np.all(x[1:]))
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
rep = repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
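        # Reinterpreted as float32, 0x80000000 is -0.0 (sign bit only) and must
        # not count as nonzero, while 0x00000080 is a tiny denormal and must.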
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([], ['?', '?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
assert_equal(np.dtype('S10').itemsize, 10)
A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
# This was throwing an exception because in ctors.c,
# discover_itemsize was calling PyObject_Length without checking
# the return code. This failed to get the length of the number 2,
# and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_squeeze_contiguous(self):
"""Similar to GitHub issue #387"""
a = np.zeros((1, 2)).squeeze()
b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze()
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
def test_reduce_contiguous(self):
"""GitHub issue #387"""
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
b = np.add.reduce(np.zeros((2, 1, 2)), 1)
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.c_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
# Test the same for a circular reference.
b = np.array(a, dtype=object)
a[()] = b
assert_raises(TypeError, int, a)
# Numpy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
a[()] = 0
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
# causing segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
assert_equal(sys.getrefcount(a[()]), 2)
a[()].__class__ # will segfault if object was deleted
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1, 0]), [])
def test_objectarray_setfield(self):
# Setfield directly manipulates the raw array data,
# so is invalid for object arrays.
x = np.array([1, 2, 3], dtype=object)
assert_raises(RuntimeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
"""Ticket #1756 """
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d"%i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
        r = np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_alignment_update(self):
"""Check that alignment flag is updated on stride setting"""
a = np.arange(10)
assert_(a.flags.aligned)
a.strides = 3
assert_(not a.flags.aligned)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1, 2], [3, 4]]).T
y = np.array(x.flat)
assert_equal(x, [[1, 3], [2, 4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
if sys.version_info[0] >= 3:
data = np.array([1], dtype='b')
data = pickle.loads(pickle.dumps(data))
data[0] = 0xdd
bytestring = "\x01 ".encode('ascii')
assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
data = asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
"p13\ntp14\nb.")
if sys.version_info[0] >= 3:
# This should work:
result = pickle.loads(data, encoding='latin1')
assert_array_equal(result, np.array([129], dtype='b'))
# Should not segfault:
assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
def test_pickle_py2_scalar_latin1_hack(self):
        # Check that the scalar unpickling hack in Py3 that supports
        # encoding='latin1' works correctly.
# Python2 output for pickle.dumps(...)
datas = [
# (original, python2_pickle, koi8r_validity)
(np.unicode_('\u6bd2'),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
'invalid'),
(np.float64(9e123),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
'invalid'),
(np.bytes_(asbytes('\x9c')), # different 8-bit code point in KOI8-R vs latin1
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
"tp8\nRp9\n."),
'different'),
]
if sys.version_info[0] >= 3:
for original, data, koi8r_validity in datas:
result = pickle.loads(data, encoding='latin1')
assert_equal(result, original)
                # Decoding under a non-latin1 encoding, e.g. KOI8-R, can
# produce bad results, but should not segfault.
if koi8r_validity == 'different':
# Unicode code points happen to lie within latin1,
                    # but are different in koi8-r, resulting in silent
# bogus results
result = pickle.loads(data, encoding='koi8-r')
assert_(result != original)
elif koi8r_validity == 'invalid':
                    # Unicode code points outside latin1, so decoding results
                    # in an encoding exception
assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
else:
raise ValueError(koi8r_validity)
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0, 1)
a_obj[1] = (3, 2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2, 2), dtype=object)
a[...] = [[1, 2]]
assert_equal(a, [[1, 2], [1, 2]])
@dec.skipif('__pypy__' in sys.builtin_module_names)
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
@dec.skipif(not hasattr(sys, 'getrefcount'))
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
res = np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr in [asunicode, asbytes]:
b = np.array([val, tostr('xx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val])
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val])
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for Numpy's four byte unicode.
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
a = np.array([sixu('abcd')])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
        # Ticket #2063: must always choose a stable sort for argsort to
        # get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode)[0]
del a
def test_refcount_error_in_clip(self):
# Ticket #1588
a = np.zeros((2,), dtype='>i2').clip(min=0)
x = a + a
# This used to segfault:
y = str(x)
# Check the final string:
assert_(y == "[0 0]")
def test_searchsorted_wrong_dtype(self):
# Ticket #2189, it used to segfault, so we check that it raises the
# proper exception.
a = np.array([('a', 1)], dtype='S1, int')
assert_raises(TypeError, np.searchsorted, a, 1.2)
# Ticket #2066, similar problem:
dtype = np.format_parser(['i4', 'i4'], [], [])
a = np.recarray((2, ), dtype)
assert_raises(TypeError, np.searchsorted, a, 1)
def test_complex64_alignment(self):
# Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
dtt = np.complex64
arr = np.arange(10, dtype=dtt)
# 2D array
arr2 = np.reshape(arr, (2, 5))
# Fortran write followed by (C or F) read caused bus error
data_str = arr2.tobytes('F')
data_back = np.ndarray(arr2.shape,
arr2.dtype,
buffer=data_str,
order='F')
assert_array_equal(arr2, data_back)
def test_structured_count_nonzero(self):
arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
count = np.count_nonzero(arr)
assert_equal(count, 0)
def test_copymodule_preserves_f_contiguity(self):
a = np.empty((2, 2), order='F')
b = copy.copy(a)
c = copy.deepcopy(a)
assert_(b.flags.fortran)
assert_(b.flags.f_contiguous)
assert_(c.flags.fortran)
assert_(c.flags.f_contiguous)
def test_fortran_order_buffer(self):
import numpy as np
a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')],
[sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]])
assert_array_equal(arr, arr2)
def test_assign_from_sequence_error(self):
# Ticket #4024.
arr = np.array([1, 2, 3])
assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
arr.__setitem__(slice(None), [9])
assert_equal(arr, [9, 9, 9])
def test_format_on_flex_array_element(self):
# Ticket #4369.
dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
arr = np.array([('2000-01-01', 1)], dt)
formatted = '{0}'.format(arr[0])
assert_equal(formatted, str(arr[0]))
def test_deepcopy_on_0d_array(self):
# Ticket #3311.
arr = np.array(3)
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
self.assertTrue(arr is not arr_cp)
self.assertTrue(isinstance(arr_cp, type(arr)))
def test_bool_subscript_crash(self):
# gh-4494
c = np.rec.array([(1, 2, 3), (4, 5, 6)])
masked = c[np.array([True, False])]
base = masked.base
del masked, c
base.dtype
def test_richcompare_crash(self):
# gh-4613
import operator as op
# dummy class where __array__ throws exception
class Foo(object):
__array_priority__ = 1002
def __array__(self,*args,**kwargs):
raise Exception()
rhs = Foo()
lhs = np.array(1)
for f in [op.lt, op.le, op.gt, op.ge]:
if sys.version_info[0] >= 3:
assert_raises(TypeError, f, lhs, rhs)
else:
f(lhs, rhs)
assert_(not op.eq(lhs, rhs))
assert_(op.ne(lhs, rhs))
def test_richcompare_scalar_and_subclass(self):
# gh-4709
class Foo(np.ndarray):
def __eq__(self, other):
return "OK"
x = np.array([1,2,3]).view(Foo)
assert_equal(10 == x, "OK")
assert_equal(np.int32(10) == x, "OK")
assert_equal(np.array([10]) == x, "OK")
if __name__ == "__main__":
run_module_suite()
|
NextThought/pypy-numpy
|
numpy/core/tests/test_regression.py
|
Python
|
bsd-3-clause
| 77,655
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A demonstration of evoMPS by simulation of quench dynamics
for the transverse Ising model.
@author: Ashley Milsted
"""
import scipy as sp
import evoMPS.tdvp_gen as tdvp
import unittest
x_ss = sp.array([[0, 1],
[1, 0]])
y_ss = 1.j * sp.array([[0, -1],
[1, 0]])
z_ss = sp.array([[1, 0],
[0, -1]])
def get_ham(N, J, h):
ham = -J * (sp.kron(x_ss, x_ss) + h * sp.kron(z_ss, sp.eye(2))).reshape(2, 2, 2, 2)
ham_end = ham + h * sp.kron(sp.eye(2), z_ss).reshape(2, 2, 2, 2)
return [None] + [ham] * (N - 2) + [ham_end]
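# Editorial sketch of the structure get_ham returns (not part of the original
# test; the 1-indexed site convention is inferred from the N+1-length lists used
# below): the leading None is a placeholder for the unused site-0 slot and the
# remaining N-1 entries are (2, 2, 2, 2) bond Hamiltonians.
#   terms = get_ham(4, 1.0, 0.5)
#   # len(terms) == 4; terms[0] is None; terms[1].shape == (2, 2, 2, 2)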
def get_E_crit(N):
return - 2 * abs(sp.sin(sp.pi * (2 * sp.arange(N) + 1) / (2 * (2 * N + 1)))).sum()
class TestOps(unittest.TestCase):
def test_ising_crit_im_tdvp(self):
N = 5
s = tdvp.EvoMPS_TDVP_Generic(N, [1024] * (N + 1), [2] * (N + 1), get_ham(N, 1.0, 1.0))
E = get_E_crit(N)
tol = 1E-5 #Should result in correct energy to ~1E-12
eta = 1
while eta > tol:
s.update()
H = s.H_expect
s.take_step(0.1)
eta = s.eta.real.sum()
s.update()
self.assertTrue(sp.allclose(E, H))
self.assertLessEqual(s.expect_1s(x_ss, 1), 10 * tol)
self.assertLessEqual(s.expect_1s(y_ss, 1), 10 * tol)
def test_ising_crit_im_tdvp_RK4(self):
N = 5
s = tdvp.EvoMPS_TDVP_Generic(N, [1024] * (N + 1), [2] * (N + 1), get_ham(N, 1.0, 1.0))
E = get_E_crit(N)
tol = 1E-5 #Should result in correct energy to ~1E-12
eta = 1
while eta > tol:
s.update()
H = s.H_expect
s.take_step_RK4(0.1)
eta = s.eta.real.sum()
self.assertTrue(sp.allclose(E, H))
if __name__ == '__main__':
unittest.main()
|
ftranschel/evoMPS
|
evoMPS/tests/tdvp_gen_test_ising.py
|
Python
|
bsd-3-clause
| 1,990
|
try:
#version 0.9+
from klampt.plan.motionplanning import interpolate_nd_min_time,brake_nd
from klampt.plan.motionplanning import combine_nd_cubic
except ImportError:
#version 0.8.x
from klampt.plan.motionplanning import interpolateNDMinTime as interpolate_nd_min_time
from klampt.plan.motionplanning import combineNDCubic as combine_nd_cubic
brake_nd = None
from klampt.math.spline import hermite_eval,hermite_deriv
from klampt.math import vectorops
from klampt.model.trajectory import Trajectory,HermiteTrajectory
import math
import warnings
class VelocityBoundedMotionGeneration:
"""A motion generator that limits the element-wise velocities for
continuously generated position targets. Note that the target
is reached simultaneously on each dimension.
Usage::
from klampt.control.utils import TimedLooper
x0 = robot.getPosition()
generator = VelocityBoundedMotionGeneration(x0,vmax)
dt = 1.0/rate #some control rate
looper = TimedLooper(dt)
while looper:
#TODO: send commands, e.g., generator.setTarget(xtgt)
(x,v) = generator.update(dt)
robot.setPosition(x)
Args:
x0 (list of floats): the start position
        vmax (list of floats): the element-wise velocity limits
"""
def __init__(self,x0,vmax):
if len(x0) != len(vmax):
raise ValueError("Invalid length of vmax")
n = len(x0)
self.x = [v for v in x0]
self.v = [0]*n
self.vmax = vmax
self.times = [0]
self.milestones = [self.x]
self.curTime = 0
self.trajTime = 0
def remainingTime(self):
"""Returns the remaining time to finish the path, in s. Returns 0
if the path is done executing.
"""
        return max(0,self.times[-1]-self.trajTime)
def reset(self,x0):
"""Resets the motion generator to the start position x0."""
self.x = x0
self.v = [0]*len(x0)
self.times = [0]
self.milestones = [self.x]
self.curTime = 0
self.trajTime = 0
def update(self,dt,xtarget=None):
"""Updates the motion generator. Optionally sets the new target.
Returns:
tuple: (x,v) giving the new state
"""
if dt <= 0:
raise ValueError("Invalid dt")
if xtarget is not None:
self.setTarget(xtarget)
self.curTime += dt
self.trajTime += dt
xnew,vnew = self.predict(0)
self.x = xnew
self.v = vnew
return xnew,vnew
def setTarget(self,xtarget,append=False):
"""Sets a new target. If append=True, appends the target
to the motion queue.
"""
assert len(xtarget) == len(self.x)
self._trim(append)
xlast = self.milestones[-1]
tmax = 0
for (x,xt,vmax) in zip(xlast,xtarget,self.vmax):
dx = xt-x
tmax = max(tmax, abs(dx) / vmax)
self.times.append(self.times[-1]+tmax)
self.milestones.append(xtarget)
def setVelocity(self,vtarget,duration=1,append=False):
"""Sets a velocity command to vtarget. Moves along this
speed for duration seconds and then stops.
"""
assert len(vtarget) == len(self.x)
assert duration >= 0
self._trim(append)
xlast = self.milestones[-1]
self.times.append(self.times[-1]+duration)
self.milestones.append(vectorops.madd(xlast,vtarget,duration))
def brake(self):
"""Stops as quickly as possible. Since acceleration is unbounded,
this just stops immediately."""
self.setTarget(self.x)
def _trim(self,append):
newtimes = [0]
newmilestones = [self.x]
if append:
#cut prior trajectory to trajTime
for j,t in enumerate(self.times):
if t > self.trajTime:
newtimes.append(t-self.trajTime)
newmilestones.append(self.milestones[j])
else:
#just reset prior trajectory
pass
self.times = newtimes
self.milestones = newmilestones
self.trajTime = 0
def duration(self):
"""Returns the time remaining in the trajectory"""
return max(self.times[-1]-self.trajTime,0)
def target(self):
"""Returns the final position on the trajectory"""
return self.milestones[-1]
def predict(self,t):
"""Predicts the state that the system will be in at time t>=0 in the
future, assuming no changes to the target.
Returns:
tuple: (x,v) giving the new state
"""
t = t + self.trajTime
if t < self.times[0]:
return self.x,self.v
j = 0
while j+1 < len(self.times):
if t < self.times[j+1]:
u = (t - self.times[j])/(self.times[j+1]-self.times[j])
speed = 1.0/(self.times[j+1]-self.times[j])
x = vectorops.interpolate(self.milestones[j],self.milestones[j+1],u)
                v = vectorops.mul(vectorops.sub(self.milestones[j+1],self.milestones[j]),speed)
return x,v
j += 1
return self.milestones[-1],[0]*len(self.x)
def trajectory(self):
"""Returns the future trajectory as a Trajectory.
"""
times = [0]
milestones = [self.x]
for j in range(len(self.times)):
if self.trajTime < self.times[j]:
times.append(self.times[j]-self.trajTime)
milestones.append(self.milestones[j])
return Trajectory(times,milestones)
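# Minimal usage sketch for VelocityBoundedMotionGeneration (values are
# illustrative; this mirrors the Usage block in the class docstring above):
#   gen = VelocityBoundedMotionGeneration([0.0, 0.0], vmax=[1.0, 2.0])
#   gen.setTarget([1.0, 1.0])
#   x, v = gen.update(0.01)   # advance 10 ms along the velocity-limited path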
class AccelerationBoundedMotionGeneration:
"""A motion generator similar to the Reflexxes library, which
provides acceleration- and velocity-bounded smooth trajectories
for arbitrary targets.
Usage::
from klampt.control.utils import TimedLooper
x0 = robot.getPosition()
generator = AccelerationBoundedMotionGeneration(x0,vmax)
dt = 1.0/rate #some control rate
looper = TimedLooper(dt)
while looper:
#TODO: send commands, e.g., generator.setTarget(xtgt)
(x,v) = generator.update(dt)
robot.setPosition(x)
Args:
x0 (list of floats): the start position
xmin (list of floats, optional): the lower position joint limits
xmax (list of floats, optional): the upper position joint limits
vmax (list of floats, optional): the velocity limits
amax (list of floats): the acceleration limits. Non-optional (for now.)
"""
def __init__(self,x0,xmin=None,xmax=None,vmax=None,amax=None):
        if vmax is not None and len(x0) != len(vmax):
            raise ValueError("Invalid length of vmax")
        if amax is not None and len(x0) != len(amax):
            raise ValueError("Invalid length of amax")
n = len(x0)
self.x = [v for v in x0]
self.v = [0]*n
self.xmin = xmin if xmin is not None else [-float('inf')]*n
self.xmax = xmax if xmax is not None else [float('inf')]*n
self.vmax = vmax if vmax is not None else [float('inf')]*n
self.amax = amax if amax is not None else [float('inf')]*n
if amax is None:
raise ValueError("amax needs to be specified")
self.times = [[0] for v in x0]
self.milestones = [[v] for v in x0]
self.dmilestones = [[0] for v in x0]
self.trajTime = 0
self.curTime = 0
def remainingTime(self):
"""Returns the remaining time to finish the path, in s. Returns 0
if the path is done executing.
"""
return max(0,max(t[-1]-self.trajTime for t in self.times))
def reset(self,x0):
"""Resets the motion generator to the start position x0."""
if len(x0) != len(self.amax):
raise ValueError("Invalid length of the configuration")
self.x = x0
self.v = [0]*len(x0)
self.times = [[0] for v in x0]
self.milestones = [[v] for v in x0]
self.dmilestones = [[0] for v in x0]
self.trajTime = 0
def update(self,dt,xtarget=None,vtarget=None):
"""Updates the motion generator. Optionally sets the new target and velocity.
If velocity is None, ends in 0 velocity.
Returns:
tuple: (x,v) giving the new state
"""
if dt <= 0:
raise ValueError("Invalid dt")
if xtarget is not None:
self.setTarget(xtarget,vtarget)
self.trajTime += dt
self.curTime += dt
x,v = self.predict(0)
self.x = x
self.v = v
return x,v
def setTarget(self,xtarget,vtarget=None,append=False):
"""Sets a new target position xtarget and optional velocity vtarget.
If append=True, appends the target to the motion queue.
"""
assert len(xtarget) == len(self.x)
if vtarget is None:
vtarget = [0]*len(xtarget)
assert len(vtarget) == len(self.x)
self._trim(append)
t,m,v = interpolate_nd_min_time(self.x,self.v,xtarget,vtarget,
self.xmin,self.xmax,self.vmax,self.amax)
if len(t)==0:
if self.x != xtarget or self.v != vtarget:
warnings.warn("Cannot solve for path from {}, {} to target {}, {}".format(self.x,self.v,xtarget,vtarget))
else:
assert len(t) == len(m)
assert len(t) == len(v)
assert len(t) == len(self.x)
for i,vi in enumerate(v):
for j,vj in enumerate(vi):
if abs(vj) > self.vmax[i]:
vj = min(max(-self.vmax[i],vj),self.vmax[i])
if abs(vj) > self.vmax[i]*1.001:
warnings.warn("Solved velocity {} is larger than vmax {}".format(vi,self.vmax[i]))
vi[j] = vj
for i in range(len(self.x)):
self.times[i] += [x+self.trajTime for x in t[i][1:]]
self.milestones[i] += m[i][1:]
self.dmilestones[i] += v[i][1:]
self._checkValid()
def setVelocity(self,vtarget,duration=1,append=False):
"""Sets a velocity command to vtarget. Moves along this
speed for duration seconds and then stops.
"""
assert len(vtarget)==len(self.x)
assert duration >= 0
self._trim(append)
self._append_ramp_velocity(vtarget)
#go straight for a duration
for i in range(len(self.x)):
self.times[i].append(self.times[i][-1]+duration)
            self.milestones[i].append(self.milestones[i][-1]+vtarget[i]*duration)
            self.dmilestones[i].append(vtarget[i])
#ramp down
self._append_ramp_velocity([0]*len(self.x))
self._checkValid()
def brake(self):
"""Stops as quickly as possible under the acceleration bounds.
"""
self._trim(False)
if brake_nd is None:
#0.8.x Klampt
raise NotImplementedError("brake() not available in Klampt 0.8.x")
t,m,v = brake_nd(self.x,self.v,self.xmin,self.xmax,self.amax)
for i in range(len(self.x)):
assert t[i][0] == 0
self.times[i] += [x+self.trajTime for x in t[i][1:]]
self.milestones[i] += m[i][1:]
self.dmilestones[i] += v[i][1:]
self._checkValid()
def _append_ramp_velocity(self,vtarget):
        vlast = [dmi[-1] for dmi in self.dmilestones]
#ramp up to vtarget
tmax = 0
for i in range(len(self.x)):
dv = vtarget[i] - vlast[i]
tmax = max(tmax,abs(dv/self.amax[i]))
if tmax > 0:
#ramp up to tmax with parabolic curve
for i in range(len(self.x)):
self.times[i].append(self.times[i][-1]+tmax)
dv = vtarget[i] - vlast[i]
a = dv / tmax
                self.milestones[i].append(self.milestones[i][-1]+self.dmilestones[i][-1]*tmax + a*0.5*tmax**2)
self.dmilestones[i].append(vtarget[i])
self._checkValid()
def _trim(self,append):
newtimes = [[0] for v in self.x]
newmilestones = [[v] for v in self.x]
newdmilestones = [[v] for v in self.v]
if append:
#cut prior trajectory to trajTime
for i in range(len(self.x)):
for j,t in enumerate(self.times[i]):
if t > self.trajTime:
newtimes[i].append(t-self.trajTime)
newmilestones[i].append(self.milestones[i][j])
newdmilestones[i].append(self.dmilestones[i][j])
else:
#reset prior path
pass
self.times = newtimes
self.milestones = newmilestones
self.dmilestones = newdmilestones
self.trajTime = 0
self._checkValid()
def duration(self):
"""Returns the time remaining in the trajectory"""
return max(self.times[0][-1]-self.trajTime,0)
def target(self):
"""Returns the final position on the trajectory"""
return [mi[-1] for mi in self.milestones]
def predict(self,t):
"""Predicts the state that the system will be in at time t>=0 in the
future, assuming no changes to the target.
Args:
t (float): the time in the future. Should be >=0.
Returns:
tuple: (x,v) giving the predicted state
"""
t = t + self.trajTime
x = []
v = []
for i in range(len(self.times)):
ti,mi,dmi = self.times[i],self.milestones[i],self.dmilestones[i]
#evaluate trajectory
j = 0
xi = mi[-1]
vi = dmi[-1]
if t < ti[-1]:
while j+1 < len(ti):
if t < ti[j+1]:
assert t >= ti[j]
dt = (ti[j+1]-ti[j])
u = (t-ti[j])/dt
xi = hermite_eval([mi[j]],[dmi[j]*dt],[mi[j+1]],[dmi[j+1]*dt],u)[0]
vi = hermite_deriv([mi[j]],[dmi[j]*dt],[mi[j+1]],[dmi[j+1]*dt],u)[0]/dt
if abs(vi) > self.vmax[i]:
if abs(vi) > self.vmax[i]*1.001:
warnings.warn("{} {} -> {} {} at u={}, dt={}".format(mi[j],dmi[j],mi[j+1],dmi[j+1],u,(ti[j+1]-ti[j])))
warnings.warn("Evaluated velocity {} is larger than vmax {}".format(vi,self.vmax[i]))
vi = min(max(-self.vmax[i],vi),self.vmax[i])
break
j += 1
x.append(xi)
v.append(vi)
return x,v
def trajectory(self):
"""Returns the future trajectory as a HermiteTrajectory.
"""
self._checkValid()
times,milestones,dmilestones = combine_nd_cubic(self.times,self.milestones,self.dmilestones)
prefix,suffix = HermiteTrajectory(times,milestones,dmilestones).split(self.trajTime)
suffix.times = [t-self.trajTime for t in suffix.times]
return suffix
def _checkValid(self):
assert len(self.x) == len(self.v)
assert len(self.x) == len(self.times)
assert len(self.times) == len(self.milestones)
assert len(self.times) == len(self.dmilestones)
for i in range(len(self.times)):
assert len(self.times[i]) == len(self.milestones[i])
assert len(self.times[i]) == len(self.dmilestones[i])
for j in range(len(self.times[i])-1):
assert self.times[i][j+1] >= self.times[i][j]
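# Minimal usage sketch for AccelerationBoundedMotionGeneration (illustrative
# values; this mirrors the Usage block in the class docstring above):
#   gen = AccelerationBoundedMotionGeneration([0.0], vmax=[1.0], amax=[2.0])
#   gen.setTarget([0.5])
#   while gen.remainingTime() > 0:
#       x, v = gen.update(0.01)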
|
krishauser/Klampt
|
Python/klampt/control/motion_generation.py
|
Python
|
bsd-3-clause
| 15,765
|
from __future__ import print_function
import sys, time, random, os, json
from six.moves.urllib.parse import urlencode
from subprocess import Popen, PIPE
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.web.static import File
from twisted.web.test.test_webclient import PayloadResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.resource import EncodingResourceWrapper
from twisted.web.util import redirectTo
from twisted.internet import reactor, ssl
from twisted.internet.task import deferLater
from scrapy.utils.python import to_bytes, to_unicode
from tests import tests_datadir
def getarg(request, name, default=None, type=None):
if name in request.args:
value = request.args[name][0]
if type is not None:
value = type(value)
return value
else:
return default
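# Hedged usage sketch: for a request to /delay?n=2.5 the handlers below would
# call getarg(request, b"n", 1, type=float) and get 2.5 back, while a missing
# argument falls through to the supplied default.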
class LeafResource(Resource):
isLeaf = True
def deferRequest(self, request, delay, f, *a, **kw):
def _cancelrequest(_):
# silence CancelledError
d.addErrback(lambda _: None)
d.cancel()
d = deferLater(reactor, delay, f, *a, **kw)
request.notifyFinish().addErrback(_cancelrequest)
return d
class Follow(LeafResource):
def render(self, request):
total = getarg(request, b"total", 100, type=int)
show = getarg(request, b"show", 1, type=int)
order = getarg(request, b"order", b"desc")
maxlatency = getarg(request, b"maxlatency", 0, type=float)
n = getarg(request, b"n", total, type=int)
if order == b"rand":
nlist = [random.randint(1, total) for _ in range(show)]
else: # order == "desc"
nlist = range(n, max(n - show, 0), -1)
lag = random.random() * maxlatency
self.deferRequest(request, lag, self.renderRequest, request, nlist)
return NOT_DONE_YET
def renderRequest(self, request, nlist):
s = """<html> <head></head> <body>"""
args = request.args.copy()
for nl in nlist:
args[b"n"] = [to_bytes(str(nl))]
argstr = urlencode(args, doseq=True)
s += "<a href='/follow?%s'>follow %d</a><br>" % (argstr, nl)
s += """</body>"""
request.write(to_bytes(s))
request.finish()
class Delay(LeafResource):
def render_GET(self, request):
n = getarg(request, b"n", 1, type=float)
b = getarg(request, b"b", 1, type=int)
if b:
# send headers now and delay body
request.write('')
self.deferRequest(request, n, self._delayedRender, request, n)
return NOT_DONE_YET
def _delayedRender(self, request, n):
request.write(to_bytes("Response delayed for %0.3f seconds\n" % n))
request.finish()
class Status(LeafResource):
def render_GET(self, request):
n = getarg(request, b"n", 200, type=int)
request.setResponseCode(n)
return b""
class Raw(LeafResource):
def render_GET(self, request):
request.startedWriting = 1
self.deferRequest(request, 0, self._delayedRender, request)
return NOT_DONE_YET
render_POST = render_GET
def _delayedRender(self, request):
raw = getarg(request, b'raw', b'HTTP 1.1 200 OK\n')
request.startedWriting = 1
request.write(raw)
request.channel.transport.loseConnection()
request.finish()
class Echo(LeafResource):
def render_GET(self, request):
output = {
'headers': dict(
(to_unicode(k), [to_unicode(v) for v in vs])
for k, vs in request.requestHeaders.getAllRawHeaders()),
'body': to_unicode(request.content.read()),
}
return to_bytes(json.dumps(output))
class RedirectTo(LeafResource):
def render(self, request):
goto = getarg(request, b'goto', b'/')
# we force the body content, otherwise Twisted redirectTo()
# returns HTML with <meta http-equiv="refresh"
redirectTo(goto, request)
return b'redirecting...'
class Partial(LeafResource):
def render_GET(self, request):
request.setHeader(b"Content-Length", b"1024")
self.deferRequest(request, 0, self._delayedRender, request)
return NOT_DONE_YET
def _delayedRender(self, request):
request.write(b"partial content\n")
request.finish()
class Drop(Partial):
def _delayedRender(self, request):
abort = getarg(request, b"abort", 0, type=int)
request.write(b"this connection will be dropped\n")
tr = request.channel.transport
try:
if abort and hasattr(tr, 'abortConnection'):
tr.abortConnection()
else:
tr.loseConnection()
finally:
request.finish()
class Root(Resource):
def __init__(self):
Resource.__init__(self)
self.putChild(b"status", Status())
self.putChild(b"follow", Follow())
self.putChild(b"delay", Delay())
self.putChild(b"partial", Partial())
self.putChild(b"drop", Drop())
self.putChild(b"raw", Raw())
self.putChild(b"echo", Echo())
self.putChild(b"payload", PayloadResource())
self.putChild(b"xpayload", EncodingResourceWrapper(PayloadResource(), [GzipEncoderFactory()]))
self.putChild(b"files", File(os.path.join(tests_datadir, 'test_site/files/')))
self.putChild(b"redirect-to", RedirectTo())
def getChild(self, name, request):
return self
def render(self, request):
return b'Scrapy mock HTTP server\n'
class MockServer():
def __enter__(self):
from scrapy.utils.test import get_testenv
self.proc = Popen([sys.executable, '-u', '-m', 'tests.mockserver'],
stdout=PIPE, env=get_testenv())
self.proc.stdout.readline()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.proc.kill()
self.proc.wait()
time.sleep(0.2)
def ssl_context_factory(keyfile='keys/cert.pem', certfile='keys/cert.pem'):
return ssl.DefaultOpenSSLContextFactory(
os.path.join(os.path.dirname(__file__), keyfile),
os.path.join(os.path.dirname(__file__), certfile),
)
if __name__ == "__main__":
root = Root()
factory = Site(root)
httpPort = reactor.listenTCP(8998, factory)
contextFactory = ssl_context_factory()
httpsPort = reactor.listenSSL(8999, factory, contextFactory)
def print_listening():
httpHost = httpPort.getHost()
httpsHost = httpsPort.getHost()
print("Mock server running at http://%s:%d and https://%s:%d" % (
httpHost.host, httpHost.port, httpsHost.host, httpsHost.port))
reactor.callWhenRunning(print_listening)
reactor.run()
|
taito/scrapy
|
tests/mockserver.py
|
Python
|
bsd-3-clause
| 6,893
|
__author__ = "Iacopo Spalletti"
__email__ = "i.spalletti@nephila.it"
__version__ = "2.0.1.dev0"
|
nephila/djangocms-installer
|
djangocms_installer/__init__.py
|
Python
|
bsd-3-clause
| 96
|
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromite main test runner.
Run the specified tests. If none are specified, we'll scan the
tree looking for tests to run and then only run the semi-fast ones.
You can add a .testignore file to a dir to disable scanning it.
"""
from __future__ import print_function
import errno
import multiprocessing
import os
import signal
import stat
import sys
import tempfile
from chromite.cbuildbot import constants
from chromite.lib import cgroups
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import namespaces
from chromite.lib import osutils
from chromite.lib import proctitle
from chromite.lib import timeout_util
# How long (in minutes) to let a test run before we kill it.
TEST_TIMEOUT = 20
# How long (in minutes) before we send SIGKILL after the timeout above.
TEST_SIG_TIMEOUT = 5
# How long (in seconds) to let tests clean up after CTRL+C is sent.
SIGINT_TIMEOUT = 5
# How long (in seconds) to let all children clean up after CTRL+C is sent.
CTRL_C_TIMEOUT = SIGINT_TIMEOUT + 5
# Test has to run inside the chroot.
INSIDE = 'inside'
# Test has to run outside the chroot.
OUTSIDE = 'outside'
# Don't run this test (please add a comment as to why).
SKIP = 'skip'
# List all exceptions, with a token describing what's odd here.
SPECIAL_TESTS = {
# Tests that need to run inside the chroot.
'cbuildbot/stages/test_stages_unittest': INSIDE,
'cli/cros/cros_build_unittest': INSIDE,
'cli/cros/cros_chroot_unittest': INSIDE,
'cli/cros/cros_debug_unittest': INSIDE,
'cli/cros/lint_unittest': INSIDE,
'cli/deploy_unittest': INSIDE,
'lib/alerts_unittest': INSIDE,
'lib/chroot_util_unittest': INSIDE,
'lib/filetype_unittest': INSIDE,
'lib/upgrade_table_unittest': INSIDE,
'scripts/cros_install_debug_syms_unittest': INSIDE,
'scripts/cros_list_modified_packages_unittest': INSIDE,
'scripts/cros_mark_as_stable_unittest': INSIDE,
'scripts/cros_mark_chrome_as_stable_unittest': INSIDE,
'scripts/cros_mark_mojo_as_stable_unittest': INSIDE,
'scripts/sync_package_status_unittest': INSIDE,
'scripts/cros_portage_upgrade_unittest': INSIDE,
'scripts/dep_tracker_unittest': INSIDE,
'scripts/test_image_unittest': INSIDE,
'scripts/upload_package_status_unittest': INSIDE,
# Tests that need to run outside the chroot.
'lib/cgroups_unittest': OUTSIDE,
# Tests that take >2 minutes to run. All the slow tests are
# disabled atm though ...
#'scripts/cros_portage_upgrade_unittest': SKIP,
}
SLOW_TESTS = {
# Tests that require network can be really slow.
'buildbot/manifest_version_unittest': SKIP,
'buildbot/repository_unittest': SKIP,
'buildbot/remote_try_unittest': SKIP,
'lib/cros_build_lib_unittest': SKIP,
'lib/gerrit_unittest': SKIP,
'lib/patch_unittest': SKIP,
# cgroups_unittest runs cros_sdk a lot, so is slow.
'lib/cgroups_unittest': SKIP,
}
def RunTest(test, cmd, tmpfile, finished, total):
"""Run |test| with the |cmd| line and save output to |tmpfile|.
Args:
test: The human readable name for this test.
cmd: The full command line to run the test.
tmpfile: File to write test output to.
finished: Counter to update when this test finishes running.
total: Total number of tests to run.
Returns:
The exit code of the test.
"""
logging.info('Starting %s', test)
def _Finished(_log_level, _log_msg, result, delta):
with finished.get_lock():
finished.value += 1
if result.returncode:
func = logging.error
msg = 'Failed'
else:
func = logging.info
msg = 'Finished'
func('%s [%i/%i] %s (%s)', msg, finished.value, total, test, delta)
ret = cros_build_lib.TimedCommand(
cros_build_lib.RunCommand, cmd, capture_output=True, error_code_ok=True,
combine_stdout_stderr=True, debug_level=logging.DEBUG,
int_timeout=SIGINT_TIMEOUT, timed_log_callback=_Finished)
if ret.returncode:
tmpfile.write(ret.output)
if not ret.output:
tmpfile.write('<no output>\n')
tmpfile.close()
return ret.returncode
def BuildTestSets(tests, chroot_available, network):
"""Build the tests to execute.
Take care of special test handling like whether it needs to be inside or
outside of the sdk, whether the test should be skipped, etc...
Args:
tests: List of tests to execute.
chroot_available: Whether we can execute tests inside the sdk.
network: Whether to execute network tests.
Returns:
List of tests to execute and their full command line.
"""
testsets = []
for test in tests:
cmd = [test]
# See if this test requires special consideration.
status = SPECIAL_TESTS.get(test)
if status is SKIP:
logging.info('Skipping %s', test)
continue
elif status is INSIDE:
if not cros_build_lib.IsInsideChroot():
if not chroot_available:
logging.info('Skipping %s: chroot not available', test)
continue
cmd = ['cros_sdk', '--', os.path.join('..', '..', 'chromite', test)]
elif status is OUTSIDE:
if cros_build_lib.IsInsideChroot():
logging.info('Skipping %s: must be outside the chroot', test)
continue
else:
mode = os.stat(test).st_mode
if stat.S_ISREG(mode):
if not mode & 0o111:
logging.debug('Skipping %s: not executable', test)
continue
else:
logging.debug('Skipping %s: not a regular file', test)
continue
# Build up the final test command.
cmd.append('--verbose')
if network:
cmd.append('--network')
cmd = ['timeout', '--preserve-status', '-k', '%sm' % TEST_SIG_TIMEOUT,
'%sm' % TEST_TIMEOUT] + cmd
testsets.append((test, cmd, tempfile.TemporaryFile()))
return testsets
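# Each testset entry pairs the test path with its timeout-wrapped command line
# and a temp file for the captured output, roughly (test name illustrative only):
#   ('lib/osutils_unittest',
#    ['timeout', '--preserve-status', '-k', '5m', '20m',
#     'lib/osutils_unittest', '--verbose'],
#    <tempfile.TemporaryFile>)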
def RunTests(tests, jobs=1, chroot_available=True, network=False, dryrun=False,
failfast=False):
"""Execute |paths| with |jobs| in parallel (including |network| tests).
Args:
tests: The tests to run.
jobs: How many tests to run in parallel.
chroot_available: Whether we can run tests inside the sdk.
network: Whether to run network based tests.
dryrun: Do everything but execute the test.
failfast: Stop on first failure
Returns:
True if all tests pass, else False.
"""
finished = multiprocessing.Value('i')
testsets = []
pids = []
failed = aborted = False
def WaitOne():
(pid, status) = os.wait()
pids.remove(pid)
return status
# Launch all the tests!
try:
# Build up the testsets.
testsets = BuildTestSets(tests, chroot_available, network)
# Fork each test and add it to the list.
for test, cmd, tmpfile in testsets:
if failed and failfast:
logging.error('failure detected; stopping new tests')
break
if len(pids) >= jobs:
if WaitOne():
failed = True
pid = os.fork()
if pid == 0:
proctitle.settitle(test)
ret = 1
try:
if dryrun:
logging.info('Would have run: %s', cros_build_lib.CmdToStr(cmd))
ret = 0
else:
ret = RunTest(test, cmd, tmpfile, finished, len(testsets))
except KeyboardInterrupt:
pass
except BaseException:
logging.error('%s failed', test, exc_info=True)
# We cannot run clean up hooks in the child because it'll break down
# things like tempdir context managers.
os._exit(ret) # pylint: disable=protected-access
pids.append(pid)
# Wait for all of them to get cleaned up.
while pids:
if WaitOne():
failed = True
except KeyboardInterrupt:
# If the user wants to stop, reap all the pending children.
logging.warning('CTRL+C received; cleaning up tests')
aborted = True
CleanupChildren(pids)
# Walk through the results.
failed_tests = []
for test, cmd, tmpfile in testsets:
tmpfile.seek(0)
output = tmpfile.read()
if output:
failed_tests.append(test)
print()
logging.error('### LOG: %s', test)
print(output.rstrip())
print()
if failed_tests:
logging.error('The following %i tests failed:\n %s', len(failed_tests),
'\n '.join(sorted(failed_tests)))
return False
elif aborted or failed:
return False
return True
def CleanupChildren(pids):
"""Clean up all the children in |pids|."""
# Note: SIGINT was already sent due to the CTRL+C via the kernel itself.
# So this func is just waiting for them to clean up.
handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
def _CheckWaitpid(ret):
(pid, _status) = ret
if pid:
try:
pids.remove(pid)
except ValueError:
# We might have reaped a grandchild -- be robust.
pass
return len(pids)
def _Waitpid():
try:
return os.waitpid(-1, os.WNOHANG)
except OSError as e:
if e.errno == errno.ECHILD:
# All our children went away!
pids[:] = []
return (0, 0)
else:
raise
def _RemainingTime(remaining):
print('\rwaiting %s for %i tests to exit ... ' % (remaining, len(pids)),
file=sys.stderr, end='')
try:
timeout_util.WaitForSuccess(_CheckWaitpid, _Waitpid,
timeout=CTRL_C_TIMEOUT, period=0.1,
side_effect_func=_RemainingTime)
print('All tests cleaned up!')
return
except timeout_util.TimeoutError:
# Let's kill them hard now.
print('Hard killing %i tests' % len(pids))
for pid in pids:
try:
os.kill(pid, signal.SIGKILL)
except OSError as e:
if e.errno != errno.ESRCH:
raise
finally:
signal.signal(signal.SIGINT, handler)
def FindTests(search_paths=('.',)):
"""Find all the tests available in |search_paths|."""
for search_path in search_paths:
for root, dirs, files in os.walk(search_path):
if os.path.exists(os.path.join(root, '.testignore')):
# Delete the dir list in place.
dirs[:] = []
continue
dirs[:] = [x for x in dirs if x[0] != '.']
for path in files:
test = os.path.join(os.path.relpath(root, search_path), path)
if test.endswith('_unittest'):
yield test
def ChrootAvailable():
"""See if `cros_sdk` will work at all.
    If we try to run unittests from the buildtools group, we won't be able to
    create a chroot.
"""
ret = cros_build_lib.RunCommand(
['repo', 'list'], capture_output=True, error_code_ok=True,
combine_stdout_stderr=True, debug_level=logging.DEBUG)
return 'chromiumos-overlay' in ret.output
def _ReExecuteIfNeeded(argv, network):
"""Re-execute as root so we can unshare resources."""
if os.geteuid() != 0:
cmd = ['sudo', '-E', 'HOME=%s' % os.environ['HOME'],
'PATH=%s' % os.environ['PATH'], '--'] + argv
os.execvp(cmd[0], cmd)
else:
cgroups.Cgroup.InitSystem()
namespaces.SimpleUnshare(net=not network, pid=True)
# We got our namespaces, so switch back to the user to run the tests.
gid = int(os.environ.pop('SUDO_GID'))
uid = int(os.environ.pop('SUDO_UID'))
user = os.environ.pop('SUDO_USER')
os.initgroups(user, gid)
os.setresgid(gid, gid, gid)
os.setresuid(uid, uid, uid)
os.environ['USER'] = user
def GetParser():
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('-f', '--failfast', default=False, action='store_true',
help='Stop on first failure')
parser.add_argument('-q', '--quick', default=False, action='store_true',
help='Only run the really quick tests')
parser.add_argument('-n', '--dry-run', default=False, action='store_true',
dest='dryrun',
help='Do everything but actually run the test')
parser.add_argument('-l', '--list', default=False, action='store_true',
help='List all the available tests')
parser.add_argument('-j', '--jobs', type=int,
help='Number of tests to run in parallel at a time')
parser.add_argument('--network', default=False, action='store_true',
help='Run tests that depend on good network connectivity')
parser.add_argument('tests', nargs='*', default=None, help='Tests to run')
return parser
def main(argv):
parser = GetParser()
opts = parser.parse_args(argv)
opts.Freeze()
# Process list output quickly as it takes no privileges.
if opts.list:
print('\n'.join(sorted(opts.tests or FindTests((constants.CHROMITE_DIR,)))))
return
# Many of our tests require a valid chroot to run. Make sure it's created
# before we block network access.
chroot = os.path.join(constants.SOURCE_ROOT, constants.DEFAULT_CHROOT_DIR)
if (not os.path.exists(chroot) and
ChrootAvailable() and
not cros_build_lib.IsInsideChroot()):
cros_build_lib.RunCommand(['cros_sdk', '--create'])
# Now let's run some tests.
_ReExecuteIfNeeded([sys.argv[0]] + argv, opts.network)
# A lot of pieces here expect to be run in the root of the chromite tree.
# Make them happy.
os.chdir(constants.CHROMITE_DIR)
tests = opts.tests or FindTests()
if opts.quick:
SPECIAL_TESTS.update(SLOW_TESTS)
jobs = opts.jobs or multiprocessing.cpu_count()
with cros_build_lib.ContextManagerStack() as stack:
# If we're running outside the chroot, try to contain ourselves.
if cgroups.Cgroup.IsSupported() and not cros_build_lib.IsInsideChroot():
stack.Add(cgroups.SimpleContainChildren, 'run_tests')
# Throw all the tests into a custom tempdir so that if we do CTRL+C, we can
# quickly clean up all the files they might have left behind.
stack.Add(osutils.TempDir, prefix='chromite.run_tests.', set_global=True,
sudo_rm=True)
def _Finished(_log_level, _log_msg, result, delta):
if result:
logging.info('All tests succeeded! (%s total)', delta)
ret = cros_build_lib.TimedCommand(
RunTests, tests, jobs=jobs, chroot_available=ChrootAvailable(),
network=opts.network, dryrun=opts.dryrun, failfast=opts.failfast,
timed_log_callback=_Finished)
if not ret:
return 1
if not opts.network:
logging.warning('Network tests skipped; use --network to run them')
|
guorendong/iridium-browser-ubuntu
|
third_party/chromite/cbuildbot/run_tests.py
|
Python
|
bsd-3-clause
| 14,581
|
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-attachments',
version='0.1',
packages=['attachments'],
include_package_data=True,
license='BSD License', # example license
description='App for managing images and documents.',
long_description=README,
url='',
author='Florian Brozek',
author_email='florian.brozek@gmail.com',
    install_requires=(
'django>=1.7',
'django-braces>=1.4.0',
'easy-thumbnails>=2.0'
),
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
zdot/django-attachments
|
setup.py
|
Python
|
bsd-3-clause
| 1,381
|
import json
from tornado import web
import werkzeug.http
def get_handlers():
return [
('/login', AuthHandler),
]
class AuthHandler(web.RequestHandler):
"""Authorization Endpoint.
This is the primary endpoint for this service.
"""
def prepare(self):
super(AuthHandler, self).prepare()
self._json_body = None
def post(self):
"""Attempts to log in to the service.
:reqjson string username: name of the user to authorize
:reqjson string password: password of the user
:status 400: the request is invalid and cannot be processed
:status 403: the authorization request was denied
"""
try:
username = self.json['username']
password = self.json['password']
except KeyError:
raise web.HTTPError(400)
@property
def json(self):
"""Decoded JSON body as a ``dict``"""
if self._json_body is None:
content_type, content_params = werkzeug.http.parse_options_header(
self.request.headers.get('Content-Type',
'application/octet-stream'))
if not content_type.startswith('application/json'):
raise web.HTTPError(415)
self._json_body = json.loads(self.request.body.decode('utf-8'))
return self._json_body
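# Hedged request sketch: POST /login with Content-Type: application/json and a
# body such as {"username": "alice", "password": "secret"}. A non-JSON content
# type yields 415, and a body missing either key yields 400.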
|
dave-shawley/tokenauth
|
tokenauth/handlers.py
|
Python
|
bsd-3-clause
| 1,390
|
# -*- coding: utf-8 -*-
import os
from flask import Flask, redirect, url_for
app = Flask(__name__)
app.config.from_object('app.config')
from app import views
|
pengan1987/pythonDev
|
app/__init__.py
|
Python
|
bsd-3-clause
| 166
|
from selenium.webdriver.common.by import By
from pages.desktop.base import Base
class DevhubSubmission(Base):
"""Devhub submission page legacy view.
This will fill and submit an addon.
"""
_name_locator = (By.ID, 'id_name')
_summary_locator = (By.ID, 'id_summary_0')
_license_btn_locator = (By.ID, 'id_license-builtin_0')
_submit_btn_locator = (
By.CSS_SELECTOR, '.submission-buttons > button:nth-child(1)'
)
_appearance_categories_locator = (By.ID, 'id_form-0-categories_0')
_bookmarks_categories_locator = (By.ID, 'id_form-0-categories_1')
_edit_submission_btn_locator = (
By.CSS_SELECTOR,
'.addon-submission-process > p:nth-child(7) > a:nth-child(1)'
)
def wait_for_page_to_load(self):
self.wait.until(
lambda _: self.is_element_displayed(*self._name_locator)
)
return self
def fill_addon_submission_form(self):
"""Fill addon submission form.
        Currently there is a prefilled suggested name; specifying your own
        name will just be appended to the end of the current one. The default
name is "Ui-Test-Devhub-ext".
"""
self.find_element(*self._summary_locator).send_keys('Words go here')
self.find_element(*self._appearance_categories_locator).click()
self.find_element(*self._bookmarks_categories_locator).click()
self.find_element(*self._license_btn_locator).click()
self.find_element(*self._submit_btn_locator).click()
self.wait.until(
lambda _: self.selenium.find_element(
*self._edit_submission_btn_locator
).is_displayed()
)
self.selenium.find_element(*self._edit_submission_btn_locator).click()
from pages.desktop.manage_submissions import ManageSubmissions
subs = ManageSubmissions(self.selenium, self.base_url)
return subs.wait_for_page_to_load()
|
psiinon/addons-server
|
tests/ui/pages/desktop/devhub_submission.py
|
Python
|
bsd-3-clause
| 1,947
|
# -*- coding: utf-8 -*-
"""The check functions."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
from builtins import input # no-op here but facilitates testing
from difflib import get_close_matches
from distutils.version import LooseVersion
import operator
import os
import os.path as op
from pathlib import Path
import sys
import warnings
import numpy as np
from ..fixes import _median_complex
from ._logging import warn, logger
def _ensure_int(x, name='unknown', must_be='an int'):
"""Ensure a variable is an integer."""
# This is preferred over numbers.Integral, see:
# https://github.com/scipy/scipy/pull/7351#issuecomment-299713159
try:
# someone passing True/False is much more likely to be an error than
# intentional usage
if isinstance(x, bool):
raise TypeError()
x = int(operator.index(x))
except TypeError:
raise TypeError('%s must be %s, got %s' % (name, must_be, type(x)))
return x
def check_fname(fname, filetype, endings, endings_err=()):
"""Enforce MNE filename conventions.
Parameters
----------
fname : str
Name of the file.
filetype : str
Type of file. e.g., ICA, Epochs etc.
endings : tuple
Acceptable endings for the filename.
endings_err : tuple
Obligatory possible endings for the filename.
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
if len(endings_err) > 0 and not fname.endswith(endings_err):
print_endings = ' or '.join([', '.join(endings_err[:-1]),
endings_err[-1]])
raise IOError('The filename (%s) for file type %s must end with %s'
% (fname, filetype, print_endings))
print_endings = ' or '.join([', '.join(endings[:-1]), endings[-1]])
if not fname.endswith(endings):
warn('This filename (%s) does not conform to MNE naming conventions. '
'All %s files should end with %s'
% (fname, filetype, print_endings))
def check_version(library, min_version='0.0'):
r"""Check minimum library version required.
Parameters
----------
library : str
The library name to import. Must have a ``__version__`` property.
min_version : str
The minimum version string. Anything that matches
``'(\d+ | [a-z]+ | \.)'``. Can also be empty to skip version
check (just check for library presence).
Returns
-------
ok : bool
True if the library exists with at least the specified version.
"""
ok = True
try:
library = __import__(library)
except ImportError:
ok = False
else:
if min_version and \
LooseVersion(library.__version__) < LooseVersion(min_version):
ok = False
return ok
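# Hedged usage sketch (not part of MNE's own tests):
#   check_version('numpy', '1.10')  # True when numpy >= 1.10 is importable
#   check_version('numpy')          # presence check only (min_version '0.0')
#   check_version('no_such_lib')    # False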
def _require_version(lib, what, version='0.0'):
"""Require library for a purpose."""
if not check_version(lib, version):
extra = f' (version >= {version})' if version != '0.0' else ''
raise ImportError(f'The {lib} package{extra} is required to {what}')
def _check_mayavi_version(min_version='4.3.0'):
"""Check mayavi version."""
if not check_version('mayavi', min_version):
raise RuntimeError("Need mayavi >= %s" % min_version)
# adapted from scikit-learn utils/validation.py
def check_random_state(seed):
"""Turn seed into a numpy.random.mtrand.RandomState instance.
If seed is None, return the RandomState singleton used by np.random.mtrand.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (int, np.integer)):
return np.random.mtrand.RandomState(seed)
if isinstance(seed, np.random.mtrand.RandomState):
return seed
try:
# Generator is only available in numpy >= 1.17
if isinstance(seed, np.random.Generator):
return seed
except AttributeError:
pass
raise ValueError('%r cannot be used to seed a '
'numpy.random.mtrand.RandomState instance' % seed)
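# Hedged usage sketch (mirrors the docstring above):
#   rng = check_random_state(42)        # new RandomState seeded with 42
#   check_random_state(rng) is rng      # an existing RandomState passes through
#   check_random_state(None)            # the global np.random singleton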
def _check_event_id(event_id, events):
"""Check event_id and convert to default format."""
# check out event_id dict
if event_id is None: # convert to int to make typing-checks happy
event_id = list(np.unique(events[:, 2]))
if isinstance(event_id, dict):
for key in event_id.keys():
_validate_type(key, str, 'Event names')
event_id = {key: _ensure_int(val, 'event_id[%s]' % key)
for key, val in event_id.items()}
elif isinstance(event_id, list):
event_id = [_ensure_int(v, 'event_id[%s]' % vi)
for vi, v in enumerate(event_id)]
event_id = dict(zip((str(i) for i in event_id), event_id))
else:
event_id = _ensure_int(event_id, 'event_id')
event_id = {str(event_id): event_id}
return event_id
def _check_fname(fname, overwrite=False, must_exist=False, name='File',
need_dir=False):
"""Check for file existence."""
_validate_type(fname, 'path-like', name)
if op.exists(fname):
if not overwrite:
raise FileExistsError('Destination file exists. Please use option '
'"overwrite=True" to force overwriting.')
elif overwrite != 'read':
logger.info('Overwriting existing file.')
if must_exist:
if need_dir:
if not op.isdir(fname):
raise IOError(
f'Need a directory for {name} but found a file '
f'at {fname}')
else:
if not op.isfile(fname):
raise IOError(
f'Need a file for {name} but found a directory '
f'at {fname}')
if not os.access(fname, os.R_OK):
raise PermissionError(
f'{name} does not have read permissions: {fname}')
elif must_exist:
raise FileNotFoundError(f'{name} does not exist: {fname}')
return str(op.abspath(fname))
def _check_subject(class_subject, input_subject, raise_error=True,
kind='class subject attribute'):
"""Get subject name from class."""
if input_subject is not None:
_validate_type(input_subject, 'str', "subject input")
if class_subject is not None and input_subject != class_subject:
raise ValueError('%s (%r) did not match input subject (%r)'
% (kind, class_subject, input_subject))
return input_subject
elif class_subject is not None:
_validate_type(class_subject, 'str',
"Either subject input or %s" % (kind,))
return class_subject
elif raise_error is True:
raise ValueError('Neither subject input nor %s was a string' % (kind,))
return None
def _check_preload(inst, msg):
"""Ensure data are preloaded."""
from ..epochs import BaseEpochs
from ..evoked import Evoked
from ..time_frequency import _BaseTFR
if isinstance(inst, (_BaseTFR, Evoked)):
pass
else:
name = "epochs" if isinstance(inst, BaseEpochs) else 'raw'
if not inst.preload:
raise RuntimeError(
"By default, MNE does not load data into main memory to "
"conserve resources. " + msg + ' requires %s data to be '
'loaded. Use preload=True (or string) in the constructor or '
'%s.load_data().' % (name, name))
def _check_compensation_grade(info1, info2, name1,
name2='data', ch_names=None):
"""Ensure that objects have same compensation_grade."""
from ..io import Info
from ..io.pick import pick_channels, pick_info
from ..io.compensator import get_current_comp
for t_info in (info1, info2):
if t_info is None:
return
assert isinstance(t_info, Info), t_info # or internal code is wrong
if ch_names is not None:
info1 = info1.copy()
info2 = info2.copy()
# pick channels
for t_info in [info1, info2]:
if t_info['comps']:
t_info['comps'] = []
picks = pick_channels(t_info['ch_names'], ch_names)
pick_info(t_info, picks, copy=False)
# "or 0" here aliases None -> 0, as they are equivalent
grade1 = get_current_comp(info1) or 0
grade2 = get_current_comp(info2) or 0
# perform check
if grade1 != grade2:
raise RuntimeError(
'Compensation grade of %s (%s) and %s (%s) do not match'
% (name1, grade1, name2, grade2))
def _check_pylsl_installed(strict=True):
"""Aux function."""
try:
import pylsl
return pylsl
except ImportError:
if strict is True:
raise RuntimeError('For this functionality to work, the pylsl '
'library is required.')
else:
return False
def _check_pandas_installed(strict=True):
"""Aux function."""
try:
import pandas
return pandas
except ImportError:
if strict is True:
raise RuntimeError('For this functionality to work, the Pandas '
'library is required.')
else:
return False
def _check_pandas_index_arguments(index, valid):
"""Check pandas index arguments."""
if index is None:
return
if isinstance(index, str):
index = [index]
if not isinstance(index, list):
raise TypeError('index must be `None` or a string or list of strings,'
' got type {}.'.format(type(index)))
invalid = set(index) - set(valid)
if invalid:
plural = ('is not a valid option',
'are not valid options')[int(len(invalid) > 1)]
raise ValueError('"{}" {}. Valid index options are `None`, "{}".'
.format('", "'.join(invalid), plural,
'", "'.join(valid)))
return index
def _check_time_format(time_format, valid, meas_date=None):
"""Check time_format argument."""
if time_format not in valid and time_format is not None:
valid_str = '", "'.join(valid)
raise ValueError('"{}" is not a valid time format. Valid options are '
'"{}" and None.'.format(time_format, valid_str))
# allow datetime only if meas_date available
if time_format == 'datetime' and meas_date is None:
warn("Cannot convert to Datetime when raw.info['meas_date'] is "
"None. Falling back to Timedelta.")
time_format = 'timedelta'
return time_format
def _check_ch_locs(chs):
"""Check if channel locations exist.
Parameters
----------
chs : dict
The channels from info['chs']
"""
locs3d = np.array([ch['loc'][:3] for ch in chs])
return not ((locs3d == 0).all() or
(~np.isfinite(locs3d)).all() or
np.allclose(locs3d, 0.))
def _is_numeric(n):
return isinstance(n, (np.integer, np.floating, int, float))
class _IntLike(object):
@classmethod
def __instancecheck__(cls, other):
try:
_ensure_int(other)
except TypeError:
return False
else:
return True
int_like = _IntLike()
path_like = (str, Path)
class _Callable(object):
@classmethod
def __instancecheck__(cls, other):
return callable(other)
_multi = {
'str': (str,),
'numeric': (np.floating, float, int_like),
'path-like': path_like,
'int-like': (int_like,),
'callable': (_Callable(),),
}
try:
_multi['path-like'] += (os.PathLike,)
except AttributeError: # only on 3.6+
try:
# At least make PyTest work
from py._path.common import PathBase
except Exception: # no py.path
pass
else:
_multi['path-like'] += (PathBase,)
def _validate_type(item, types=None, item_name=None, type_name=None):
"""Validate that `item` is an instance of `types`.
Parameters
----------
item : object
The thing to be checked.
types : type | str | tuple of types | tuple of str
The types to be checked against.
If str, must be one of {'int', 'str', 'numeric', 'info', 'path-like',
'callable'}.
item_name : str | None
Name of the item to show inside the error message.
type_name : str | None
Possible types to show inside the error message that the checked item
can be.
"""
if types == "int":
_ensure_int(item, name=item_name)
return # terminate prematurely
elif types == "info":
from mne.io import Info as types
if not isinstance(types, (list, tuple)):
types = [types]
check_types = sum(((type(None),) if type_ is None else (type_,)
if not isinstance(type_, str) else _multi[type_]
for type_ in types), ())
if not isinstance(item, check_types):
if type_name is None:
type_name = ['None' if cls_ is None else cls_.__name__
if not isinstance(cls_, str) else cls_
for cls_ in types]
if len(type_name) == 1:
type_name = type_name[0]
elif len(type_name) == 2:
type_name = ' or '.join(type_name)
else:
type_name[-1] = 'or ' + type_name[-1]
type_name = ', '.join(type_name)
_item_name = 'Item' if item_name is None else item_name
raise TypeError(f"{_item_name} must be an instance of {type_name}, "
f"got {type(item)} instead")
def _check_path_like(item):
"""Validate that `item` is `path-like`.
Parameters
----------
item : object
The thing to be checked.
Returns
-------
bool
``True`` if `item` is a `path-like` object; ``False`` otherwise.
"""
try:
_validate_type(item, types='path-like')
return True
except TypeError:
return False
def _check_if_nan(data, msg=" to be plotted"):
"""Raise if any of the values are NaN."""
if not np.isfinite(data).all():
raise ValueError("Some of the values {} are NaN.".format(msg))
def _check_info_inv(info, forward, data_cov=None, noise_cov=None):
"""Return good channels common to forward model and covariance matrices."""
from .. import pick_types
# get a list of all channel names:
fwd_ch_names = forward['info']['ch_names']
# handle channels from forward model and info:
ch_names = _compare_ch_names(info['ch_names'], fwd_ch_names, info['bads'])
# make sure that no reference channels are left:
ref_chs = pick_types(info, meg=False, ref_meg=True)
ref_chs = [info['ch_names'][ch] for ch in ref_chs]
ch_names = [ch for ch in ch_names if ch not in ref_chs]
# inform about excluding channels:
if (data_cov is not None and set(info['bads']) != set(data_cov['bads']) and
(len(set(ch_names).intersection(data_cov['bads'])) > 0)):
logger.info('info["bads"] and data_cov["bads"] do not match, '
'excluding bad channels from both.')
if (noise_cov is not None and
set(info['bads']) != set(noise_cov['bads']) and
(len(set(ch_names).intersection(noise_cov['bads'])) > 0)):
logger.info('info["bads"] and noise_cov["bads"] do not match, '
'excluding bad channels from both.')
# handle channels from data cov if data cov is not None
# Note: data cov is supposed to be None in tf_lcmv
if data_cov is not None:
ch_names = _compare_ch_names(ch_names, data_cov.ch_names,
data_cov['bads'])
# handle channels from noise cov if noise cov available:
if noise_cov is not None:
ch_names = _compare_ch_names(ch_names, noise_cov.ch_names,
noise_cov['bads'])
picks = [info['ch_names'].index(k) for k in ch_names if k in
info['ch_names']]
return picks
def _compare_ch_names(names1, names2, bads):
"""Return channel names of common and good channels."""
ch_names = [ch for ch in names1 if ch not in bads and ch in names2]
return ch_names
def _check_channels_spatial_filter(ch_names, filters):
"""Return data channel indices to be used with spatial filter.
Unlike ``pick_channels``, this respects the order of ch_names.
"""
sel = []
# first check for channel discrepancies between filter and data:
for ch_name in filters['ch_names']:
if ch_name not in ch_names:
raise ValueError('The spatial filter was computed with channel %s '
'which is not present in the data. You should '
'compute a new spatial filter restricted to the '
'good data channels.' % ch_name)
# then compare list of channels and get selection based on data:
sel = [ii for ii, ch_name in enumerate(ch_names)
if ch_name in filters['ch_names']]
return sel
def _check_rank(rank):
"""Check rank parameter."""
_validate_type(rank, (None, dict, str), 'rank')
if isinstance(rank, str):
if rank not in ['full', 'info']:
raise ValueError('rank, if str, must be "full" or "info", '
'got %s' % (rank,))
return rank
def _check_one_ch_type(method, info, forward, data_cov=None, noise_cov=None):
"""Check number of sensor types and presence of noise covariance matrix."""
from ..cov import make_ad_hoc_cov, Covariance
from ..time_frequency.csd import CrossSpectralDensity
from ..io.pick import pick_info
from ..channels.channels import _contains_ch_type
if isinstance(data_cov, CrossSpectralDensity):
_validate_type(noise_cov, [None, CrossSpectralDensity], 'noise_cov')
# FIXME
picks = list(range(len(data_cov.ch_names)))
info_pick = info
else:
_validate_type(noise_cov, [None, Covariance], 'noise_cov')
picks = _check_info_inv(info, forward, data_cov=data_cov,
noise_cov=noise_cov)
info_pick = pick_info(info, picks)
ch_types =\
[_contains_ch_type(info_pick, tt) for tt in ('mag', 'grad', 'eeg')]
if sum(ch_types) > 1:
if noise_cov is None:
raise ValueError('Source reconstruction with several sensor types'
' requires a noise covariance matrix to be '
'able to apply whitening.')
if noise_cov is None:
noise_cov = make_ad_hoc_cov(info_pick, std=1.)
allow_mismatch = True
else:
noise_cov = noise_cov.copy()
if isinstance(noise_cov, Covariance) and 'estimator' in noise_cov:
del noise_cov['estimator']
allow_mismatch = False
_validate_type(noise_cov, (Covariance, CrossSpectralDensity), 'noise_cov')
return noise_cov, picks, allow_mismatch
def _check_depth(depth, kind='depth_mne'):
"""Check depth options."""
from ..defaults import _handle_default
if not isinstance(depth, dict):
depth = dict(exp=None if depth is None else float(depth))
return _handle_default(kind, depth)
def _check_option(parameter, value, allowed_values, extra=''):
"""Check the value of a parameter against a list of valid options.
Return the value if it is valid, otherwise raise a ValueError with a
readable error message.
Parameters
----------
parameter : str
The name of the parameter to check. This is used in the error message.
value : any type
The value of the parameter to check.
allowed_values : list
The list of allowed values for the parameter.
extra : str
Extra string to append to the invalid value sentence, e.g.
"when using ico mode".
Raises
------
ValueError
When the value of the parameter is not one of the valid options.
Returns
-------
value : any type
The value if it is valid.
"""
if value in allowed_values:
return value
# Prepare a nice error message for the user
extra = ' ' + extra if extra else extra
msg = ("Invalid value for the '{parameter}' parameter{extra}. "
'{options}, but got {value!r} instead.')
allowed_values = list(allowed_values) # e.g., if a dict was given
if len(allowed_values) == 1:
options = f'The only allowed value is {repr(allowed_values[0])}'
else:
options = 'Allowed values are '
options += ', '.join([f'{repr(v)}' for v in allowed_values[:-1]])
options += f', and {repr(allowed_values[-1])}'
raise ValueError(msg.format(parameter=parameter, options=options,
value=value, extra=extra))
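# Hedged usage sketch (not from the MNE test suite):
#   _check_option('mode', 'mean', ('mean', 'median'))  # returns 'mean'
#   _check_option('mode', 'max', ('mean', 'median'))   # raises ValueError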
def _check_all_same_channel_names(instances):
"""Check if a collection of instances all have the same channels."""
ch_names = instances[0].info["ch_names"]
for inst in instances:
if ch_names != inst.info["ch_names"]:
return False
return True
def _check_combine(mode, valid=('mean', 'median', 'std')):
if mode == "mean":
def fun(data):
return np.mean(data, axis=0)
elif mode == "std":
def fun(data):
return np.std(data, axis=0)
elif mode == "median" or mode == np.median:
def fun(data):
return _median_complex(data, axis=0)
elif callable(mode):
fun = mode
else:
raise ValueError("Combine option must be " + ", ".join(valid) +
" or callable, got %s (type %s)." %
(mode, type(mode)))
return fun
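# Hedged usage sketch: _check_combine('mean') returns a function equivalent to
# lambda data: np.mean(data, axis=0); a callable is passed through unchanged,
# and any other value raises ValueError listing the valid options.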
def _check_src_normal(pick_ori, src):
from ..source_space import SourceSpaces
_validate_type(src, SourceSpaces, 'src')
if pick_ori == 'normal' and src.kind not in ('surface', 'discrete'):
raise RuntimeError('Normal source orientation is supported only for '
'surface or discrete SourceSpaces, got type '
'%s' % (src.kind,))
def _check_stc_units(stc, threshold=1e-7): # 100 nAm threshold for warning
max_cur = np.max(np.abs(stc.data))
if max_cur > threshold:
warn('The maximum current magnitude is %0.1f nAm, which is very large.'
' Are you trying to apply the forward model to noise-normalized '
'(dSPM, sLORETA, or eLORETA) values? The result will only be '
'correct if currents (in units of Am) are used.'
% (1e9 * max_cur))
def _check_pyqt5_version():
bad = True
try:
from PyQt5.Qt import PYQT_VERSION_STR as version
except Exception:
version = 'unknown'
else:
if LooseVersion(version) >= LooseVersion('5.10'):
bad = False
bad &= sys.platform == 'darwin'
if bad:
warn('macOS users should use PyQt5 >= 5.10 for GUIs, got %s. '
'Please upgrade e.g. with:\n\n'
' pip install "PyQt5>=5.10,<5.14"\n'
% (version,))
return version
def _check_sphere(sphere, info=None, sphere_units='m'):
from ..defaults import HEAD_SIZE_DEFAULT
from ..bem import fit_sphere_to_headshape, ConductorModel, get_fitting_dig
if sphere is None:
sphere = HEAD_SIZE_DEFAULT
if info is not None:
# Decide if we have enough dig points to do the auto fit
try:
get_fitting_dig(info, 'extra', verbose='error')
except (RuntimeError, ValueError):
pass
else:
sphere = 'auto'
if isinstance(sphere, str):
if sphere != 'auto':
raise ValueError('sphere, if str, must be "auto", got %r'
% (sphere))
R, r0, _ = fit_sphere_to_headshape(info, verbose=False, units='m')
sphere = tuple(r0) + (R,)
sphere_units = 'm'
elif isinstance(sphere, ConductorModel):
if not sphere['is_sphere'] or len(sphere['layers']) == 0:
raise ValueError('sphere, if a ConductorModel, must be spherical '
'with multiple layers, not a BEM or single-layer '
'sphere (got %s)' % (sphere,))
sphere = tuple(sphere['r0']) + (sphere['layers'][0]['rad'],)
sphere_units = 'm'
sphere = np.array(sphere, dtype=float)
if sphere.shape == ():
sphere = np.concatenate([[0.] * 3, [sphere]])
if sphere.shape != (4,):
raise ValueError('sphere must be float or 1D array of shape (4,), got '
'array-like of shape %s' % (sphere.shape,))
_check_option('sphere_units', sphere_units, ('m', 'mm'))
if sphere_units == 'mm':
sphere /= 1000.
sphere = np.array(sphere, float)
return sphere
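# Illustrative sketch (added by the editor): every accepted form of ``sphere``
# normalizes to a 4-element array of (x, y, z, radius) in meters. The numbers
# are arbitrary examples.
def _demo_check_sphere():
    assert _check_sphere(0.09).shape == (4,)              # bare radius, meters
    assert np.isclose(_check_sphere(90., sphere_units='mm')[3], 0.09)
    assert _check_sphere((0., 0., 0.04, 0.09)).shape == (4,)  # full (x, y, z, r)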
def _check_freesurfer_home():
from .config import get_config
fs_home = get_config('FREESURFER_HOME')
if fs_home is None:
raise RuntimeError(
'The FREESURFER_HOME environment variable is not set.')
return fs_home
def _suggest(val, options, cutoff=0.66):
options = get_close_matches(val, options, cutoff=cutoff)
if len(options) == 0:
return ''
elif len(options) == 1:
return ' Did you mean %r?' % (options[0],)
else:
return ' Did you mean one of %r?' % (options,)
def _check_on_missing(on_missing, name='on_missing'):
_validate_type(on_missing, str, name)
_check_option(name, on_missing, ['raise', 'warn', 'ignore'])
def _on_missing(on_missing, msg, name='on_missing', error_klass=None):
_check_on_missing(on_missing, name)
error_klass = ValueError if error_klass is None else error_klass
on_missing = 'raise' if on_missing == 'error' else on_missing
on_missing = 'warn' if on_missing == 'warning' else on_missing
if on_missing == 'raise':
raise error_klass(msg)
elif on_missing == 'warn':
warn(msg)
else: # Ignore
assert on_missing == 'ignore'
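# Illustrative sketch (added): _on_missing maps a policy string onto the
# matching action. The message text and error class are arbitrary.
def _demo_on_missing():
    _on_missing('ignore', 'channel not found')  # silently does nothing
    try:
        _on_missing('raise', 'channel not found', error_klass=KeyError)
    except KeyError:
        pass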
def _safe_input(msg, *, alt=None, use=None):
try:
return input(msg)
except EOFError: # MATLAB or other non-stdin
if use is not None:
return use
raise RuntimeError(
f'Could not use input() to get a response to:\n{msg}\n'
f'You can {alt} to avoid this error.')
def _ensure_events(events):
events_type = type(events)
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore') # deprecation for object array
events = np.asarray(events)
if not np.issubdtype(events.dtype, np.integer):
raise TypeError('events should be a NumPy array of integers, '
f'got {events_type}')
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError(
f'events must be of shape (N, 3), got {events.shape}')
return events
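# Illustrative sketch (added): events must be an (N, 3) integer array; float
# entries or any other shape are rejected. The values below are made up.
def _demo_ensure_events():
    assert _ensure_events([[0, 0, 1], [100, 0, 2]]).shape == (2, 3)
    try:
        _ensure_events([[0.5, 0, 1]])  # non-integer dtype raises
    except TypeError:
        pass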
|
kambysese/mne-python
|
mne/utils/check.py
|
Python
|
bsd-3-clause
| 27,288
|
"""Monitor and influence the optimization procedure via callbacks.
Callbacks are callables which are invoked after each iteration of the optimizer
and are passed the results "so far". Callbacks can monitor progress, or stop
the optimization early by returning `True`.
"""
try:
from collections.abc import Callable
except ImportError:
from collections import Callable
from time import time
import numpy as np
from skopt.utils import dump
def check_callback(callback):
"""
Check if callback is a callable or a list of callables.
"""
if callback is not None:
if isinstance(callback, Callable):
return [callback]
elif (isinstance(callback, list) and
all([isinstance(c, Callable) for c in callback])):
return callback
else:
raise ValueError("callback should be either a callable or "
"a list of callables.")
else:
return []
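# Illustrative sketch (an editor addition, not original skopt code):
# check_callback normalizes the ``callback`` argument into a list of callables.
def _demo_check_callback():
    def cb(res):
        return None
    assert check_callback(None) == []
    assert check_callback(cb) == [cb]
    assert check_callback([cb, print]) == [cb, print]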
class VerboseCallback(object):
"""
Callback to control the verbosity.
Parameters
----------
n_init : int, optional
Number of points provided by the user which are yet to be
evaluated. This is equal to `len(x0)` when `y0` is None
n_random : int, optional
Number of points randomly chosen.
n_total : int
Total number of func calls.
Attributes
----------
iter_no : int
Number of iterations of the optimization routine.
"""
def __init__(self, n_total, n_init=0, n_random=0):
self.n_init = n_init
self.n_random = n_random
self.n_total = n_total
self.iter_no = 1
self._start_time = time()
self._print_info(start=True)
def _print_info(self, start=True):
iter_no = self.iter_no
if start:
status = "started"
eval_status = "Evaluating function"
search_status = "Searching for the next optimal point."
else:
status = "ended"
eval_status = "Evaluation done"
search_status = "Search finished for the next optimal point."
if iter_no <= self.n_init:
print("Iteration No: %d %s. %s at provided point."
% (iter_no, status, eval_status))
elif self.n_init < iter_no <= (self.n_random + self.n_init):
print("Iteration No: %d %s. %s at random point."
% (iter_no, status, eval_status))
else:
print("Iteration No: %d %s. %s"
% (iter_no, status, search_status))
def __call__(self, res):
"""
Parameters
----------
res : `OptimizeResult`, scipy object
The optimization as a OptimizeResult object.
"""
time_taken = time() - self._start_time
self._print_info(start=False)
curr_y = res.func_vals[-1]
curr_min = res.fun
print("Time taken: %0.4f" % time_taken)
print("Function value obtained: %0.4f" % curr_y)
print("Current minimum: %0.4f" % curr_min)
self.iter_no += 1
if self.iter_no <= self.n_total:
self._print_info(start=True)
self._start_time = time()
class TimerCallback(object):
"""
Log the elapsed time between each iteration of the minimization loop.
The time for each iteration is stored in the `iter_time` attribute which
you can inspect after the minimization has completed.
Attributes
----------
iter_time : list, shape (n_iter,)
`iter_time[i-1]` gives the time taken to complete iteration `i`
"""
def __init__(self):
self._time = time()
self.iter_time = []
def __call__(self, res):
"""
Parameters
----------
res : `OptimizeResult`, scipy object
The optimization as a OptimizeResult object.
"""
elapsed_time = time() - self._time
self.iter_time.append(elapsed_time)
self._time = time()
class EarlyStopper(object):
"""Decide to continue or not given the results so far.
The optimization procedure will be stopped if the callback returns True.
"""
def __call__(self, result):
"""
Parameters
----------
result : `OptimizeResult`, scipy object
The optimization as a OptimizeResult object.
"""
return self._criterion(result)
def _criterion(self, result):
"""Compute the decision to stop or not.
        Classes inheriting from `EarlyStopper` should use this method to
implement their decision logic.
Parameters
----------
result : `OptimizeResult`, scipy object
The optimization as a OptimizeResult object.
Returns
-------
decision : boolean or None
Return True/False if the criterion can make a decision or `None` if
there is not enough data yet to make a decision.
"""
raise NotImplementedError("The _criterion method should be implemented"
" by subclasses of EarlyStopper.")
class DeltaXStopper(EarlyStopper):
"""Stop the optimization when ``|x1 - x2| < delta``
    If the last two positions at which the objective has been evaluated
    are less than `delta` apart, stop the optimization procedure.
"""
def __init__(self, delta):
        super(DeltaXStopper, self).__init__()
self.delta = delta
def _criterion(self, result):
if len(result.x_iters) >= 2:
return result.space.distance(result.x_iters[-2],
result.x_iters[-1]) < self.delta
else:
return None
class DeltaYStopper(EarlyStopper):
"""Stop the optimization if the `n_best` minima are within `delta`
Stop the optimizer if the absolute difference between the `n_best`
objective values is less than `delta`.
"""
def __init__(self, delta, n_best=5):
        super(DeltaYStopper, self).__init__()
self.delta = delta
self.n_best = n_best
def _criterion(self, result):
if len(result.func_vals) >= self.n_best:
func_vals = np.sort(result.func_vals)
worst = func_vals[self.n_best - 1]
best = func_vals[0]
# worst is always larger, so no need for abs()
return worst - best < self.delta
else:
return None
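# Illustrative sketch (added; not original skopt code): an early stopper is
# passed through the ``callback`` argument of a minimizer. The objective,
# search space and delta below are arbitrary.
def _demo_delta_y_stopper():  # pragma: no cover - example only
    from skopt import gp_minimize
    return gp_minimize(lambda x: (x[0] - 1.0) ** 2, [(-2.0, 2.0)],
                       n_calls=20, callback=DeltaYStopper(delta=1e-4),
                       random_state=0)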
class HollowIterationsStopper(EarlyStopper):
"""
Stop if the improvement over the last n iterations is below a threshold.
"""
def __init__(self, n_iterations, threshold=0):
super(HollowIterationsStopper, self).__init__()
self.n_iterations = n_iterations
self.threshold = abs(threshold)
def _criterion(self, result):
if len(result.func_vals) <= self.n_iterations:
return False
cummin = np.minimum.accumulate(result.func_vals)
return cummin[-self.n_iterations - 1] - cummin[-1] <= self.threshold
class DeadlineStopper(EarlyStopper):
"""
Stop the optimization before running out of a fixed budget of time.
Attributes
----------
iter_time : list, shape (n_iter,)
`iter_time[i-1]` gives the time taken to complete iteration `i`
Parameters
----------
total_time : float
fixed budget of time (seconds) that the optimization must
finish within.
"""
def __init__(self, total_time):
super(DeadlineStopper, self).__init__()
self._time = time()
self.iter_time = []
self.total_time = total_time
def _criterion(self, result):
elapsed_time = time() - self._time
self.iter_time.append(elapsed_time)
self._time = time()
if result.x_iters:
time_remaining = self.total_time - np.sum(self.iter_time)
return time_remaining <= np.max(self.iter_time)
else:
return None
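# Illustrative sketch (added): DeadlineStopper halts the search once the
# remaining budget could no longer fit another iteration as long as the
# longest one seen so far. The 60-second budget and objective are arbitrary.
def _demo_deadline_stopper():  # pragma: no cover - example only
    from skopt import forest_minimize
    stopper = DeadlineStopper(total_time=60)
    return forest_minimize(lambda x: x[0] ** 2, [(-5.0, 5.0)],
                           n_calls=30, callback=[stopper])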
class ThresholdStopper(EarlyStopper):
"""
    Stop the optimization when the objective value is lower than
    or equal to the given threshold.
"""
def __init__(self, threshold: float) -> None:
        super(ThresholdStopper, self).__init__()
self.threshold = threshold
def _criterion(self, result) -> bool:
return np.any([val <= self.threshold for val in result.func_vals])
class CheckpointSaver(object):
"""
Save current state after each iteration with :class:`skopt.dump`.
Examples
--------
>>> import skopt
>>> def obj_fun(x):
... return x[0]**2
>>> checkpoint_callback = skopt.callbacks.CheckpointSaver("./result.pkl")
>>> skopt.gp_minimize(obj_fun, [(-2, 2)], n_calls=10,
... callback=[checkpoint_callback]) # doctest: +SKIP
Parameters
----------
checkpoint_path : string
        location where the checkpoint will be saved
dump_options : string
options to pass on to `skopt.dump`, like `compress=9`
"""
def __init__(self, checkpoint_path, **dump_options):
self.checkpoint_path = checkpoint_path
self.dump_options = dump_options
def __call__(self, res):
"""
Parameters
----------
res : `OptimizeResult`, scipy object
The optimization as a OptimizeResult object.
"""
dump(res, self.checkpoint_path, **self.dump_options)
|
scikit-optimize/scikit-optimize
|
skopt/callbacks.py
|
Python
|
bsd-3-clause
| 9,377
|
"""Beatles Dataset Loader
.. admonition:: Dataset Info
:class: dropdown
The Beatles Dataset includes beat and metric position, chord, key, and segmentation
annotations for 179 Beatles songs. Details can be found in http://matthiasmauch.net/_pdf/mauch_omp_2009.pdf and
http://isophonics.net/content/reference-annotations-beatles.
"""
import csv
import os
from typing import BinaryIO, Optional, TextIO, Tuple
from deprecated.sphinx import deprecated
import librosa
import numpy as np
from mirdata import download_utils
from mirdata import jams_utils
from mirdata import core
from mirdata import annotations
from mirdata import io
BIBTEX = """@inproceedings{mauch2009beatles,
title={OMRAS2 metadata project 2009},
author={Mauch, Matthias and Cannam, Chris and Davies, Matthew and Dixon, Simon and Harte,
Christopher and Kolozali, Sefki and Tidhar, Dan and Sandler, Mark},
booktitle={12th International Society for Music Information Retrieval Conference},
year={2009},
series = {ISMIR}
}"""
INDEXES = {
"default": "1.2",
"test": "1.2",
"1.2": core.Index(filename="beatles_index_1.2.json"),
}
REMOTES = {
"annotations": download_utils.RemoteFileMetadata(
filename="The Beatles Annotations.tar.gz",
url="http://isophonics.net/files/annotations/The%20Beatles%20Annotations.tar.gz",
checksum="62425c552d37c6bb655a78e4603828cc",
destination_dir="annotations",
)
}
DOWNLOAD_INFO = """
Unfortunately the audio files of the Beatles dataset are not available
for download. If you have the Beatles dataset, place the contents into
a folder called Beatles with the following structure:
> Beatles/
> annotations/
> audio/
and copy the Beatles folder to {}
"""
LICENSE_INFO = (
"Unfortunately we couldn't find the license information for the Beatles dataset."
)
class Track(core.Track):
"""Beatles track class
Args:
track_id (str): track id of the track
data_home (str): path where the data lives
Attributes:
audio_path (str): track audio path
beats_path (str): beat annotation path
chords_path (str): chord annotation path
keys_path (str): key annotation path
sections_path (str): sections annotation path
title (str): title of the track
track_id (str): track id
Cached Properties:
beats (BeatData): human-labeled beat annotations
chords (ChordData): human-labeled chord annotations
key (KeyData): local key annotations
sections (SectionData): section annotations
"""
def __init__(
self,
track_id,
data_home,
dataset_name,
index,
metadata,
):
super().__init__(
track_id,
data_home,
dataset_name,
index,
metadata,
)
self.beats_path = self.get_path("beat")
self.chords_path = self.get_path("chords")
self.keys_path = self.get_path("keys")
self.sections_path = self.get_path("sections")
self.audio_path = self.get_path("audio")
self.title = os.path.basename(self._track_paths["sections"][0]).split(".")[0]
@core.cached_property
def beats(self) -> Optional[annotations.BeatData]:
return load_beats(self.beats_path)
@core.cached_property
def chords(self) -> Optional[annotations.ChordData]:
return load_chords(self.chords_path)
@core.cached_property
def key(self) -> Optional[annotations.KeyData]:
return load_key(self.keys_path)
@core.cached_property
def sections(self) -> Optional[annotations.SectionData]:
return load_sections(self.sections_path)
@property
def audio(self) -> Optional[Tuple[np.ndarray, float]]:
"""The track's audio
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_audio(self.audio_path)
def to_jams(self):
"""the track's data in jams format
Returns:
jams.JAMS: return track data in jam format
"""
return jams_utils.jams_converter(
audio_path=self.audio_path,
beat_data=[(self.beats, None)],
section_data=[(self.sections, None)],
chord_data=[(self.chords, None)],
key_data=[(self.key, None)],
metadata={"artist": "The Beatles", "title": self.title},
)
@io.coerce_to_bytes_io
def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load a Beatles audio file.
Args:
fhandle (str or file-like): path or file-like object pointing to an audio file
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
return librosa.load(fhandle, sr=None, mono=True)
@io.coerce_to_string_io
def load_beats(fhandle: TextIO) -> annotations.BeatData:
"""Load Beatles format beat data from a file
Args:
fhandle (str or file-like): path or file-like object pointing to a beat annotation file
Returns:
BeatData: loaded beat data
"""
beat_times, beat_positions = [], []
dialect = csv.Sniffer().sniff(fhandle.read(1024))
fhandle.seek(0)
reader = csv.reader(fhandle, dialect)
for line in reader:
beat_times.append(float(line[0]))
beat_positions.append(line[-1])
beat_positions = _fix_newpoint(np.array(beat_positions)) # type: ignore
# After fixing New Point labels convert positions to int
beat_data = annotations.BeatData(
np.array(beat_times),
"s",
np.array([int(b) for b in beat_positions]),
"bar_index",
)
return beat_data
@io.coerce_to_string_io
def load_chords(fhandle: TextIO) -> annotations.ChordData:
"""Load Beatles format chord data from a file
Args:
fhandle (str or file-like): path or file-like object pointing to a chord annotation file
Returns:
ChordData: loaded chord data
"""
start_times, end_times, chords = [], [], []
dialect = csv.Sniffer().sniff(fhandle.read(1024))
fhandle.seek(0)
reader = csv.reader(fhandle, dialect)
for line in reader:
start_times.append(float(line[0]))
end_times.append(float(line[1]))
chords.append(line[2])
return annotations.ChordData(
np.array([start_times, end_times]).T, "s", chords, "harte"
)
@io.coerce_to_string_io
def load_key(fhandle: TextIO) -> annotations.KeyData:
"""Load Beatles format key data from a file
Args:
fhandle (str or file-like): path or file-like object pointing to a key annotation file
Returns:
KeyData: loaded key data
"""
start_times, end_times, keys = [], [], []
reader = csv.reader(fhandle, delimiter="\t")
for line in reader:
if line[2] == "Key":
start_times.append(float(line[0]))
end_times.append(float(line[1]))
keys.append(line[3])
return annotations.KeyData(
np.array([start_times, end_times]).T, "s", keys, "key_mode"
)
@io.coerce_to_string_io
def load_sections(fhandle: TextIO) -> annotations.SectionData:
"""Load Beatles format section data from a file
Args:
fhandle (str or file-like): path or file-like object pointing to a section annotation file
Returns:
SectionData: loaded section data
"""
start_times, end_times, sections = [], [], []
reader = csv.reader(fhandle, delimiter="\t")
for line in reader:
start_times.append(float(line[0]))
end_times.append(float(line[1]))
sections.append(line[3])
return annotations.SectionData(
np.array([start_times, end_times]).T, "s", sections, "open"
)
def _fix_newpoint(beat_positions: np.ndarray) -> np.ndarray:
"""Fills in missing beat position labels by inferring the beat position
from neighboring beats.
"""
while np.any(beat_positions == "New Point"):
idxs = np.where(beat_positions == "New Point")[0]
for i in idxs:
if i < len(beat_positions) - 1:
if not beat_positions[i + 1] == "New Point":
beat_positions[i] = str(np.mod(int(beat_positions[i + 1]) - 1, 4))
if i == len(beat_positions) - 1:
if not beat_positions[i - 1] == "New Point":
beat_positions[i] = str(np.mod(int(beat_positions[i - 1]) + 1, 4))
beat_positions[beat_positions == "0"] = "4"
return beat_positions
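# Illustrative sketch (added, not part of mirdata): _fix_newpoint infers a
# missing "New Point" label from its neighbors' beat positions.
def _demo_fix_newpoint():
    positions = np.array(["1", "2", "New Point", "4", "1"])
    assert list(_fix_newpoint(positions)) == ["1", "2", "3", "4", "1"]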
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
The beatles dataset
"""
def __init__(self, data_home=None, version="default"):
super().__init__(
data_home,
version,
name="beatles",
track_class=Track,
bibtex=BIBTEX,
indexes=INDEXES,
remotes=REMOTES,
download_info=DOWNLOAD_INFO,
license_info=LICENSE_INFO,
)
@deprecated(
reason="Use mirdata.datasets.beatles.load_audio",
version="0.3.4",
)
def load_audio(self, *args, **kwargs):
return load_audio(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.beatles.load_beats",
version="0.3.4",
)
def load_beats(self, *args, **kwargs):
return load_beats(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.beatles.load_chords",
version="0.3.4",
)
def load_chords(self, *args, **kwargs):
return load_chords(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.beatles.load_sections",
version="0.3.4",
)
def load_sections(self, *args, **kwargs):
return load_sections(*args, **kwargs)
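# Illustrative sketch (added; not part of mirdata): typical high-level use of
# this loader. The data_home path is hypothetical, and the core.Dataset
# accessors used here (track_ids, load_tracks) are assumed from mirdata's API.
def _demo_beatles_dataset():  # pragma: no cover - example only
    dataset = Dataset(data_home="/path/to/mir_datasets/beatles")
    tracks = dataset.load_tracks()
    track = tracks[dataset.track_ids[0]]
    return track.beats, track.chords, track.sections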
|
mir-dataset-loaders/mirdata
|
mirdata/datasets/beatles.py
|
Python
|
bsd-3-clause
| 9,857
|
#!/usr/bin/env python
# A test file must start with a header, where each line starts with '// '.
# The first line of the header must be 'Plinth Test'.
# Successive lines can specify options for the test. Valid options are:
# Main: <type name>
# Uses the specified type name as the main type argument to the compiler.
# Compiler: <some text>
# Tests that the compiler outputs the specified text as a whole line, somewhere in its output.
# MustCompile
# Tests that the compiler succeeds for this file. Implied by options that test a run-time result.
# Args: <some arguments>
# Uses the specified argument list for running the compiled test program.
# ReturnCode: <number>
# Tests that running the program produces the specified return code.
# Out: <some text>
# Tests that the running compiled code produces exactly the specified text.
# If multiple "Out: " lines are used, the output must contain all lines in the order specified.
import sys
from subprocess import Popen,PIPE,call
import os
import tempfile
import shutil
PLINTH_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/..')
COMPILER_EXEC = PLINTH_DIR + '/plinth'
RUNTIME = PLINTH_DIR + '/runtime/runtime.bc'
TEST_LIB = PLINTH_DIR + '/test/TestLib.pth'
class Test:
def __init__(self, filename, mainTypeName, compilerLines, mustCompile, args, returnCode, outLines):
self.filename = filename
self.mainTypeName = mainTypeName
self.compilerLines = compilerLines
self.mustCompile = mustCompile
self.args = args
self.returnCode = returnCode
self.outLines = outLines
def parse_header(filename):
lines = []
with open(filename) as f:
lines = f.readlines()
header = []
for i in range(len(lines)):
if lines[i].startswith('// '):
header.append(lines[i][3:])
else:
break
if len(header) == 0:
print("A test must contain a header")
sys.exit(1)
if header[0].strip() != "Plinth Test":
print("A test must start with the plinth test header: \"// Plinth Test\"")
sys.exit(1)
mainTypeName = None
compilerLines = []
mustCompile = False
args = None
returnCode = None
outLines = None
for i in range(1, len(header)):
if header[i].startswith('Main: '):
if mainTypeName != None:
print("A test cannot declare two main types")
sys.exit(1)
mainTypeName = header[i][len('Main: '):].strip()
elif header[i].startswith('Compiler: '):
compilerLines.append(header[i][len('Compiler: '):])
elif header[i].startswith('MustCompile'):
mustCompile = True
elif header[i].startswith('Args: '):
mustCompile = True
if args != None:
print("A test cannot declare two argument lists")
sys.exit(1)
args = header[i][len('Args: '):].strip().split(' ')
elif header[i].startswith('ReturnCode: '):
mustCompile = True
if returnCode != None:
print("A test cannot declare two return codes")
sys.exit(1)
returnCode = int(header[i][len('ReturnCode: '):].strip())
elif header[i].startswith('Out: '):
mustCompile = True
if outLines == None:
outLines = []
outLines.append(header[i][len('Out: '):])
elif header[i].strip() != '':
print("Unrecognised header line: " + header[i])
sys.exit(1)
if mainTypeName == None and returnCode != None:
print("To test for a return code, please specify \"Main: \"")
sys.exit(1)
if mainTypeName == None and outLines != None:
print("To test an executable's output, please specify \"Main: \"")
sys.exit(1)
if mainTypeName == None and args != None:
print("Cannot provide arguments to a test without a main type")
sys.exit(1)
if args != None and returnCode == None and outLines == None:
print("Cannot provide arguments to a test with no expected results")
sys.exit(1)
if compilerLines == [] and mustCompile == False:
print("No tests specified!")
sys.exit(1)
return Test(filename, mainTypeName, compilerLines, mustCompile, args, returnCode, outLines)
def run_compiler(test):
work_dir = test.work_dir
test.compiled_base = work_dir + "/compiled"
if test.mainTypeName != None:
test.compiled_base = work_dir + "/" + test.mainTypeName
bitcode_file = test.compiled_base + ".pbc"
args = [COMPILER_EXEC, '-o', bitcode_file, '-l', RUNTIME]
if test.mainTypeName != None:
args += ['-m', test.mainTypeName]
args += [TEST_LIB]
args += [test.filename]
proc = Popen(args, stderr=PIPE)
stdout, stderr = proc.communicate()
error_lines = stderr.decode('utf-8').splitlines(True)
basename = os.path.basename(test.filename)
compilerLines = [x.replace(basename, test.filename) for x in test.compilerLines]
for i in range(len(error_lines)):
for j in range(len(compilerLines)):
if error_lines[i].strip() == compilerLines[j].strip():
del compilerLines[j]
break
if len(compilerLines) > 0:
print("Fail: " + test.filename)
print(" Failed to produce the compiler output lines:")
for i in range(len(compilerLines)):
print(" " + compilerLines[i].strip())
print(" Actual compiler output was:")
for i in range(len(error_lines)):
print(" > " + error_lines[i], end="")
shutil.rmtree(work_dir)
sys.exit(2)
if proc.returncode != 0:
if test.mustCompile:
print("Fail: " + test.filename)
print(" Failed to compile! Compiler output:")
for i in range(len(error_lines)):
print(" > " + error_lines[i], end="")
shutil.rmtree(work_dir)
sys.exit(2)
else:
# Success, since the test must have been on the compiler output rather than the runtime output
return
assembly_file = test.compiled_base + ".s"
verifyResult = call(['llc', bitcode_file, '-o', assembly_file])
if verifyResult != 0:
print("Fail: " + test.filename)
print(" Failed to generate assembly from source file")
shutil.rmtree(work_dir)
sys.exit(2)
def run_program(test):
work_dir = test.work_dir
assembly_file = test.compiled_base + ".s"
executable_file = test.compiled_base
gccResult = call(['gcc', assembly_file, '-o', executable_file])
if gccResult != 0:
print("Fail: " + test.filename)
print(" Failed to generate an executable from the assembly code")
shutil.rmtree(work_dir)
sys.exit(2)
args = test.args
if args == None:
args = [test.mainTypeName]
proc = Popen(args, executable=executable_file, stdout=PIPE)
stdout, stderr = proc.communicate()
if test.returnCode != None:
if proc.returncode != test.returnCode:
print("Fail: " + test.filename)
print(" Return code mismatch. Expected: " + str(test.returnCode) + " but got: " + str(proc.returncode))
shutil.rmtree(work_dir)
sys.exit(3)
output_lines = stdout.decode('utf-8').splitlines(True)
output_failure = False
if test.outLines != None:
if len(output_lines) != len(test.outLines):
print("Fail: " + test.filename)
print(" Number of output lines differs (expected: " + str(len(test.outLines)) + " but got: " + str(len(output_lines)) + ")")
output_failure = True
else:
for i in range(len(output_lines)):
if output_lines[i] != test.outLines[i]:
print("Fail: " + test.filename)
print(" Output line {} differs. Expected:".format(i+1))
print(" " + test.outLines[i], end="")
print(" But got:")
print(" " + output_lines[i], end="")
output_failure = True
break
if output_failure:
print(" Actual program output was:")
for i in range(len(output_lines)):
print(" > " + output_lines[i], end="")
shutil.rmtree(work_dir)
sys.exit(3)
if __name__ == "__main__":
filename = sys.argv[1]
test = parse_header(filename)
test.work_dir = tempfile.mkdtemp()
run_compiler(test)
if test.mainTypeName != None and (test.returnCode != None or test.outLines != None):
run_program(test)
# Success!
print("Passed: " + test.filename)
shutil.rmtree(test.work_dir)
|
abryant/Plinth
|
test/runtest.py
|
Python
|
bsd-3-clause
| 8,056
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rvws.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
gardenm/hypetra.in
|
rvws/manage.py
|
Python
|
bsd-3-clause
| 247
|
"""Metrics to assess performance on classification task given classe prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # We can't have more than one value on y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
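# Illustrative sketch (an editor addition, not original scikit-learn code):
# _check_targets reports the joint target type and returns harmonized arrays.
def _demo_check_targets():
    kind, _, _ = _check_targets([0, 1, 1], [0, 1, 0])
    assert kind == 'binary'
    kind, yt, yp = _check_targets(np.array([[0, 1], [1, 1]]),
                                  np.array([[0, 1], [1, 0]]))
    assert kind == 'multilabel-indicator'  # yt, yp are now CSR matrices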
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
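# Illustrative sketch (added): _weighted_sum averages per-sample scores when
# normalize=True and sums them (optionally weighted) otherwise.
def _demo_weighted_sum():
    score = np.array([1.0, 0.0, 1.0, 1.0])
    assert _weighted_sum(score, None, normalize=True) == 0.75
    assert _weighted_sum(score, None) == 3.0
    assert _weighted_sum(score, np.array([2, 1, 1, 1])) == 4.0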
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
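# Illustrative sketch (added; the docstring above carries no Examples section):
# perfect agreement yields a kappa of 1, statistically independent labelings
# yield roughly 0. The label vectors are arbitrary.
def _demo_cohen_kappa():
    assert cohen_kappa_score([0, 1, 1, 0], [0, 1, 1, 0]) == 1.0
    assert abs(cohen_kappa_score([0, 0, 1, 1], [0, 1, 0, 1])) < 1e-12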
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
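# Illustrative sketch (added): _prf_divide zeroes out entries with a zero
# denominator and warns that those labels are ill-defined. Values are made up.
def _demo_prf_divide():
    num = np.array([2.0, 0.0])
    den = np.array([4.0, 0.0])
    with np.errstate(divide='ignore', invalid='ignore'):
        out = _prf_divide(num, den, 'precision', 'predicted', None,
                          ('precision',))
    assert list(out) == [0.5, 0.0]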
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
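# Illustrative sketch (not part of scikit-learn): how the sufficient
# statistics computed above (tp_sum, pred_sum, true_sum) turn into precision,
# recall and F-beta for a single label. The helper name `_fbeta_from_counts`
# and the counts in the docstring are assumptions made for this example only.
def _fbeta_from_counts(tp, pred, true, beta=1.0):
    """Return (precision, recall, f_beta) from raw counts.
    With tp=2, pred=3, true=4 and beta=1 this gives precision 2/3, recall 1/2
    and F1 = 2 * (2/3) * (1/2) / ((2/3) + (1/2)) = 4/7.
    """
    precision = float(tp) / pred if pred else 0.0
    recall = float(tp) / true if true else 0.0
    beta2 = beta ** 2
    denom = beta2 * precision + recall
    f_beta = (1 + beta2) * precision * recall / denom if denom else 0.0
    return precision, recall, f_beta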
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
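# Illustrative sketch (not part of scikit-learn): the 'avg / total' row built
# above is the support-weighted mean of the per-class scores. The helper name
# `_support_weighted_average` is an assumption for this example.
def _support_weighted_average(scores, supports):
    """Mean of per-class scores weighted by class support.
    For precisions [0.5, 0.0, 1.0] with supports [1, 1, 3] this returns
    (0.5 + 0.0 + 3.0) / 5 = 0.7, matching the 0.70 in the example report.
    """
    total = float(sum(supports))
    return sum(score * weight for score, weight in zip(scores, supports)) / total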
def hamming_loss(y_true, y_pred, classes=None, sample_weight=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes the individual
labels.
The Hamming loss is upperbounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred, sample_weight=sample_weight)
return (n_differences / (y_true.shape[0] * len(classes) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
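# Illustrative sketch (not part of scikit-learn): for dense 0/1 indicator
# matrices the multilabel branch above reduces to the fraction of entries
# that differ. The helper name is an assumption; it relies on the module's
# existing `np` (numpy) import and ignores sample weights.
def _hamming_loss_dense(y_true, y_pred):
    """Fraction of differing entries between two 0/1 indicator arrays.
    For y_true = [[0, 1], [1, 1]] and y_pred = [[0, 0], [0, 0]] three of the
    four entries differ, giving 0.75 as in the docstring example above.
    """
    return np.mean(np.asarray(y_true) != np.asarray(y_pred))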
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
y_pred = check_array(y_pred, ensure_2d=False)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
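# Illustrative sketch (not part of scikit-learn): the docstring value above
# (0.21616...) is simply the mean negative log of the probability each row
# assigns to its true class. The helper name and the probability list are
# assumptions for this example; `np` is the module's existing numpy import.
def _mean_neg_log_likelihood(true_class_probs):
    """Mean of -log(p) over the probabilities given to the true classes.
    For the docstring example those probabilities are [0.9, 0.9, 0.8, 0.65]
    and the mean negative log is about 0.21616.
    """
    probs = np.asarray(true_class_probs, dtype=float)
    return float(np.mean(-np.log(probs)))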
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
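# Illustrative sketch (not part of scikit-learn): the binary branch above
# averages max(0, 1 - y * decision) with labels y in {-1, +1}. This
# hypothetical helper restates that for plain Python sequences.
def _binary_hinge(y_true, pred_decision):
    """Average of max(0, 1 - y * d) for labels y in {-1, +1}.
    For y_true = [-1, 1, 1] and decisions [-2.18, 2.36, 0.09] the per-sample
    losses are [0, 0, 0.91], so the mean is about 0.30 (as in the docstring).
    """
    losses = [max(0.0, 1.0 - y * d) for y, d in zip(y_true, pred_decision)]
    return sum(losses) / len(losses)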
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
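# Illustrative sketch (not part of scikit-learn): the Brier score above is the
# mean squared difference between the predicted probability of the positive
# class and the 0/1 outcome. The helper name is an assumption; `np` is the
# module's existing numpy import.
def _brier(y_true, y_prob):
    """Mean of (y - p) ** 2 for binary outcomes y and probabilities p.
    For y_true = [0, 1, 1, 0] and y_prob = [0.1, 0.9, 0.8, 0.3] the squared
    errors are [0.01, 0.01, 0.04, 0.09], giving 0.0375 (the 0.037... above).
    """
    y_true = np.asarray(y_true, dtype=float)
    y_prob = np.asarray(y_prob, dtype=float)
    return float(np.mean((y_true - y_prob) ** 2))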
|
chrisburr/scikit-learn
|
sklearn/metrics/classification.py
|
Python
|
bsd-3-clause
| 68,445
|
import re
from threading import Thread
import rostopic
from BAL.Handlers.keepAliveHandler import KeepAliveHandler
from BAL.Header.Requests.PublishRequest import PublishRequest
from BAL.Header.Response.ParamBuildResponse import URF_HRLV
__author__ = 'tom1231'
from BAL.Interfaces.Device import Device
from rospy import Publisher
import rospy
from sensor_msgs.msg import Range
MIN_RANGE_URF_LV_MaxSonar = 0.16
MAX_RANGE_URF_LV_MaxSonar = 6.45
FIELD_OF_VIEW_URF_LV_MaxSonar = 0.7
MIN_RANGE_URF_HRLV_MaxSonar = 0.3
MAX_RANGE_URF_HRLV_MaxSonar = 5.0
FIELD_OF_VIEW_URF_HRLV_MaxSonar = 0.7
class RiCURF(Device):
def __init__(self, devId, param, output):
Device.__init__(self, param.getURFName(devId), output)
self._urfType = param.getURFType(devId)
self._frameId = param.getURFFrameId(devId)
self._pub = Publisher('%s' % self._name, Range, queue_size=param.getURFPubHz(devId))
#KeepAliveHandler(self._name, Range)
self._devId = devId
self._haveRightToPublish = False
def getType(self): return self._urfType
def publish(self, data):
msg = Range()
msg.header.stamp = rospy.get_rostime()
msg.header.frame_id = self._frameId
if self._urfType == URF_HRLV:
msg.min_range = MIN_RANGE_URF_HRLV_MaxSonar
msg.max_range = MAX_RANGE_URF_HRLV_MaxSonar
msg.field_of_view = FIELD_OF_VIEW_URF_HRLV_MaxSonar
else:
msg.min_range = MIN_RANGE_URF_LV_MaxSonar
msg.max_range = MAX_RANGE_URF_LV_MaxSonar
msg.field_of_view = FIELD_OF_VIEW_URF_LV_MaxSonar
msg.radiation_type = Range.ULTRASOUND
msg.range = data
self._pub.publish(msg)
def checkForSubscribers(self):
try:
subCheck = re.search('Subscribers:.*', rostopic.get_info_text(self._pub.name)).group(0).split(': ')[1]
if not self._haveRightToPublish and subCheck == '':
self._output.write(PublishRequest(self.getType(), self._devId, True).dataTosend())
self._haveRightToPublish = True
elif self._haveRightToPublish and subCheck == 'None':
self._output.write(PublishRequest(self.getType(), self._devId, False).dataTosend())
self._haveRightToPublish = False
except: pass
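# Illustrative sketch (not part of the RiC board driver): building the same
# Range message that publish() above fills, using the HRLV limits, for
# standalone experimentation without a serial connection. The default frame
# id is a hypothetical placeholder; the header stamp is left unset so no
# initialized ROS node is required.
def _example_hrlv_range_message(distance, frame_id='urf_frame'):
    msg = Range()
    msg.header.frame_id = frame_id
    msg.min_range = MIN_RANGE_URF_HRLV_MaxSonar
    msg.max_range = MAX_RANGE_URF_HRLV_MaxSonar
    msg.field_of_view = FIELD_OF_VIEW_URF_HRLV_MaxSonar
    msg.radiation_type = Range.ULTRASOUND
    msg.range = distance
    return msg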
|
robotican/ric
|
ric_board/scripts/RiCTraffic/BAL/Devices/RiCURF.py
|
Python
|
bsd-3-clause
| 2,326
|
import sys
import traceback
from django.conf import settings
class AppNotFoundError(Exception):
pass
class ClassNotFoundError(Exception):
pass
def get_class(module_label, classname):
return get_classes(module_label, [classname, ])[0]
def get_classes(module_label, classnames):
""" Imports a set of classes from a given module.
Usage::
get_classes('forum.models', ['Forum', 'ForumReadTrack', ])
"""
app_label = module_label.split('.')[0]
app_module_path = _get_app_module_path(module_label)
if not app_module_path:
raise AppNotFoundError('No app found matching \'{}\''.format(module_label))
# Determines the full module path by appending the module label
# to the base package path of the considered application.
module_path = app_module_path
if '.' in app_module_path:
base_package = app_module_path.rsplit('.' + app_label, 1)[0]
module_path = '{}.{}'.format(base_package, module_label)
# Try to import this module from the related app that is specified
# in the Django settings.
local_imported_module = _import_module(module_path, classnames)
# If the module we tried to import is not located inside the machina
# vanilla apps, try to import it from the corresponding machina app.
machina_imported_module = None
if not app_module_path.startswith('machina.apps'):
machina_imported_module = _import_module(
'{}.{}'.format('machina.apps', module_label), classnames,
)
if local_imported_module is None and machina_imported_module is None:
raise AppNotFoundError('Error importing \'{}\''.format(module_path))
# Any local module is prioritized over the corresponding machina module
imported_modules = [
m for m in (local_imported_module, machina_imported_module) if m is not None
]
return _pick_up_classes(imported_modules, classnames)
def _import_module(module_path, classnames):
""" Tries to import the given Python module path. """
try:
imported_module = __import__(module_path, fromlist=classnames)
return imported_module
except ImportError:
# In case of an ImportError, the module being loaded generally does not exist. But an
# ImportError can occur if the module being loaded exists and another import located inside
# it failed.
#
        # In order to provide a meaningful traceback, the execution information can be inspected in
# order to determine which case to consider. If the execution information provides more than
        # a certain number of frames, this means that an ImportError occurred while loading the
# initial Python module.
__, __, exc_traceback = sys.exc_info()
frames = traceback.extract_tb(exc_traceback)
if len(frames) > 1:
raise
def _pick_up_classes(modules, classnames):
""" Given a list of class names to retrieve, try to fetch them from the specified list of
modules and returns the list of the fetched classes.
"""
klasses = []
for classname in classnames:
klass = None
for module in modules:
if hasattr(module, classname):
klass = getattr(module, classname)
break
if not klass:
raise ClassNotFoundError('Error fetching \'{}\' in {}'.format(
classname, str([module.__name__ for module in modules]))
)
klasses.append(klass)
return klasses
def _get_app_module_path(module_label):
""" Given a module label, loop over the apps specified in the INSTALLED_APPS to find the
corresponding application module path.
"""
app_name = module_label.rsplit('.', 1)[0]
for app in settings.INSTALLED_APPS:
if app.endswith('.' + app_name) or app == app_name:
return app
return None
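# Illustrative sketch (not part of django-machina): typical use of the
# loaders above from application code, mirroring the docstring example in
# get_classes(). Calling this requires configured Django settings; the
# wrapper function itself is a hypothetical example.
def _example_usage():
    Forum, ForumReadTrack = get_classes('forum.models', ['Forum', 'ForumReadTrack'])
    ForumOnly = get_class('forum.models', 'Forum')
    return Forum, ForumReadTrack, ForumOnly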
|
ellmetha/django-machina
|
machina/core/loading.py
|
Python
|
bsd-3-clause
| 3,900
|
from __future__ import absolute_import
from .base import *
########## IN-MEMORY TEST DATABASE
DATABASES = {
"default": env.db('sqlite://:memory:'),
}
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
########## END EMAIL CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
# CACHES = {
# 'default': env.cache_url_config('locmem://'),
# }
########## END CACHE CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING['loggers'] = {
'': {
'handlers': ['file', 'console'],
'level': 'DEBUG'
}
}
########## END LOGGING CONFIGURATION
|
openpolis/op-accesso
|
project/accesso/settings/test.py
|
Python
|
bsd-3-clause
| 934
|
# -*- coding: utf-8 -*-
from widelanguagedemo.app import create_app
from widelanguagedemo.settings import ProdConfig, DevConfig
def test_production_config():
app = create_app(ProdConfig)
assert app.config['ENV'] == 'prod'
assert app.config['DEBUG'] is False
assert app.config['DEBUG_TB_ENABLED'] is False
assert app.config['ASSETS_DEBUG'] is False
def test_dev_config():
app = create_app(DevConfig)
assert app.config['ENV'] == 'dev'
assert app.config['DEBUG'] is True
assert app.config['ASSETS_DEBUG'] is True
|
larsyencken/wide-language-demo
|
tests/test_config.py
|
Python
|
bsd-3-clause
| 548
|
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine_url = 'sqlite:///good-happens.db'
if 'DB_URL' in os.environ:
engine_url = os.environ['DB_URL']
engine = create_engine(engine_url, convert_unicode=True)
session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = session.query_property()
def init_db():
# import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling init_db()
import server.models as models
Base.metadata.create_all(bind=engine)
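# Illustrative sketch (not part of the original module): how a caller would
# typically exercise the scoped session and init_db() defined above. The
# wrapper function is a hypothetical usage example.
def _example_usage():
    from sqlalchemy import text
    init_db()  # creates tables for every model imported inside init_db()
    result = session.execute(text('SELECT 1')).scalar()
    session.remove()  # scoped_session cleanup, e.g. at the end of a request
    return result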
|
wastevensv/good-happens
|
src/server/database.py
|
Python
|
bsd-3-clause
| 838
|
class ContactData(SQLObject):
_connection = SQLiteConnection('slingshotsms.db')
# in reference to
# http://en.wikipedia.org/wiki/VCard
TEL = StringCol()
UID = StringCol()
PHOTO = BLOBCol()
N = StringCol()
FN = StringCol()
# contains all data as serialized vc, including the above columns
data = BLOBCol()
# TODO: support other formats + limit the list
def contact_list(self, limit = 200, format = 'json', q = False, timestamp = 0):
if q:
import base64
contacts = ContactData.select()
return "\n".join(["%s <%s>" % (contact.FN, contact.TEL) for contact in contacts])
else:
import base64
contacts = ContactData.select()
return json.dumps([{
'TEL': contact.TEL,
'N': contact.N,
'UID': contact.UID,
'PHOTO': base64.b64encode(contact.PHOTO),
'FN': contact.FN,} for contact in contacts])
contact_list.exposed = True
def import_vcard(self, vcard_file = ''):
""" given a vcard_file FieldStorage object, import vCards """
try:
vs = vobject.readComponents(vcard_file.value)
for v in vs:
# TODO: filter out contacts that don't have a telephone number
ContactData(FN=v.fn.value,
TEL=v.tel.value,
PHOTO=v.photo.value,
UID='blah', #TODO: implement UID checking / generation
N="%s %s" % (v.n.value.given, v.n.value.family),
data=str(v.serialize()))
return 'Contact saved'
except Exception, e:
print e
return "This contact could not be saved"
import_vcard.exposed = True
def export_vcard(self):
contacts = ContactData.select()
contact_string = "\n".join([contact.data for contact in contacts])
cherrypy.response.headers['Content-Disposition'] = "attachment; filename=vCards.vcf"
return contact_string
export_vcard.exposed = True
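# Illustrative sketch (not part of slingshotSMS): a minimal vCard string of
# the shape import_vcard() above parses with vobject. The contact details are
# made up; note that import_vcard() also reads a PHOTO property, so a real
# upload would need to include one.
EXAMPLE_VCARD = (
    "BEGIN:VCARD\n"
    "VERSION:3.0\n"
    "N:Doe;Jane;;;\n"
    "FN:Jane Doe\n"
    "TEL:+15551234567\n"
    "END:VCARD\n"
)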
|
developmentseed/slingshotSMS
|
messagesystem/messagesystem.py
|
Python
|
bsd-3-clause
| 2,124
|
# vim: set fileencoding=utf-8 :
from __future__ import absolute_import
from subprocess import Popen, PIPE
import logging
import os
logger = logging.getLogger(__name__)
class Script(object):
def __init__(self, name, path):
# Identifier for the script. Usually the file name, but doesn't need
# to be
self.name = name
# path of the actual script file, either absolute or relative to CWD
self.path = path
# Placeholder for subprocess object.
self.popen = None
def run(self):
"""Starts runnnig the script"""
logger.debug('Running script %s', self.path)
try:
self.popen = Popen(self.path, stdout=PIPE)
except OSError as e:
logger.exception('Error running %s' % self.path)
self.popen = None
return self.popen
def ran(self):
"""Indicates whether the script has ran succesfully"""
return self.popen is not None
def communicate(self, *args, **kwargs):
"""Calls communicate on the Popen object. For more info see the
subprocess documentation
"""
if self.popen:
logger.debug('Reading output of %s', self.path)
self.out, self.err = self.popen.communicate(*args, **kwargs)
return (self.out, self.err)
def kill(self, *args, **kwargs):
"""Calls kill on the Popen object. For more info see the subprocess
documentation
"""
if self.popen:
logger.debug('Killing script %s', self.path)
return self.popen.kill(*args, **kwargs)
def is_valid(self):
"""Checks whether to run a script. Right now we only check whether it
is writtable by others for security.
"""
stat = os.stat(self.path)
can_be_written_by_others = bool(stat.st_mode & 0002)
return not can_be_written_by_others
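# Illustrative sketch (not part of roac): the expected life cycle of a Script
# object defined above. The script path below is a hypothetical example.
def _example_run_script():
    script = Script(name='uptime', path='/usr/bin/uptime')
    if script.is_valid() and script.run():
        out, _ = script.communicate()
        return out
    return None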
|
roac-monitoring/roac-agent
|
roac/script.py
|
Python
|
bsd-3-clause
| 1,908
|
def catch1():
try:
blah()
except Exception as e:
print(e) #, e.args)
def catch2():
try:
raise BaseException()
except Exception as e:
print(e) #, e.args)
def catch3():
try:
raise BaseException()
except:
pass
catch1()
#catch2()
# catch3()
|
talapus/Ophidian
|
Exceptions.py
|
Python
|
bsd-3-clause
| 318
|
from datetime import date
import logging
import pytz
from sqlalchemy import and_, desc, func, or_
from sqlalchemy.orm.exc import DetachedInstanceError
from werkzeug.exceptions import BadRequest, NotFound, Conflict, InternalServerError
from rdr_service import config
from rdr_service.clock import CLOCK
from rdr_service.dao.api_user_dao import ApiUserDao
from rdr_service.dao.base_dao import UpdatableDao
from rdr_service.lib_fhir.fhirclient_4_0_0.models.codeableconcept import CodeableConcept
from rdr_service.lib_fhir.fhirclient_4_0_0.models.fhirabstractbase import FHIRValidationError
from rdr_service.lib_fhir.fhirclient_4_0_0.models.fhirdate import FHIRDate
from rdr_service.lib_fhir.fhirclient_4_0_0.models.fhirreference import FHIRReference
from rdr_service.lib_fhir.fhirclient_4_0_0.models.extension import Extension
from rdr_service.lib_fhir.fhirclient_4_0_0.models.humanname import HumanName
from rdr_service.lib_fhir.fhirclient_4_0_0.models.identifier import Identifier
from rdr_service.lib_fhir.fhirclient_4_0_0.models.observation import Observation
from rdr_service.lib_fhir.fhirclient_4_0_0.models.reference import Reference
from rdr_service.model.api_user import ApiUser
from rdr_service.model.deceased_report import DeceasedReport
from rdr_service.model.organization import Organization
from rdr_service.model.participant import Participant
from rdr_service.model.participant_summary import ParticipantSummary
from rdr_service.model.utils import to_client_participant_id
from rdr_service.participant_enums import DeceasedNotification, DeceasedReportDenialReason, DeceasedReportStatus,\
DeceasedStatus, SuspensionStatus, WithdrawalStatus
class DeceasedReportDao(UpdatableDao):
validate_version_match = False
status_map = {
'preliminary': DeceasedReportStatus.PENDING,
'final': DeceasedReportStatus.APPROVED,
'cancelled': DeceasedReportStatus.DENIED
}
def __init__(self):
super().__init__(DeceasedReport)
def _is_future_datetime(self, incoming_datetime):
utc_now = self._convert_to_utc_datetime(CLOCK.now())
utc_incoming_datetime = self._convert_to_utc_datetime(incoming_datetime)
return utc_now < utc_incoming_datetime
@staticmethod
def _is_future_date(incoming_date):
return date.today() < incoming_date
def _read_report_status(self, observation: Observation):
if observation.status is None:
raise BadRequest('Missing required field: status')
if observation.status not in self.status_map:
raise BadRequest(f'Invalid status "{observation.status}"')
return self.status_map[observation.status]
@staticmethod
def _find_class_in_array(cls, array):
for item in array:
if isinstance(item, cls):
return item
return None
def _read_api_request_author(self, observation: Observation):
user_reference = self._find_class_in_array(Reference, observation.performer)
if user_reference is None:
raise BadRequest('Performer reference for authoring user required')
return ApiUserDao().load_or_init(user_reference.type, user_reference.reference)
@staticmethod
def _read_authored_timestamp(observation: Observation):
if observation.issued is None:
raise BadRequest('Report issued date is required')
return observation.issued.date
@staticmethod
def _read_encounter(observation: Observation, report): # Get notification data
if observation.encounter is None:
raise BadRequest('Encounter information required for deceased report')
encounter = observation.encounter
if encounter.reference is None:
raise BadRequest('Invalid encounter information')
report.notification = DeceasedNotification(encounter.reference)
if report.notification == DeceasedNotification.OTHER:
if encounter.display is None:
raise BadRequest('Encounter display description text required when OTHER is set')
report.notificationOther = encounter.display
@staticmethod
def _read_reporter_data(observation: Observation, report):
extensions = observation.extension
if extensions is None or\
not isinstance(extensions, list) or\
len(extensions) == 0:
raise BadRequest('Reporter extension data is required')
extension = extensions[0]
if extension.valueHumanName is None:
raise BadRequest('Reporter HumanName data is required')
human_name = extension.valueHumanName
if human_name.text is None:
raise BadRequest('Missing reporter name')
report.reporterName = human_name.text
if human_name.extension is None:
raise BadRequest('Missing reporter extensions')
reporter_extensions = human_name.extension
if not isinstance(reporter_extensions, list):
raise BadRequest('Invalid reporter extensions')
for reporter_extension in reporter_extensions:
if reporter_extension.url == 'http://hl7.org/fhir/ValueSet/relatedperson-relationshiptype':
report.reporterRelationship = reporter_extension.valueCode
elif reporter_extension.url == 'https://www.pmi-ops.org/email-address':
report.reporterEmail = reporter_extension.valueString
elif reporter_extension.url == 'https://www.pmi-ops.org/phone-number':
report.reporterPhone = reporter_extension.valueString
if report.reporterRelationship is None: # If this is unset still then it must have not been provided
raise BadRequest('Reporter association is required')
def _read_denial_extension(self, observation: Observation, report):
if observation.extension is None:
raise BadRequest('Report denial information missing')
denial_extension = self._find_class_in_array(Extension, observation.extension)
if denial_extension.valueReference is None:
raise BadRequest('Report denial information missing')
denial_reference = denial_extension.valueReference
report.denialReason = DeceasedReportDenialReason(denial_reference.reference)
if report.denialReason == DeceasedReportDenialReason.OTHER:
report.denialReasonOther = denial_reference.display
def _load_participant(self, participant_id):
with self.session() as session:
participant = session.query(Participant).filter(
Participant.participantId == participant_id
).one_or_none()
if participant is None:
raise NotFound(f'Participant P{participant_id} not found.')
return participant
@staticmethod
def _update_participant_summary(session, report: DeceasedReport):
"""
These are the three fields from the Participant Summary that are affected by deceased reports,
and explanations of what they will provide and when:
* deceasedStatus
Will be UNSET for any participants that have no deceased reports (or only reports that have been denied).
Is set to PENDING when a participant has a deceased report with a status of *preliminary*.
And will be APPROVED for participants that have a *final* deceased report.
* deceasedAuthored
The most recent **issued** date received for an active deceased report. So for participants with a PENDING
deceased status this will be the time that an external user created a deceased report for the participant.
And for participants with an APPROVED status, this will be the time that the report was finalized.
* dateOfDeath
Date that the participant passed away if it was provided when creating or reviewing the report (using the
date from the reviewing request if both requests provided the field).
"""
participant_summary = session.query(ParticipantSummary).filter(
ParticipantSummary.participantId == report.participantId
).one_or_none()
if participant_summary:
if report.status == DeceasedReportStatus.DENIED:
participant_summary.deceasedStatus = DeceasedStatus.UNSET
participant_summary.deceasedAuthored = None
participant_summary.dateOfDeath = None
else:
participant_summary.deceasedStatus = DeceasedStatus(str(report.status))
participant_summary.dateOfDeath = report.dateOfDeath
if report.status == DeceasedReportStatus.APPROVED:
participant_summary.deceasedAuthored = report.reviewed
else:
participant_summary.deceasedAuthored = report.authored
def load_model(self, id_):
with self.session() as session:
report = session.query(DeceasedReport).filter(DeceasedReport.id == id_).one_or_none()
if report is None:
raise NotFound(f'DeceasedReport with id "{id_}" not found')
else:
return report
# pylint: disable=unused-argument
def from_client_json(self, resource, participant_id, id_=None, expected_version=None, client_id=None):
"""
The API takes deceased report data structured as a FHIR Specification 4.0 Observation
(http://hl7.org/fhir/observation.html). Listed below is an outline of each field, what it means for a deceased
report, and any requirements for the field.
.. code-block:: javascript
{
// For creating a deceased report, the status must be given as preliminary
// REQUIRED
status: "preliminary",
// CodeableConcept structure defining the observation as a deceased report for the RDR API
// REQUIRED
code: {
text: "DeceasedReport”
},
// The date of death of the participant
// OPTIONAL
effectiveDateTime: "2020-01-01",
// Details for how the user creating the report has become aware of the participant's deceased status
// REQUIRED
encounter: {
// Must be one of the following: EHR, ATTEMPTED_CONTACT, NEXT_KIN_HPO, NEXT_KIN_SUPPORT, OTHER
reference: "OTHER",
// Required if reference is given as OTHER
display: "Some other reason"
},
// The user that has created the deceased report
// REQUIRED
performer: [
{
type: "https://www.pmi-ops.org/healthpro-username",
reference: "user.name@pmi-ops.org"
}
],
// The timestamp of when the user created the report
// REQUIRED
issued: "2020-01-31T08:34:12Z", // assumed to be UTC if no timezone information is provided
// Text field for providing the cause of death
// OPTIONAL
valueString: "Heart disease",
// Array providing a single extension with a HumanName value providing information on the person
// that has reported that the participant has passed away
// REQUIRED unless the encounter specifies EHR or OTHER
extension: [
{
url: "https://www.pmi-ops.org/deceased-reporter",
valueHumanName: {
text: "John Doe",
extension: [
{
// REQUIRED
url: "http://hl7.org/fhir/ValueSet/relatedperson-relationshiptype",
valueCode: "SIB"
},
{
// OPTIONAL
url: "https://www.pmi-ops.org/email-address",
valueString: "jdoe@yahoo.com"
},
{
// OPTIONAL
url: "https://www.pmi-ops.org/phone-number",
valueString: "123-456-7890"
}
]
}
}
]
}
        Reports for participants that are not paired to an HPO are automatically finalized,
otherwise the reports remain in the *preliminary* state until they are reviewed by an additional user.
A review request can set a report as *final* or *cancelled*. Here's a description of the relevant fields
for reviewing a report:
.. code-block:: javascript
{
// Review status for the deceased report. Can be "final" to finalize a report, or "cancelled" to deny it
// REQUIRED
status: "final",
// REQUIRED
code: {
text: "DeceasedReport”
},
// Information for the user that has reviewed the report.
// REQUIRED
performer: [
{
type: "https://www.pmi-ops.org/healthpro-username",
reference: "user.name@pmi-ops.org"
}
],
// The date of death of the participant. Will replace what is currently on the report.
// OPTIONAL
effectiveDateTime: "2020-01-01",
// The timestamp of when the user reviewed the report
issued: "2020-01-31T08:34:12Z" // assumed to be UTC if no timezone information is provided
// Additional information for defining why the report is cancelled if cancelling the report
// REQUIRED if providing a status of "cancelled"
extension: [
{
url: "https://www.pmi-ops.org/observation-denial-reason",
valueReference: {
// Must be one of the following: INCORRECT_PARTICIPANT, MARKED_IN_ERROR,
// INSUFFICIENT_INFORMATION, OTHER
reference: "OTHER",
// Text description of the reason for cancelling
// REQUIRED if reference gives OTHER
display: "Another reason for denying the report"
}
}
]
}
"""
try:
observation = Observation(resource)
except FHIRValidationError:
raise BadRequest('Invalid FHIR Observation structure')
if observation.performer is None or not isinstance(observation.performer, list):
raise BadRequest('Performer array is required')
requested_report_status = self._read_report_status(observation)
if id_ is None: # No report was referenced with the request, so the request is to create a new one
if requested_report_status != DeceasedReportStatus.PENDING:
raise BadRequest('Status field should be "preliminary" when creating deceased report')
report = DeceasedReport(participantId=participant_id)
self._read_encounter(observation, report)
if report.notification in [DeceasedNotification.ATTEMPTED_CONTACT,
DeceasedNotification.NEXT_KIN_HPO,
DeceasedNotification.NEXT_KIN_SUPPORT]:
self._read_reporter_data(observation, report)
report.author = self._read_api_request_author(observation)
report.authored = self._read_authored_timestamp(observation)
report.causeOfDeath = observation.valueString
else:
report = self.load_model(id_)
if report.status != DeceasedReportStatus.PENDING:
raise BadRequest('Can only approve or deny a PENDING deceased report')
report.reviewer = self._read_api_request_author(observation)
report.reviewed = self._read_authored_timestamp(observation)
if requested_report_status == DeceasedReportStatus.DENIED:
self._read_denial_extension(observation, report)
report.status = requested_report_status
if observation.effectiveDateTime is not None:
date_of_death = observation.effectiveDateTime.date
if self._is_future_date(date_of_death):
raise BadRequest(f'Report effective datetime can not be a future date, received {date_of_death}')
report.dateOfDeath = date_of_death
return report
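    # Illustrative sketch (not part of the RDR service): a minimal request
    # body of the shape documented in from_client_json() above, for creating
    # a preliminary deceased report via an EHR notification. All field values
    # are made up for this example.
    @staticmethod
    def _example_create_request_body():
        return {
            'status': 'preliminary',
            'code': {'text': 'DeceasedReport'},
            'encounter': {'reference': 'EHR'},
            'performer': [{
                'type': 'https://www.pmi-ops.org/healthpro-username',
                'reference': 'user.name@pmi-ops.org'
            }],
            'issued': '2020-01-31T08:34:12Z'
        }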
@staticmethod
def _convert_to_utc_datetime(datetime):
if datetime.tzinfo is None:
return pytz.utc.localize(datetime)
else:
return datetime.astimezone(pytz.utc)
def _to_fhir_date(self, datetime):
utc_datetime = self._convert_to_utc_datetime(datetime)
fhir_date = FHIRDate()
fhir_date.date = utc_datetime
return fhir_date
def _add_performer_data(self, observation: Observation, user: ApiUser, datetime, is_author):
performer = FHIRReference()
performer.type = user.system
performer.reference = user.username
# Add extension for details on the user's action
extension = Extension()
extension.url = 'https://www.pmi-ops.org/observation/' + ('authored' if is_author else 'reviewed')
extension.valueDateTime = self._to_fhir_date(datetime)
performer.extension = [extension]
observation.performer.append(performer)
def to_client_json(self, model: DeceasedReport):
"""
The FHIR Observation fields used for the deceased reports coming from the API are the same as for structures
sent to the API when creating and reviewing reports with the exceptions outlined below:
* Participant ID
The *subject* field will be populated with the ID of deceased report's participant.
.. code-block:: javascript
{
...
"subject": {
"reference": "P000000000"
},
...
}
* Creator and reviewer
For reviewed reports, the *performer* array will contain both the author of the report and the reviewer
along with the dates that they took their actions.
.. code-block:: javascript
{
...
"performer": [
{
"type": "https://www.pmi-ops.org/healthpro-username",
"reference": "user.name@pmi-ops.org"
"extension": [
{
"url": "https://www.pmi-ops.org/observation/authored",
"valueDateTime": "2020-01-31T08:34:12Z"
}
]
},
{
"type": "https://www.pmi-ops.org/healthpro-username",
"reference": "another.user@pmi-ops.org"
"extension": [
{
"url": "https://www.pmi-ops.org/observation/reviewed",
"valueDateTime": "2020-02-05T09:00:27Z"
}
]
}
],
...
}
"""
status_map = {
DeceasedReportStatus.PENDING: 'preliminary',
DeceasedReportStatus.APPROVED: 'final',
DeceasedReportStatus.DENIED: 'cancelled'
}
observation = Observation()
code = CodeableConcept()
code.text = 'DeceasedReport'
observation.code = code
identifier = Identifier()
identifier.value = str(model.id)
observation.identifier = [identifier]
subject = FHIRReference()
subject.reference = to_client_participant_id(model.participantId)
observation.subject = subject
observation.status = status_map[model.status]
observation.performer = []
self._add_performer_data(observation, model.author, model.authored, is_author=True)
try:
if model.reviewer:
self._add_performer_data(observation, model.reviewer, model.reviewed, is_author=False)
except DetachedInstanceError:
# With the current structure the reviewer will have been eager-loaded or set on the model,
# but the model is detached and the reviewer is expected to be None on pending reports.
# If the reviewer is None, sqlalchemy will try to check the database to see if it shouldn't be
# and this exception type will result.
pass
encounter = FHIRReference()
encounter.reference = str(model.notification)
if model.notification == DeceasedNotification.OTHER:
encounter.display = model.notificationOther
observation.encounter = encounter
if model.notification in [DeceasedNotification.NEXT_KIN_SUPPORT,
DeceasedNotification.NEXT_KIN_HPO,
DeceasedNotification.ATTEMPTED_CONTACT]:
reporter_extension = Extension()
reporter_extension.url = 'https://www.pmi-ops.org/deceased-reporter'
human_name = HumanName()
reporter_extension.valueHumanName = human_name
human_name.text = model.reporterName
human_name.extension = []
relationship_extension = Extension()
relationship_extension.url = 'http://hl7.org/fhir/ValueSet/relatedperson-relationshiptype'
relationship_extension.valueCode = model.reporterRelationship
human_name.extension.append(relationship_extension)
if model.reporterEmail:
email_extension = Extension()
email_extension.url = 'https://www.pmi-ops.org/email-address'
email_extension.valueString = model.reporterEmail
human_name.extension.append(email_extension)
if model.reporterPhone:
phone_extension = Extension()
phone_extension.url = 'https://www.pmi-ops.org/phone-number'
phone_extension.valueString = model.reporterPhone
human_name.extension.append(phone_extension)
observation.extension = [reporter_extension]
if model.status == DeceasedReportStatus.PENDING:
observation.issued = self._to_fhir_date(model.authored)
else:
observation.issued = self._to_fhir_date(model.reviewed)
date_of_death = FHIRDate()
date_of_death.date = model.dateOfDeath
observation.effectiveDateTime = date_of_death
observation.valueString = model.causeOfDeath
# Add denial reason extension
if model.status == DeceasedReportStatus.DENIED:
denial_reason_extension = Extension()
denial_reason_extension.url = 'https://www.pmi-ops.org/observation-denial-reason'
denial_reason_reference = FHIRReference()
denial_reason_reference.reference = str(model.denialReason)
if model.denialReason == DeceasedReportDenialReason.OTHER:
denial_reason_reference.display = model.denialReasonOther
denial_reason_extension.valueReference = denial_reason_reference
observation.extension = [denial_reason_extension]
return observation.as_json()
@staticmethod
def _deceased_report_lock_name(participant_id):
return f'rdr.deceased_report.p{participant_id}'
@staticmethod
def _release_report_lock(session, participant_id):
release_result = session.execute(
f"SELECT RELEASE_LOCK('{DeceasedReportDao._deceased_report_lock_name(participant_id)}')"
).scalar()
if release_result is None:
logging.error(f'Deceased report lock did not exist for P{participant_id}!')
elif release_result == 0:
logging.error(f'Deceased report lock for P{participant_id} was not taken by this thread!')
@staticmethod
def _can_insert_active_report(session, participant_id, lock_timeout_seconds=30):
# Obtain lock for creating a participant's deceased report
        # If the named lock is free, 1 is returned immediately. If the lock is already taken, the call waits until
        # it is free before making the check. The documentation states that None is returned in error cases.
lock_result = session.execute(
f"SELECT GET_LOCK('{DeceasedReportDao._deceased_report_lock_name(participant_id)}', {lock_timeout_seconds})"
).scalar()
if lock_result == 1:
# If we have the lock, we know we're the only transaction validating the insert.
has_active_reports_query = session.query(DeceasedReport).filter(
DeceasedReport.participantId == participant_id,
DeceasedReport.status != DeceasedReportStatus.DENIED
)
if session.query(has_active_reports_query.exists()).scalar():
raise Conflict(f'Participant P{participant_id} already has a preliminary or final deceased report')
else:
return True
else:
            # If we got an error from the database, or the lock stayed taken for the whole timeout, something's wrong
logging.error(f'Database error retrieving named lock for P{participant_id}, '
f'received result: "{lock_result}"')
raise InternalServerError('Unable to create deceased report')
def is_valid(self, report: DeceasedReport):
if self._is_future_datetime(report.authored):
raise BadRequest(f'Report issued date can not be a future date, received {report.authored}')
if report.notification == DeceasedNotification.NEXT_KIN_SUPPORT:
if not report.reporterRelationship:
raise BadRequest(f'Missing reporter relationship')
return True
def insert_with_session(self, session, obj: DeceasedReport):
# Should auto-approve reports for unpaired participants
participant = self._load_participant(obj.participantId)
if participant.hpoId == 0:
obj.status = DeceasedReportStatus.APPROVED
obj.reviewer = obj.author
obj.reviewed = obj.authored
if self.is_valid(obj) and self._can_insert_active_report(session, obj.participantId):
self._update_participant_summary(session, obj)
insert_result = super(DeceasedReportDao, self).insert_with_session(session, obj)
self._release_report_lock(session, obj.participantId)
return insert_result
def update_with_session(self, session, obj: DeceasedReport):
self._update_participant_summary(session, obj)
return super(DeceasedReportDao, self).update_with_session(session, obj)
def get_id(self, obj: DeceasedReport):
return obj.id
def get_etag(self, id_, participant_id): # pylint: disable=unused-argument
return None
def load_reports(self, participant_id=None, org_id=None, status=None):
"""
        Deceased reports can be listed for individual participants, or all of the reports matching a given status
        (PENDING, APPROVED, or DENIED) and/or organization id (such as UNSET). Reports will be listed by the date of
        the last action taken on them (authored or reviewed), with the most recent reports appearing at the top.
"""
ids_ignored_in_filter = config.getSettingJson(config.DECEASED_REPORT_FILTER_EXCEPTIONS, [])
with self.session() as session:
# Order reports by newest to oldest based on last date a user modified it
query = session.query(DeceasedReport).join(Participant).order_by(
desc(func.coalesce(DeceasedReport.reviewed, DeceasedReport.authored))
).filter(
or_(
and_(
Participant.suspensionStatus == SuspensionStatus.NOT_SUSPENDED,
Participant.withdrawalStatus == WithdrawalStatus.NOT_WITHDRAWN
),
Participant.participantId.in_(ids_ignored_in_filter)
)
)
if participant_id is not None:
query = query.filter(DeceasedReport.participantId == participant_id)
else:
if org_id is not None:
if org_id == 'UNSET':
query = query.filter(Participant.organizationId.is_(None))
else:
# Join and filter by the participant's Organization
query = query.join(Organization).filter(Organization.externalId == org_id)
if status is not None:
if status not in self.status_map:
raise BadRequest(f'Invalid status "{status}"')
query = query.filter(DeceasedReport.status == self.status_map[status])
return query.all()
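# --- Illustrative sketch (not part of the DAO above) --------------------------------------------
# The DAO serializes report creation per participant with MySQL named locks (GET_LOCK /
# RELEASE_LOCK), as seen in _can_insert_active_report and _release_report_lock. The helper below
# shows the same pattern in isolation. It is a minimal sketch: `session` is assumed to be any
# SQLAlchemy session bound to a MySQL engine, and the lock name is a hypothetical placeholder.
def _named_lock_sketch(session, lock_name='rdr.example_lock', timeout_seconds=30):
    """Run a critical section while holding a MySQL named lock (illustrative only)."""
    acquired = session.execute(
        f"SELECT GET_LOCK('{lock_name}', {timeout_seconds})"
    ).scalar()
    if acquired != 1:
        # None means a database error, 0 means the wait timed out while another holder kept the lock
        raise RuntimeError(f'could not acquire {lock_name}')
    try:
        pass  # critical section: check for conflicting rows, then insert
    finally:
        session.execute(f"SELECT RELEASE_LOCK('{lock_name}')")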
|
all-of-us/raw-data-repository
|
rdr_service/dao/deceased_report_dao.py
|
Python
|
bsd-3-clause
| 29,962
|
import graphene
from ...core.permissions import PagePermissions
from ...page import models
from ..core.mutations import BaseBulkMutation, ModelBulkDeleteMutation
class PageBulkDelete(ModelBulkDeleteMutation):
class Arguments:
ids = graphene.List(
graphene.ID, required=True, description="List of page IDs to delete."
)
class Meta:
description = "Deletes pages."
model = models.Page
permissions = (PagePermissions.MANAGE_PAGES,)
class PageBulkPublish(BaseBulkMutation):
class Arguments:
ids = graphene.List(
graphene.ID, required=True, description="List of page IDs to (un)publish."
)
is_published = graphene.Boolean(
required=True, description="Determine if pages will be published or not."
)
class Meta:
description = "Publish pages."
model = models.Page
permissions = (PagePermissions.MANAGE_PAGES,)
@classmethod
def bulk_action(cls, queryset, is_published):
queryset.update(is_published=is_published)
|
maferelo/saleor
|
saleor/graphql/page/bulk_mutations.py
|
Python
|
bsd-3-clause
| 1,076
|
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import warnings
import os.path as op
import numpy as np
from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_allclose
from mne import (pick_channels, pick_types, Epochs, read_events,
set_eeg_reference, set_bipolar_reference,
add_reference_channels)
from mne.epochs import BaseEpochs
from mne.io import read_raw_fif
from mne.io.constants import FIFF
from mne.io.proj import _has_eeg_average_ref_proj, Projection
from mne.io.reference import _apply_reference
from mne.datasets import testing
from mne.utils import run_tests_if_main
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
eve_fname = op.join(data_dir, 'sample_audvis_trunc_raw-eve.fif')
ave_fname = op.join(data_dir, 'sample_audvis_trunc-ave.fif')
def _test_reference(raw, reref, ref_data, ref_from):
"""Test whether a reference has been correctly applied."""
# Separate EEG channels from other channel types
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=True, exclude='bads')
    # Calculate indices of reference channels
picks_ref = [raw.ch_names.index(ch) for ch in ref_from]
# Get data
_data = raw._data
_reref = reref._data
# Check that the ref has been properly computed
if ref_data is not None:
assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2))
# Get the raw EEG data and other channel data
raw_eeg_data = _data[..., picks_eeg, :]
raw_other_data = _data[..., picks_other, :]
# Get the rereferenced EEG data
reref_eeg_data = _reref[..., picks_eeg, :]
reref_other_data = _reref[..., picks_other, :]
# Check that non-EEG channels are untouched
assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15)
# Undo rereferencing of EEG channels if possible
if ref_data is not None:
if isinstance(raw, BaseEpochs):
unref_eeg_data = reref_eeg_data + ref_data[:, np.newaxis, :]
else:
unref_eeg_data = reref_eeg_data + ref_data
assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15)
@testing.requires_testing_data
def test_apply_reference():
"""Test base function for rereferencing."""
raw = read_raw_fif(fif_fname, preload=True)
# Rereference raw data by creating a copy of original data
reref, ref_data = _apply_reference(
raw.copy(), ref_from=['EEG 001', 'EEG 002'])
assert_true(reref.info['custom_ref_applied'])
_test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
# The CAR reference projection should have been removed by the function
assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))
# Test that data is modified in place when copy=False
reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'])
assert_true(raw is reref)
# Test that disabling the reference does not change anything
reref, ref_data = _apply_reference(raw.copy(), [])
assert_array_equal(raw._data, reref._data)
# Test re-referencing Epochs object
raw = read_raw_fif(fif_fname, preload=False)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
reref, ref_data = _apply_reference(
epochs.copy(), ref_from=['EEG 001', 'EEG 002'])
assert_true(reref.info['custom_ref_applied'])
_test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])
# Test re-referencing Evoked object
evoked = epochs.average()
reref, ref_data = _apply_reference(
evoked.copy(), ref_from=['EEG 001', 'EEG 002'])
assert_true(reref.info['custom_ref_applied'])
_test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])
# Referencing needs data to be preloaded
raw_np = read_raw_fif(fif_fname, preload=False)
assert_raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])
# Test having inactive SSP projections that deal with channels involved
# during re-referencing
raw = read_raw_fif(fif_fname, preload=True)
raw.add_proj(
Projection(
active=False,
data=dict(
col_names=['EEG 001', 'EEG 002'],
row_names=None,
data=np.array([[1, 1]]),
ncol=2,
nrow=1
),
desc='test',
kind=1,
)
)
# Projection concerns channels mentioned in projector
assert_raises(RuntimeError, _apply_reference, raw, ['EEG 001'])
# Projection does not concern channels mentioned in projector, no error
_apply_reference(raw, ['EEG 003'], ['EEG 004'])
@testing.requires_testing_data
def test_set_eeg_reference():
"""Test rereference eeg data."""
raw = read_raw_fif(fif_fname, preload=True)
raw.info['projs'] = []
# Test setting an average reference projection
assert_true(not _has_eeg_average_ref_proj(raw.info['projs']))
reref, ref_data = set_eeg_reference(raw, projection=True)
assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
assert_true(not reref.info['projs'][0]['active'])
assert_true(ref_data is None)
reref.apply_proj()
eeg_chans = [raw.ch_names[ch]
for ch in pick_types(raw.info, meg=False, eeg=True)]
_test_reference(raw, reref, ref_data,
[ch for ch in eeg_chans if ch not in raw.info['bads']])
# Test setting an average reference when one was already present
with warnings.catch_warnings(record=True):
reref, ref_data = set_eeg_reference(raw, copy=False, projection=True)
assert_true(ref_data is None)
# Test setting an average reference on non-preloaded data
raw_nopreload = read_raw_fif(fif_fname, preload=False)
raw_nopreload.info['projs'] = []
reref, ref_data = set_eeg_reference(raw_nopreload, projection=True)
assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
assert_true(not reref.info['projs'][0]['active'])
# Rereference raw data by creating a copy of original data
reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
assert_true(reref.info['custom_ref_applied'])
_test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
# Test that data is modified in place when copy=False
reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
copy=False)
assert_true(raw is reref)
# Test moving from custom to average reference
reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'])
reref, _ = set_eeg_reference(reref, projection=True)
assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
assert_equal(reref.info['custom_ref_applied'], False)
# When creating an average reference fails, make sure the
# custom_ref_applied flag remains untouched.
reref = raw.copy()
reref.info['custom_ref_applied'] = True
reref.pick_types(eeg=False) # Cause making average ref fail
assert_raises(ValueError, set_eeg_reference, reref, projection=True)
assert_true(reref.info['custom_ref_applied'])
# Test moving from average to custom reference
reref, ref_data = set_eeg_reference(raw, projection=True)
reref, _ = set_eeg_reference(reref, ['EEG 001', 'EEG 002'])
assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))
assert_equal(reref.info['custom_ref_applied'], True)
# Test that disabling the reference does not change anything
reref, ref_data = set_eeg_reference(raw, [])
assert_array_equal(raw._data, reref._data)
# Test that average reference gives identical results when calculated
# via SSP projection (projection=True) or directly (projection=False)
raw.info['projs'] = []
reref_1, _ = set_eeg_reference(raw.copy(), projection=True)
reref_1.apply_proj()
reref_2, _ = set_eeg_reference(raw.copy(), projection=False)
assert_allclose(reref_1._data, reref_2._data, rtol=1e-6, atol=1e-15)
# Test average reference without projection
reref, ref_data = set_eeg_reference(raw.copy(), ref_channels="average",
projection=False)
_test_reference(raw, reref, ref_data, eeg_chans)
# projection=True only works for ref_channels='average'
assert_raises(ValueError, set_eeg_reference, raw, [], True, True)
assert_raises(ValueError, set_eeg_reference, raw, ['EEG 001'], True, True)
@testing.requires_testing_data
def test_set_bipolar_reference():
"""Test bipolar referencing."""
raw = read_raw_fif(fif_fname, preload=True)
raw.apply_proj()
reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar',
{'kind': FIFF.FIFFV_EOG_CH,
'extra': 'some extra value'})
assert_true(reref.info['custom_ref_applied'])
# Compare result to a manual calculation
a = raw.copy().pick_channels(['EEG 001', 'EEG 002'])
a = a._data[0, :] - a._data[1, :]
b = reref.copy().pick_channels(['bipolar'])._data[0, :]
assert_allclose(a, b)
# Original channels should be replaced by a virtual one
assert_true('EEG 001' not in reref.ch_names)
assert_true('EEG 002' not in reref.ch_names)
assert_true('bipolar' in reref.ch_names)
# Check channel information
bp_info = reref.info['chs'][reref.ch_names.index('bipolar')]
an_info = reref.info['chs'][raw.ch_names.index('EEG 001')]
for key in bp_info:
if key == 'loc':
assert_array_equal(bp_info[key], 0)
elif key == 'coil_type':
assert_equal(bp_info[key], FIFF.FIFFV_COIL_EEG_BIPOLAR)
elif key == 'kind':
assert_equal(bp_info[key], FIFF.FIFFV_EOG_CH)
else:
assert_equal(bp_info[key], an_info[key])
assert_equal(bp_info['extra'], 'some extra value')
# Minimalist call
reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002')
assert_true('EEG 001-EEG 002' in reref.ch_names)
# Minimalist call with twice the same anode
reref = set_bipolar_reference(raw,
['EEG 001', 'EEG 001', 'EEG 002'],
['EEG 002', 'EEG 003', 'EEG 003'])
assert_true('EEG 001-EEG 002' in reref.ch_names)
assert_true('EEG 001-EEG 003' in reref.ch_names)
# Set multiple references at once
reref = set_bipolar_reference(
raw,
['EEG 001', 'EEG 003'],
['EEG 002', 'EEG 004'],
['bipolar1', 'bipolar2'],
[{'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'},
{'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'}],
)
a = raw.copy().pick_channels(['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004'])
a = np.array([a._data[0, :] - a._data[1, :],
a._data[2, :] - a._data[3, :]])
b = reref.copy().pick_channels(['bipolar1', 'bipolar2'])._data
assert_allclose(a, b)
# Test creating a bipolar reference that doesn't involve EEG channels:
# it should not set the custom_ref_applied flag
reref = set_bipolar_reference(raw, 'MEG 0111', 'MEG 0112',
ch_info={'kind': FIFF.FIFFV_MEG_CH})
assert_true(not reref.info['custom_ref_applied'])
assert_true('MEG 0111-MEG 0112' in reref.ch_names)
# Test a battery of invalid inputs
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar')
assert_raises(ValueError, set_bipolar_reference, raw,
['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar')
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2'])
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', 'bipolar',
ch_info=[{'foo': 'bar'}, {'foo': 'bar'}])
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', ch_name='EEG 003')
def _check_channel_names(inst, ref_names):
"""Check channel names."""
if isinstance(ref_names, str):
ref_names = [ref_names]
# Test that the names of the reference channels are present in `ch_names`
ref_idx = pick_channels(inst.info['ch_names'], ref_names)
assert_true(len(ref_idx), len(ref_names))
# Test that the names of the reference channels are present in the `chs`
# list
inst.info._check_consistency() # Should raise no exceptions
@testing.requires_testing_data
def test_add_reference():
"""Test adding a reference."""
raw = read_raw_fif(fif_fname, preload=True)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
# check if channel already exists
assert_raises(ValueError, add_reference_channels,
raw, raw.info['ch_names'][0])
# add reference channel to Raw
raw_ref = add_reference_channels(raw, 'Ref', copy=True)
assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
_check_channel_names(raw_ref, 'Ref')
orig_nchan = raw.info['nchan']
raw = add_reference_channels(raw, 'Ref', copy=False)
assert_array_equal(raw._data, raw_ref._data)
assert_equal(raw.info['nchan'], orig_nchan + 1)
_check_channel_names(raw, 'Ref')
# for Neuromag fif's, the reference electrode location is placed in
# elements [3:6] of each "data" electrode location
assert_allclose(raw.info['chs'][-1]['loc'][:3],
raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6)
ref_idx = raw.ch_names.index('Ref')
ref_data, _ = raw[ref_idx]
assert_array_equal(ref_data, 0)
# add reference channel to Raw when no digitization points exist
raw = read_raw_fif(fif_fname).crop(0, 1).load_data()
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
del raw.info['dig']
raw_ref = add_reference_channels(raw, 'Ref', copy=True)
assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
_check_channel_names(raw_ref, 'Ref')
orig_nchan = raw.info['nchan']
raw = add_reference_channels(raw, 'Ref', copy=False)
assert_array_equal(raw._data, raw_ref._data)
assert_equal(raw.info['nchan'], orig_nchan + 1)
_check_channel_names(raw, 'Ref')
# Test adding an existing channel as reference channel
assert_raises(ValueError, add_reference_channels, raw,
raw.info['ch_names'][0])
# add two reference channels to Raw
raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
_check_channel_names(raw_ref, ['M1', 'M2'])
assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
assert_array_equal(raw_ref._data[-2:, :], 0)
raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
_check_channel_names(raw, ['M1', 'M2'])
ref_idx = raw.ch_names.index('M1')
ref_idy = raw.ch_names.index('M2')
ref_data, _ = raw[[ref_idx, ref_idy]]
assert_array_equal(ref_data, 0)
# add reference channel to epochs
raw = read_raw_fif(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
# default: proj=True, after which adding a Ref channel is prohibited
assert_raises(RuntimeError, add_reference_channels, epochs, 'Ref')
# create epochs in delayed mode, allowing removal of CAR when re-reffing
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True, proj='delayed')
epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)
assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
_check_channel_names(epochs_ref, 'Ref')
ref_idx = epochs_ref.ch_names.index('Ref')
ref_data = epochs_ref.get_data()[:, ref_idx, :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
assert_array_equal(epochs.get_data()[:, picks_eeg, :],
epochs_ref.get_data()[:, picks_eeg, :])
# add two reference channels to epochs
raw = read_raw_fif(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
# create epochs in delayed mode, allowing removal of CAR when re-reffing
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True, proj='delayed')
with warnings.catch_warnings(record=True): # multiple set zero
epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
_check_channel_names(epochs_ref, ['M1', 'M2'])
ref_idx = epochs_ref.ch_names.index('M1')
ref_idy = epochs_ref.ch_names.index('M2')
assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1')
assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2')
ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
assert_array_equal(epochs.get_data()[:, picks_eeg, :],
epochs_ref.get_data()[:, picks_eeg, :])
# add reference channel to evoked
raw = read_raw_fif(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
# create epochs in delayed mode, allowing removal of CAR when re-reffing
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True, proj='delayed')
evoked = epochs.average()
evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
_check_channel_names(evoked_ref, 'Ref')
ref_idx = evoked_ref.ch_names.index('Ref')
ref_data = evoked_ref.data[ref_idx, :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
assert_array_equal(evoked.data[picks_eeg, :],
evoked_ref.data[picks_eeg, :])
# add two reference channels to evoked
raw = read_raw_fif(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
# create epochs in delayed mode, allowing removal of CAR when re-reffing
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True, proj='delayed')
evoked = epochs.average()
with warnings.catch_warnings(record=True): # multiple set zero
evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
_check_channel_names(evoked_ref, ['M1', 'M2'])
ref_idx = evoked_ref.ch_names.index('M1')
ref_idy = evoked_ref.ch_names.index('M2')
ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
assert_array_equal(evoked.data[picks_eeg, :],
evoked_ref.data[picks_eeg, :])
# Test invalid inputs
raw_np = read_raw_fif(fif_fname, preload=False)
assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])
assert_raises(ValueError, add_reference_channels, raw, 1)
run_tests_if_main()
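# --- Illustrative sketch (not part of the test suite above) -------------------------------------
# test_set_eeg_reference checks that the average reference computed via an SSP projector matches
# the direct computation. The direct computation is simply "subtract the mean over the EEG
# channels at every time point"; the toy example below shows that operation on a random array so
# the equivalence being tested is easier to picture. The (n_channels, n_times) shape is assumed,
# as in raw._data; the helper is illustrative only and is not called by the tests.
def _average_reference_sketch():
    rng = np.random.RandomState(0)
    data = rng.randn(4, 10)                 # 4 EEG channels, 10 samples
    ref = data.mean(axis=0, keepdims=True)  # common average reference signal
    rereferenced = data - ref               # what set_eeg_reference(projection=False) applies
    assert_allclose(rereferenced.mean(axis=0), 0., atol=1e-12)
    return rereferenced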
|
jaeilepp/mne-python
|
mne/io/tests/test_reference.py
|
Python
|
bsd-3-clause
| 20,218
|
# coding: utf-8
# python-3
# Author: Sreejith Menon (smenon8@uic.edu)
# Description: Contains a single method that generates Mechanical Turk jobs for generating photo albums in bulk.
# This script contains a __main__() method that accepts command-line arguments and can be executed directly from the terminal.
# To run the script, provide 4 parameters in the order fileName, jobMapName, numberOfFiles, numberOfImgs.
# python CreateTurkFilesBulk.py /tmp/sample /tmp/sample.csv 2 10
# Successfully written : /tmp/sample1
# Successfully written : /tmp/sample2
import csv
import GetPropertiesAPI as GP
import GenerateMTurkFileAPI as GM
import importlib
import random
import sys
# un-comment if there are any changes made to API
importlib.reload(GP)
importlib.reload(GM)
# Selects noOfJobs random contributors (there might be duplicates in the selected contributor list).
# Contributors who did not take any pictures are removed via a hard-coded list.
# For each job, noOfImgsPerJob images are selected from the range of images a particular contributor has taken.
# This is done to ensure that, for a given album, all the images were taken by the same contributor.
# The script assumes the value of prodFileWrite in line 63 is True.
# It generates 1 input file and 1 question file per job, plus 1 map file that maps albums to the images in them.
def createTurkFilesBulk(flNm,jobMapName,noOfJobs,noOfImgsPerJob = 20):
contributorImages = {}
for contributor in range(1,59):
contributorImages[contributor] = GP.getContributorGID(contributor)
contributorImages.pop(52)
contributorImages.pop(57)
contributorImages.pop(8)
contributorImages.pop(11)
contributorImages.pop(17)
contributorImages.pop(32)
contributorImages.pop(34)
contributorImages.pop(41)
contributors = list(filter(lambda x: len(contributorImages[x]) > 8, contributorImages.keys()))
selectedImgContributors = []
for i in range(0,noOfJobs):
selectedImgContributors.append(contributors[random.randrange(0,len(contributors))])
argToAPI = []
for index in selectedImgContributors:
imgList = contributorImages[index]
minGID = min(imgList)
maxGID = max(imgList)
argToAPI.append([index,minGID,maxGID])
jobImageMap= {}
for i in range(0,noOfJobs):
flName = str(flNm + str(i+1))
tup = argToAPI[i]
slctdImgs = GM.generateMTurkFile(tup[1],tup[2],str(flName),noOfImgsPerJob,True)
jobImageMap[flName] = slctdImgs
i += 1
inFL = open("files/sampleInput.txt","r")
outFL = open(str(flName+ ".input"),"w")
for line in inFL:
outFL.write(line)
inFL.close()
outFL.close()
print("Successfully written : " + flName)
writeFL = open(jobMapName,"w")
writer = csv.writer(writeFL)
for key in jobImageMap:
writer.writerow([key] + [jobImageMap[key]])
writeFL.close()
def __main__(args):
if len(args) == 5:
flName = args[1]
jobMapName = args[2]
numberOfFiles = int(args[3])
numberOfImgs = int(args[4])
createTurkFilesBulk(flName,jobMapName,numberOfFiles,numberOfImgs)
else:
print("Error: Provide 4 parameters in the order fileName, jobMapName, numberOfFiles, numberOfImgs")
if __name__ == "__main__":
__main__(sys.argv)
# createTurkFilesBulk("/tmp/test", "/tmp/map_test.csv", 5, 100)
|
smenon8/AnimalWildlifeEstimator
|
script/CreateTurkFilesBulk.py
|
Python
|
bsd-3-clause
| 3,489
|
"""Test that the window can be hidden and shown.
Expected behaviour:
One window will be opened. Every 5 seconds it will toggle between
hidden and shown.
Press escape or close the window to finish the test.
"""
import time
import unittest
from pyglet import window
class WindowSetVisible(unittest.TestCase):
def test_set_visible(self):
print(__doc__)
w = window.Window(200, 200)
last_time = time.time()
visible = True
while not w.has_exit:
if time.time() - last_time > 5:
visible = not visible
w.set_visible(visible)
last_time = time.time()
print('Set visibility to %r.' % visible)
w.dispatch_events()
w.close()
|
bitcraft/pyglet
|
tests/interactive/window/window_set_visible.py
|
Python
|
bsd-3-clause
| 772
|
from __future__ import absolute_import, division, unicode_literals
from colorsys import hsv_to_rgb
import cProfile
import inspect
import os
from pstats import Stats
from six import PY2
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from django.utils.six.moves import cStringIO
from debug_toolbar.panels import Panel
from django.views.generic.base import View
from line_profiler import LineProfiler, show_func
from . import signals
class DjangoDebugToolbarStats(Stats):
__root = None
def get_root_func(self, view_func):
if self.__root is None:
filename = view_func.__code__.co_filename
firstlineno = view_func.__code__.co_firstlineno
for func, (cc, nc, tt, ct, callers) in self.stats.items():
if (len(callers) == 0
and func[0] == filename
and func[1] == firstlineno):
self.__root = func
break
return self.__root
class FunctionCall(object):
"""
The FunctionCall object is a helper object that encapsulates some of the
complexity of working with pstats/cProfile objects
"""
def __init__(self, statobj, func, depth=0, stats=None,
id=0, parent_ids=[], hsv=(0, 0.5, 1)):
self.statobj = statobj
self.func = func
if stats:
self.stats = stats
else:
self.stats = statobj.stats[func][:4]
self.depth = depth
self.id = id
self.parent_ids = parent_ids
self.hsv = hsv
self._line_stats_text = None
def parent_classes(self):
return self.parent_classes
def background(self):
r, g, b = hsv_to_rgb(*self.hsv)
return 'rgb(%f%%,%f%%,%f%%)' % (r * 100, g * 100, b * 100)
def func_std_string(self): # match what old profile produced
func_name = self.func
if func_name[:2] == ('~', 0):
# special case for built-in functions
name = func_name[2]
if name.startswith('<') and name.endswith('>'):
return '{%s}' % name[1:-1]
else:
return name
else:
file_name, line_num, method = self.func
idx = file_name.find('/site-packages/')
if idx > -1:
file_name = file_name[(idx + 14):]
file_path, file_name = file_name.rsplit(os.sep, 1)
return mark_safe(
'<span class="path">{0}/</span>'
'<span class="file">{1}</span>'
' in <span class="func">{3}</span>'
'(<span class="lineno">{2}</span>)'.format(
file_path,
file_name,
line_num,
method))
def subfuncs(self):
i = 0
h, s, v = self.hsv
count = len(self.statobj.all_callees[self.func])
for func, stats in self.statobj.all_callees[self.func].items():
i += 1
h1 = h + (i / count) / (self.depth + 1)
if stats[3] == 0 or self.stats[3] == 0:
s1 = 0
else:
s1 = s * (stats[3] / self.stats[3])
yield FunctionCall(self.statobj,
func,
self.depth + 1,
stats=stats,
id=str(self.id) + '_' + str(i),
parent_ids=self.parent_ids + [self.id],
hsv=(h1, s1, 1))
def count(self):
return self.stats[1]
def tottime(self):
return self.stats[2]
def cumtime(self):
return self.stats[3]
def tottime_per_call(self):
cc, nc, tt, ct = self.stats
if nc == 0:
return 0
return tt / nc
def cumtime_per_call(self):
cc, nc, tt, ct = self.stats
if cc == 0:
return 0
return ct / cc
def indent(self):
return 16 * self.depth
def line_stats_text(self):
if self._line_stats_text is None:
lstats = self.statobj.line_stats
if self.func in lstats.timings:
out = cStringIO()
fn, lineno, name = self.func
try:
show_func(fn,
lineno,
name,
lstats.timings[self.func],
lstats.unit, stream=out)
self._line_stats_text = out.getvalue()
except ZeroDivisionError:
self._line_stats_text = ("There was a ZeroDivisionError, "
"total_time was probably zero")
else:
self._line_stats_text = False
return self._line_stats_text
class ProfilingPanel(Panel):
"""
Panel that displays profiling information.
"""
title = _('Profiling')
template = 'debug_toolbar_line_profiler/panels/profiling.html'
def _unwrap_closure_and_profile(self, func):
if not hasattr(func, '__code__'):
return
self.line_profiler.add_function(func)
for subfunc in getattr(func, 'profile_additional', []):
self._unwrap_closure_and_profile(subfunc)
if PY2:
func_closure = func.func_closure
else:
func_closure = func.__closure__
if func_closure:
for cell in func_closure:
target = cell.cell_contents
if hasattr(target, '__code__'):
self._unwrap_closure_and_profile(cell.cell_contents)
if inspect.isclass(target) and View in inspect.getmro(target):
for name, value in inspect.getmembers(target):
if name[0] != '_' and inspect.ismethod(value):
self._unwrap_closure_and_profile(value)
def process_view(self, request, view_func, view_args, view_kwargs):
self.view_func = view_func
self.profiler = cProfile.Profile()
args = (request,) + view_args
self.line_profiler = LineProfiler()
self._unwrap_closure_and_profile(view_func)
signals.profiler_setup.send(sender=self,
profiler=self.line_profiler,
view_func=view_func,
view_args=view_args,
view_kwargs=view_kwargs)
self.line_profiler.enable_by_count()
out = self.profiler.runcall(view_func, *args, **view_kwargs)
self.line_profiler.disable_by_count()
return out
def add_node(self, func_list, func, max_depth, cum_time=0.1):
"""
add_node does a depth first traversal of the call graph, appending a
FunctionCall object to func_list, so that the Django template only
has to do a single for loop over func_list that can render a tree
structure
Parameters:
func_list is an array that will have a FunctionCall for each call
added to it
func is a FunctionCall object that will have all its callees added
max_depth is the maximum depth we should recurse
cum_time is the minimum cum_time a function should have to be
included in the output
"""
func_list.append(func)
func.has_subfuncs = False
# this function somewhat dangerously relies on FunctionCall to set its
# subfuncs' depth argument correctly
if func.depth >= max_depth:
return
# func.subfuncs returns FunctionCall objects
subs = sorted(func.subfuncs(), key=FunctionCall.cumtime, reverse=True)
for subfunc in subs:
# a sub function is important if it takes a long time or it has
# line_stats
if (subfunc.cumtime() >= cum_time or
(hasattr(self.stats, 'line_stats') and
subfunc.func in self.stats.line_stats.timings)):
func.has_subfuncs = True
self.add_node(
func_list=func_list,
func=subfunc,
max_depth=max_depth,
cum_time=subfunc.cumtime()/16)
def process_response(self, request, response):
if not hasattr(self, 'profiler'):
return None
# Could be delayed until the panel content is requested (perf. optim.)
self.profiler.create_stats()
self.stats = DjangoDebugToolbarStats(self.profiler)
self.stats.line_stats = self.line_profiler.get_stats()
self.stats.calc_callees()
func_list = []
root_func = self.stats.get_root_func(self.view_func)
if root_func is not None:
root_node = FunctionCall(statobj=self.stats,
func=root_func,
depth=0)
self.add_node(
func_list=func_list,
func=root_node,
max_depth=10,
cum_time=root_node.cumtime() / 8
)
# else:
# what should we do if we didn't detect a root function? It's not
# clear what causes this, but there are real world examples of it (see
# https://github.com/dmclain/django-debug-toolbar-line-profiler/issues/11)
self.record_stats({'func_list': func_list})
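# --- Illustrative sketch (not part of the panel above) ------------------------------------------
# add_node flattens the call graph depth-first into func_list so the Django template can render a
# tree with a single for-loop. The generic helper below shows the same flattening on a plain
# nested-dict tree; the node names and the (depth, name) output format are hypothetical and not
# part of the panel's API.
def _flatten_tree_sketch(name, children, depth=0, out=None):
    """Depth-first flatten of {name: {child: {...}}} into [(depth, name), ...]."""
    if out is None:
        out = []
    out.append((depth, name))
    for child_name, grand_children in children.items():
        _flatten_tree_sketch(child_name, grand_children, depth + 1, out)
    return out
# e.g. _flatten_tree_sketch('view', {'orm': {'cursor': {}}, 'template': {}})
# -> [(0, 'view'), (1, 'orm'), (2, 'cursor'), (1, 'template')]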
|
dmclain/django-debug-toolbar-line-profiler
|
debug_toolbar_line_profiler/panel.py
|
Python
|
bsd-3-clause
| 9,567
|
import threading
from ctypes import POINTER, Structure, byref, c_char, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
pass
class WKTWriter_st(Structure):
pass
class WKBReader_st(Structure):
pass
class WKBWriter_st(Structure):
pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
WKB_WRITE_PTR = POINTER(WKBWriter_st)
# WKTReader routines
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])
wkt_reader_read = GEOSFuncFactory(
'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)
# WKTWriter routines
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])
wkt_writer_write = GEOSFuncFactory(
'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
wkt_writer_get_outdim = GEOSFuncFactory(
'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = GEOSFuncFactory(
'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)
wkt_writer_set_trim = GEOSFuncFactory('GEOSWKTWriter_setTrim', argtypes=[WKT_WRITE_PTR, c_char])
wkt_writer_set_precision = GEOSFuncFactory('GEOSWKTWriter_setRoundingPrecision', argtypes=[WKT_WRITE_PTR, c_int])
# WKBReader routines
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])
class WKBReadFunc(GEOSFuncFactory):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
# take Python strings directly as parameters. Inside Python there
# is not a difference between signed and unsigned characters, so
# it is not a problem.
argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])
# WKB Writing prototypes.
class WKBWriteFunc(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
restype = c_uchar_p
errcheck = staticmethod(check_sized_string)
wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')
# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR]
restype = c_int
class WKBWriterSet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, c_int]
wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_char)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_char])
# ### Base I/O Class ###
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
# Loading the real destructor function at this point as doing it in
# __del__ is too late (import error).
self.destructor.func = self.destructor.get_func(
*self.destructor.args, **self.destructor.kwargs
)
# ### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
_constructor = wkt_reader_create
ptr_type = WKT_READ_PTR
destructor = wkt_reader_destroy
def read(self, wkt):
if not isinstance(wkt, (bytes, str)):
raise TypeError
return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
_constructor = wkb_reader_create
ptr_type = WKB_READ_PTR
destructor = wkb_reader_destroy
def read(self, wkb):
"Returns a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, memoryview):
wkb_s = bytes(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, (bytes, str)):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
_constructor = wkt_writer_create
ptr_type = WKT_WRITE_PTR
destructor = wkt_writer_destroy
_trim = False
_precision = None
def __init__(self, dim=2, trim=False, precision=None):
super().__init__()
if bool(trim) != self._trim:
self.trim = trim
if precision is not None:
self.precision = precision
self.outdim = dim
def write(self, geom):
"Returns the WKT representation of the given geometry."
return wkt_writer_write(self.ptr, geom.ptr)
@property
def outdim(self):
return wkt_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKT output dimension must be 2 or 3')
wkt_writer_set_outdim(self.ptr, new_dim)
@property
def trim(self):
return self._trim
@trim.setter
def trim(self, flag):
if bool(flag) != self._trim:
self._trim = bool(flag)
wkt_writer_set_trim(self.ptr, b'\x01' if flag else b'\x00')
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, precision):
if (not isinstance(precision, int) or precision < 0) and precision is not None:
raise AttributeError('WKT output rounding precision must be non-negative integer or None.')
if precision != self._precision:
self._precision = precision
wkt_writer_set_precision(self.ptr, -1 if precision is None else precision)
class WKBWriter(IOBase):
_constructor = wkb_writer_create
ptr_type = WKB_WRITE_PTR
destructor = wkb_writer_destroy
def __init__(self, dim=2):
super().__init__()
self.outdim = dim
def _handle_empty_point(self, geom):
from django.contrib.gis.geos import Point
if isinstance(geom, Point) and geom.empty:
if self.srid:
# PostGIS uses POINT(NaN NaN) for WKB representation of empty
# points. Use it for EWKB as it's a PostGIS specific format.
# https://trac.osgeo.org/postgis/ticket/3181
geom = Point(float('NaN'), float('NaN'), srid=geom.srid)
else:
raise ValueError('Empty point is not representable in WKB.')
return geom
def write(self, geom):
"Returns the WKB representation of the given geometry."
from django.contrib.gis.geos import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
# Fix GEOS output for empty polygon.
# See https://trac.osgeo.org/geos/ticket/680.
wkb = wkb[:-8] + b'\0' * 4
return memoryview(wkb)
def write_hex(self, geom):
"Returns the HEXEWKB representation of the given geometry."
from django.contrib.gis.geos.polygon import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
wkb = wkb[:-16] + b'0' * 8
return wkb
# ### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if order not in (0, 1):
raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
@property
def outdim(self):
return wkb_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKB output dimension must be 2 or 3')
wkb_writer_set_outdim(self.ptr, new_dim)
# Property for getting/setting the include srid flag.
@property
def srid(self):
return bool(ord(wkb_writer_get_include_srid(self.ptr)))
@srid.setter
def srid(self, include):
if include:
flag = b'\x01'
else:
flag = b'\x00'
wkb_writer_set_include_srid(self.ptr, flag)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
wkt_r = None
wkt_w = None
wkb_r = None
wkb_w = None
ewkb_w = None
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
if not thread_context.wkt_r:
thread_context.wkt_r = _WKTReader()
return thread_context.wkt_r
def wkt_w(dim=2, trim=False, precision=None):
if not thread_context.wkt_w:
thread_context.wkt_w = WKTWriter(dim=dim, trim=trim, precision=precision)
else:
thread_context.wkt_w.outdim = dim
thread_context.wkt_w.trim = trim
thread_context.wkt_w.precision = precision
return thread_context.wkt_w
def wkb_r():
if not thread_context.wkb_r:
thread_context.wkb_r = _WKBReader()
return thread_context.wkb_r
def wkb_w(dim=2):
if not thread_context.wkb_w:
thread_context.wkb_w = WKBWriter(dim=dim)
else:
thread_context.wkb_w.outdim = dim
return thread_context.wkb_w
def ewkb_w(dim=2):
if not thread_context.ewkb_w:
thread_context.ewkb_w = WKBWriter(dim=dim)
thread_context.ewkb_w.srid = True
else:
thread_context.ewkb_w.outdim = dim
return thread_context.ewkb_w
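# --- Illustrative sketch (not part of the GEOS bindings above) ----------------------------------
# The accessors above (wkt_r, wkt_w, wkb_r, wkb_w, ewkb_w) all follow the same pattern: keep one
# instance per thread on a threading.local() and create it lazily on first use. The generic helper
# below shows that pattern in isolation; `_ExpensiveIO` is a hypothetical stand-in for the GEOS
# reader/writer classes and is not used anywhere else in this module.
class _ExpensiveIO:
    pass
_sketch_context = threading.local()
def _sketch_io():
    if getattr(_sketch_context, 'io', None) is None:
        _sketch_context.io = _ExpensiveIO()  # created once per thread, then reused
    return _sketch_context.io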
|
auready/django
|
django/contrib/gis/geos/prototypes/io.py
|
Python
|
bsd-3-clause
| 11,322
|
from django.db.models.signals import m2m_changed, post_delete, post_save
from kitsune.products.models import Product, Topic
from kitsune.search.decorators import search_receiver
from kitsune.search.es7_utils import delete_object, index_object, remove_from_field
from kitsune.wiki.models import Document
@search_receiver(post_save, Document)
@search_receiver(m2m_changed, Document.products.through)
@search_receiver(m2m_changed, Document.topics.through)
def handle_document_save(instance, **kwargs):
if instance.current_revision:
index_object.delay("WikiDocument", instance.pk)
@search_receiver(post_delete, Document)
def handle_document_delete(instance, **kwargs):
delete_object.delay("WikiDocument", instance.pk)
@search_receiver(post_delete, Product)
def handle_product_delete(instance, **kwargs):
remove_from_field.delay("WikiDocument", "product_ids", instance.pk)
@search_receiver(post_delete, Topic)
def handle_topic_delete(instance, **kwargs):
remove_from_field.delay("WikiDocument", "topic_ids", instance.pk)
|
mozilla/kitsune
|
kitsune/search/signals/wiki.py
|
Python
|
bsd-3-clause
| 1,049
|
"""
==========
Exceptions
==========
NepidemiX project exceptions.
"""
__author__ = "Lukas Ahrenberg <lukas@ahrenberg.se>"
__license__ = "Modified BSD License"
__all__ = ["NepidemiXBaseException"]
class NepidemiXBaseException(Exception):
"""
Generic exception for the Network Model class.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
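# --- Illustrative usage sketch -------------------------------------------------------------------
# A minimal example of how the exception might be raised and reported; the message text is
# hypothetical and not taken from the NepidemiX code base.
def _example_usage():
    try:
        raise NepidemiXBaseException("Network model configuration is missing a parameter")
    except NepidemiXBaseException as err:
        return str(err)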
|
impact-hiv/NepidemiX
|
nepidemix/exceptions.py
|
Python
|
bsd-3-clause
| 423
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import argparse
from util import io
import mesh.patch as patch
# plot a single variable from an output file
#
# Usage: ./plotvar.py filename variable
def makeplot(plotfile, variable, outfile,
width=6.5, height=5.25,
log=False, compact=False, quiet=False):
sim = io.read(plotfile)
if isinstance(sim, patch.CellCenterData2d):
myd = sim
else:
myd = sim.cc_data
myg = myd.grid
plt.figure(num=1, figsize=(width, height), dpi=100, facecolor='w')
var = myd.get_var(variable)
if log:
var = np.log10(var)
plt.imshow(np.transpose(var.v()),
interpolation="nearest", origin="lower",
extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax])
if not compact:
plt.colorbar()
plt.xlabel("x")
plt.ylabel("y")
if compact:
plt.axis("off")
plt.subplots_adjust(bottom=0.0, top=1.0, left=0.0, right=1.0)
plt.savefig(outfile)
else:
plt.savefig(outfile, bbox_inches="tight")
if not quiet:
plt.show()
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("-o", type=str, default="plot.png",
metavar="plot.png", help="output file name")
parser.add_argument("--log", action="store_true",
help="plot log of variable")
parser.add_argument("--compact", action="store_true",
help="remove axes and border")
parser.add_argument("--quiet", action="store_true",
help="don't show the figure")
parser.add_argument("-W", type=float, default=6.5,
metavar="width", help="plot width (inches)")
parser.add_argument("-H", type=float, default=5.25,
metavar="height", help="plot height (inches)")
parser.add_argument("plotfile", type=str, nargs=1,
help="the plotfile you wish to plot")
parser.add_argument("variable", type=str, nargs=1,
help="the name of the solver used to run the simulation")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
makeplot(args.plotfile[0], args.variable[0], args.o,
width=args.W, height=args.H,
log=args.log, compact=args.compact, quiet=args.quiet)
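# --- Illustrative usage sketch -------------------------------------------------------------------
# makeplot() can also be called programmatically instead of via the CLI above. The plotfile and
# variable names below are hypothetical placeholders, not files shipped with pyro2.
def _example_programmatic_call():
    makeplot("plt_0010.h5", "density", "density.png",
             width=6.5, height=5.25, log=False, compact=False, quiet=True)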
|
harpolea/pyro2
|
analysis/plotvar.py
|
Python
|
bsd-3-clause
| 2,438
|
"""
django-helpdesk - A Django powered ticket tracker for small enterprise.
(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.
views/public.py - All public facing views, eg non-staff (no authentication
required) views.
"""
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import loader, Context, RequestContext
from django.utils.translation import ugettext as _
from django.views.generic import View
from django.views.generic.edit import ModelFormMixin
from helpdesk import settings as helpdesk_settings
from helpdesk.exceptions import HelpDeskException
from helpdesk.forms import PublicTicketForm
from helpdesk.lib import send_templated_mail, text_is_spam
from helpdesk.models import Ticket, Queue, UserSettings, KBCategory
from helpdesk.views.base import HelpDeskCreateView, HelpDeskDetailView, HelpDeskTemplateView, HelpDeskUpdateView
class CreateTicket(HelpDeskCreateView):
template_name = 'helpdesk/public_homepage.html'
form_class = PublicTicketForm
context_object_name = 'ticket'
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated() and helpdesk_settings.HELPDESK_REDIRECT_TO_LOGIN_BY_DEFAULT:
return HttpResponseRedirect(reverse('login'))
if (request.user.is_staff or (request.user.is_authenticated() and helpdesk_settings.HELPDESK_ALLOW_NON_STAFF_TICKET_UPDATE)):
try:
if getattr(request.user.usersettings.settings, 'login_view_ticketlist', False):
return HttpResponseRedirect(reverse('helpdesk_list'))
else:
return HttpResponseRedirect(reverse('helpdesk_dashboard'))
except UserSettings.DoesNotExist:
return HttpResponseRedirect(reverse('helpdesk_dashboard'))
return super(CreateTicket, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
data = super(CreateTicket, self).get_context_data(**kwargs)
data['kb_categories'] = KBCategory.objects.all()
data['helpdesk_settings'] = helpdesk_settings
return data
def get_initial(self, **kwargs):
data = super(CreateTicket, self).get_initial(**kwargs)
if self.request.GET.get('queue', None):
try:
queue = Queue.objects.get(slug=self.request.GET.get('queue', None))
data['queue_id'] = queue.id
except Queue.DoesNotExist:
queue = None
if self.request.user.is_authenticated() and self.request.user.email:
data['submitter_email'] = self.request.user.email
return data
def form_valid(self, form):
if text_is_spam(form.cleaned_data['body'], self.request):
# This submission is spam. Let's not save it.
self.template_name = 'helpdesk/public_spam.html'
return self.render_to_response({})
return super(CreateTicket, self).form_valid(form)
def get_success_url(self):
return reverse('helpdesk_ticket_url_view', args=[self.object.encode()])
class UpdateTicket(View):
def put(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
ticket = Ticket.objects.get(id=self.kwargs['pk'])
if ticket is None:
#return HelpDeskException(_('Invalid ticket ID or e-mail address. Please try again.'), template='helpdesk/error.html')
return HelpDeskException(_('Invalid ticket ID. Please try again.'), template='helpdesk/error.html')
if ticket.has_permission(request.user, 'close'):
ticket.close(comment = _('Submitter accepted resolution and closed ticket'))
return HttpResponseRedirect(reverse('helpdesk_ticket_url_view', args=[ticket.encode()]))
class TicketDetails(HelpDeskDetailView):
template_name = 'helpdesk/ticket_public_view.html'
context_object_name = 'ticket'
def get_object(self):
# For GET method and we have a decode code
if self.kwargs.get('code'):
ticket_id, email = Ticket.decode(self.kwargs['code'].encode("utf-8"))
elif self.request.GET.get('email') and self.request.GET.get('ticket'):
parts = self.request.GET['ticket'].split('-')
queue = '-'.join(parts[0:-1])
ticket_id = parts[-1]
email = self.request.GET.get('email')
        # TODO
        # It is probably not right to check the queue, because a moderator can move the ticket to a different queue.
return Ticket.objects.get(
id=ticket_id,
#queue__slug__iexact=queue,
submitter_email__iexact=email
)
class ChangeLanguage(HelpDeskTemplateView):
template_name = 'helpdesk/public_change_language.html'
def get_context_data(self, **kwargs):
data = super(ChangeLanguage, self).get_context_data(**kwargs)
data['return_to'] = self.request.GET['return_to']
return data
|
vladyslav2/django-helpdesk
|
helpdesk/views/public.py
|
Python
|
bsd-3-clause
| 5,141
|
"""Functions to be used with hyperopt for doing hyper parameter optimization."""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import super
from builtins import open
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from past.utils import old_div
import os
import rlpy.Tools.results as tres
import rlpy.Tools.run as rt
import hyperopt
import numpy as np
import time
import pickle
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
def dummy_f():
pass
def _search_condor_parallel(path, space, trials_per_point, setting,
objective, max_evals,
algo=hyperopt.tpe.suggest,
max_queue_len=10, poll_interval_secs=30):
"""
    block_until_done means that the process blocks until ALL jobs in
    trials have left the running or new state.
    suggest() can return an instance of StopExperiment to break out of
    the enqueuing loop.
"""
trials = CondorTrials(path=path, ids=list(range(1, trials_per_point + 1)),
setting=setting, objective=objective)
domain = hyperopt.Domain(dummy_f, space, rseed=123)
trial_path = os.path.join(path, "trials.pck")
if os.path.exists(trial_path):
with open(trial_path) as f:
old_trials = pickle.load(f)
print("Loaded existing trials")
if old_trials.setting == trials.setting and trials.ids == old_trials.ids:
trials = old_trials
n_queued = trials.count_by_state_unsynced(hyperopt.JOB_STATES)
def get_queue_len():
trials.count_by_state_unsynced(hyperopt.base.JOB_STATE_NEW)
return trials.update_trials(trials._trials)
stopped = False
while n_queued < max_evals:
qlen = get_queue_len()
while qlen < max_queue_len and n_queued < max_evals:
n_to_enqueue = 1 # min(self.max_queue_len - qlen, N - n_queued)
new_ids = trials.new_trial_ids(n_to_enqueue)
trials.refresh()
new_trials = algo(new_ids, domain, trials)
if new_trials is hyperopt.base.StopExperiment:
stopped = True
break
else:
assert len(new_ids) >= len(new_trials)
if len(new_trials):
trials.insert_trial_docs(new_trials)
trials.refresh()
n_queued += len(new_trials)
qlen = get_queue_len()
else:
break
with open(trial_path, 'w') as f:
pickle.dump(trials, f)
# -- wait for workers to fill in the trials
time.sleep(poll_interval_secs)
if stopped:
break
while trials.count_by_state_unsynced(hyperopt.base.JOB_STATE_NEW) > 0:
time.sleep(poll_interval_secs)
trials.refresh()
return trials
class CondorTrials(hyperopt.Trials):
"""
    Modified Trials class specifically designed to run RLPy experiments
    in parallel on an HTCondor job scheduling system.
"""
async = True
def __init__(self, setting, path, ids, objective, **kwargs):
super(CondorTrials, self).__init__(**kwargs)
self.path = path
self.ids = ids
self.setting = setting
self.objective = objective
def refresh(self):
self.update_trials(self._dynamic_trials)
super(CondorTrials, self).refresh()
def _insert_trial_docs(self, docs):
"""insert with no error checking
"""
rval = [doc['tid'] for doc in docs]
# submit all jobs to the cluster
self.update_trials(docs)
self._dynamic_trials.extend(docs)
return rval
def count_by_state_synced(self, arg, trials=None):
"""
Return trial counts by looking at self._trials
"""
if trials is None:
trials = self._trials
self.update_trials(trials)
if arg in hyperopt.JOB_STATES:
queue = [doc for doc in trials if doc['state'] == arg]
elif hasattr(arg, '__iter__'):
states = set(arg)
assert all([x in hyperopt.JOB_STATES for x in states])
queue = [doc for doc in trials if doc['state'] in states]
else:
raise TypeError(arg)
rval = len(queue)
return rval
def unwrap_hyperparam(self, vals):
return {a: b[0] for a, b in list(vals.items())}
def make_full_path(self, hyperparam):
return (
os.path.join(
self.path,
"-".join([str(v) for v in list(hyperparam.values())]))
)
def update_trials(self, trials):
count = 0
for trial in trials:
if trial["state"] == hyperopt.JOB_STATE_NEW:
if "submitted" not in trial or not trial["submitted"]:
# submit jobs and set status to running
hyperparam = self.unwrap_hyperparam(trial["misc"]["vals"])
full_path = self.make_full_path(hyperparam)
rt.run(self.setting, location=full_path, ids=self.ids,
parallelization="condor", force_rerun=False, block=False,
**hyperparam)
trial["submitted"] = True
else:
count += 1
#trial["state"] = hyperopt.JOB_STATE_RUNNING
# elif trial["state"] == hyperopt.JOB_STATE_RUNNING:
# check if all results files are there and set to ok
hyperparam = self.unwrap_hyperparam(trial["misc"]["vals"])
full_path = self.make_full_path(hyperparam)
finished_ids = rt.get_finished_ids(path=full_path)
if set(finished_ids).issuperset(set(self.ids)):
trial["state"] = hyperopt.JOB_STATE_DONE
print(trial["tid"], "done")
trial["result"] = self.get_results(full_path)
print("Parameters", hyperparam)
return count
def get_results(self, path):
# all jobs should be done
res = tres.load_results(path)
mapping = {'max_steps': (-1., 'steps'), 'min_steps': (1., 'steps'),
'max_reward': (-1., 'return')}
neg, quan = mapping[self.objective]
avg, std, n_trials = tres.avg_quantity(res, quan)
avg *= neg
weights = (np.arange(len(avg)) + 1) ** 2
loss = old_div((avg * weights).sum(), weights.sum())
print(time.ctime())
print("Loss: {:.4g}".format(loss))
# use #steps/eps at the moment
return {"loss": loss,
"num_trials": n_trials[-1],
"status": hyperopt.STATUS_OK,
"std_last_mean": std[-1]}
def import_param_space(filename):
"""
gets the variable param_space from a file without executing its __main__ section
"""
content = ""
with open(filename) as f:
lines = f.readlines()
for l in lines:
if "if __name__ ==" in l:
# beware: we assume that the __main__ execution block is the
# last one in the file
break
content += l
vars = {}
exec(content, vars)
return vars["param_space"]
def find_hyperparameters(
setting, path, space=None, max_evals=100, trials_per_point=30,
parallelization="sequential",
objective="max_reward", max_concurrent_jobs=100):
"""
This function does hyperparameter optimization for RLPy experiments with the
hyperopt library.
At the end an instance of the optimization trials is stored in "path"/trials.pck
:param setting: file specifying the experimental setup.
It contains a make_experiment function and a dictionary
named param_space if the argument space is not used.
For each key of param_space there needs to be an optional
argument in make_experiment
:param path: directory used to store all intermediate results.
:param space: (optional) an alternative specification of the hyperparameter
space
    :param max_evals: maximum number of hyperparameter settings that are
        evaluated during the optimization
:param trials_per_point: specifies the number of independent runs (with
different seeds) of the experiment for evaluating a single hyperparameter
setting.
    :param parallelization: either **sequential**, **joblib**, **condor**,
        **condor_all** or **condor_full**.
        The condor options can be used on a computing cluster with an HTCondor
        scheduler. The joblib option parallelizes runs on one machine and
        sequential runs every experiment in sequence.
:param objective: (optional) string specifying the objective to optimize,
possible values are *max_reward*, *min_steps*, *max_steps*
:param max_concurrent_jobs: only relevant for condor_full parallelization.
specifies the maximum number of jobs that should run at the same time.
    :return: a tuple containing the best hyperparameter settings and the hyperopt
        trials instance of the optimization procedure
"""
if space is None:
space = import_param_space(setting)
def f(hyperparam):
"""function to optimize by hyperopt"""
# "temporary" directory to use
full_path = os.path.join(
path,
"-".join([str(v) for v in list(hyperparam.values())]))
# execute experiment
rt.run(setting, location=full_path, ids=list(range(1, trials_per_point + 1)),
parallelization=parallelization, force_rerun=False, block=True, **hyperparam)
# all jobs should be done
res = tres.load_results(full_path)
if objective == "max_steps":
m, s, n = tres.avg_quantity(res, "steps")
val = -m
std = s[-1]
elif objective == "min_steps":
m, s, n = tres.avg_quantity(res, "steps")
val = m
std = s[-1]
elif objective == "max_reward":
m, s, n = tres.avg_quantity(res, "return")
val = -m
std = s[-1]
else:
print("unknown objective")
weights = (np.arange(len(val)) + 1) ** 2
loss = old_div((val * weights).sum(), weights.sum())
print(time.ctime())
print("Parameters", hyperparam)
print("Loss", loss)
# use #steps/eps at the moment
return {"loss": loss,
"num_trials": n[-1],
"status": hyperopt.STATUS_OK,
"std_last_mean": std}
if parallelization == "condor_all":
trials = CondorTrials(path=path, ids=list(range(1, trials_per_point + 1)),
setting=setting, objective=objective)
domain = hyperopt.Domain(dummy_f, space, rseed=123)
rval = hyperopt.FMinIter(hyperopt.rand.suggest, domain, trials,
max_evals=30,
max_queue_len=30)
rval.exhaust()
rval = hyperopt.FMinIter(hyperopt.tpe.suggest, domain, trials,
max_evals=max_evals,
max_queue_len=1)
rval.exhaust()
best = trials.argmin
elif parallelization == "condor_full":
trials = _search_condor_parallel(path=path, setting=setting,
objective=objective,
space=space, max_evals=max_evals,
trials_per_point=trials_per_point)
best = trials.argmin
else:
trials = hyperopt.Trials()
best = hyperopt.fmin(f, space=space, algo=hyperopt.tpe.suggest,
max_evals=max_evals, trials=trials)
with open(os.path.join(path, 'trials.pck'), 'w') as f:
pickle.dump(trials, f)
return best, trials
|
rlpy/rlpy
|
rlpy/Tools/hypersearch.py
|
Python
|
bsd-3-clause
| 12,247
|
import inspect
import os
import subprocess
import sys
from abc import ABCMeta, abstractmethod
from .exception import StashException
class FileStatus(object):
"""Enum for all possible file states that are handled by stash."""
Added, Removed = range(2)
class Repository(object):
"""Abstract class that defines an interface for all functionality required
by :py:class:`~stash.stash.Stash` to properly interface with a version
control system.
"""
__metaclass__ = ABCMeta
def __init__(self, path, create=False):
"""Creating a concrete repository instance is done using the factory
method :py:meth:`~stash.repository.Repository.__new__`. After the
factory has created a class instance, the repository is initialized by
specifying a *path* within the repository.
"""
self.root_path = self.get_root_path(path)
"""Root path of the repository."""
# In case no valid repository could be found, and one should be created,
# do so.
if create and self.root_path is None:
# In case the repository path does not yet exist, create it first.
if not os.path.exists(path):
os.mkdir(path)
# Make sure that the root path of the repository points to the
# specified root path.
self.root_path = os.path.abspath(path)
# Finally, create the repository.
self.init()
        super(Repository, self).__init__()
def __new__(cls, path, create=False):
"""Factory that will return the right repository wrapper depending on
the repository type that is detected for *path*.
:raises: :py:exc:`~stash.exception.StashException` in case no repository is
found at *path*.
"""
# Iterate over all repository implementations, and for each
# implementation, determine whether it can find a root path for the
# repository specified at the specified path.
if cls == Repository:
for name, repository_cls in inspect.getmembers(sys.modules[cls.__module__]):
if inspect.isclass(repository_cls) and Repository in repository_cls.__bases__:
repository_root = repository_cls.get_root_path(path)
if repository_root is not None:
# A root path for the current repository implementation
# could be found, create an instance of this class.
return super(Repository, repository_cls).__new__(repository_cls)
raise StashException("no valid repository found at '%s'" % path)
else:
            return super(Repository, cls).__new__(cls)
def _execute(self, command, stdin=None, stdout=subprocess.PIPE):
"""Executes the specified command relative to the repository root.
Returns a tuple containing the return code and the process output.
"""
process = subprocess.Popen(command, shell=True, cwd=self.root_path, stdin=stdin, stdout=stdout)
return (process.wait(), None if stdout is not subprocess.PIPE else process.communicate()[0].decode('utf-8'))
@abstractmethod
def add(self, file_names):
"""Adds all files in *file_names* to the repository."""
pass
def apply_patch(self, patch_path):
"""Applies the patch located at *patch_path*. Returns the return code of
the patch command.
"""
# Do not create .orig backup files, and merge files in place.
return self._execute('patch -p1 --no-backup-if-mismatch --merge', stdout=open(os.devnull, 'w'), stdin=open(patch_path, 'r'))[0]
@abstractmethod
def commit(self, message):
"""Commits all changes in the repository with the specified commit
*message*.
"""
pass
@abstractmethod
def diff(self):
"""Returns a diff text for all changes in the repository."""
pass
@abstractmethod
    def init(self):
        """Creates a repository at the repository's root path."""
pass
@abstractmethod
def remove(self, file_names):
"""Removes all files in *file_names* from the repository."""
pass
@abstractmethod
def revert_all(self):
"""Reverts all changes in a repository without creating any backup
files.
"""
pass
@classmethod
@abstractmethod
def get_root_path(cls, path):
"""Returns the root path for the repository location *path*. In case
*path* is not part of a repository, `None` is returned.
"""
pass
@abstractmethod
def status(self):
"""Returns the current status of all files in the repository."""
pass
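# Illustrative sketch (the paths below are hypothetical): constructing the
# abstract Repository dispatches to a concrete subclass based on what is found
# on disk, while creating a new repository goes through a concrete class.
#
#     repo = Repository('/tmp/existing-checkout')   # -> MercurialRepository
#                                                    #    or SubversionRepository
#     new_repo = MercurialRepository('/tmp/new-repo', create=True)
#     print(repo.status())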
class MercurialRepository(Repository):
"""Concrete implementation of :py:class:`~stash.repository.Repository` for
Mercurial repositories.
"""
def add(self, file_names):
"""See :py:meth:`~stash.repository.Repository.add`."""
self._execute('hg add %s' % (' '.join(file_names)))
def commit(self, message):
"""See :py:meth:`~stash.repository.Repository.commit`."""
self._execute('hg ci -m "%s" -u anonymous' % message)
def diff(self):
"""See :py:meth:`~stash.repository.Repository.diff`."""
return self._execute('hg diff -a')[1]
def init(self):
"""See :py:meth:`~stash.repository.Repository.init`."""
self._execute('hg init')
def remove(self, file_names):
"""See :py:meth:`~stash.repository.Repository.remove`."""
self._execute('hg rm %s' % (' '.join(file_names)))
def revert_all(self):
"""See :py:meth:`~stash.repository.Repository.revert_all`."""
self._execute('hg revert -q -C --all')
@classmethod
    def get_root_path(cls, path):
"""See :py:meth:`~stash.repository.Repository.get_root_path`."""
# Look at the directories present in the current working directory. In case
# a .hg directory is present, we know we are in the root directory of a
# Mercurial repository. In case no repository specific folder is found, and
# the current directory has a parent directory, look if a repository
# specific directory can be found in the parent directory.
while path != '/':
if '.hg' in os.listdir(path):
return path
path = os.path.abspath(os.path.join(path, os.pardir))
# No Mercurial repository found.
return None
def status(self):
"""See :py:meth:`~stash.repository.Repository.status`."""
result = set()
for line in self._execute('hg stat')[1].splitlines():
if line[0] == '?':
result.add((FileStatus.Added, line[2:].strip()))
elif line[0] == '!':
result.add((FileStatus.Removed, line[2:].strip()))
return result
class SubversionRepository(Repository):
"""Concrete implementation of :py:class:`~stash.repository.Repository` for
Subversion repositories.
"""
def add(self, file_names):
"""See :py:meth:`~stash.repository.Repository.add`."""
self._execute('svn add --parents %s' % (' '.join(file_names)))
def commit(self, message):
"""See :py:meth:`~stash.repository.Repository.commit`."""
self._execute('svn ci -m "%s" --username anonymous' % message)
def diff(self):
"""See :py:meth:`~stash.repository.Repository.diff`."""
return self._execute('svn diff --git')[1]
def init(self):
"""See :py:meth:`~stash.repository.Repository.init`."""
self._execute('svnadmin create --fs-type fsfs .svn-db')
self._execute('svn co file://%s/.svn-db .' % self.root_path)
def remove(self, file_names):
"""See :py:meth:`~stash.repository.Repository.remove`."""
self._execute('svn rm %s' % (' '.join(file_names)))
def revert_all(self):
"""See :py:meth:`~stash.repository.Repository.revert_all`."""
self._execute('svn revert -R -q .')
@classmethod
    def get_root_path(cls, path):
"""See :py:meth:`~stash.repository.Repository.get_root_path`."""
# Look at the directories present in the current working directory. In
# case a .svn directory is present, we know we are in the root directory
# of a Subversion repository (for Subversion 1.7.x). In case no
# repository specific folder is found, and the current directory has a
# parent directory, look if a repository specific directory can be found
# in the parent directory.
while path != '/':
if '.svn' in os.listdir(path):
return path
path = os.path.abspath(os.path.join(path, os.pardir))
# No Subversion repository found.
return None
def status(self):
"""See :py:meth:`~stash.repository.Repository.status`."""
result = set()
for line in self._execute('svn stat')[1].splitlines():
if line[0] == '?':
result.add((FileStatus.Added, line[2:].strip()))
elif line[0] == '!':
result.add((FileStatus.Removed, line[2:].strip()))
return result
|
ton/stash
|
stash/repository.py
|
Python
|
bsd-3-clause
| 9,261
|
"""Blog models."""
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
from arpegio.core.models import ContentMixin, Timestampable
from .managers import PostManager
class Post(ContentMixin, # pylint: disable=model-missing-unicode
Timestampable,
models.Model):
"""Post Model."""
STATUS_CHOICES = (('D', 'Draft'),
('PB', 'Public'),
('PV', 'Private'),
('T', 'Trash'),
)
author = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
excerpt = models.TextField(blank=True, null=True)
featured_image = models.ImageField(upload_to='post_covers',
blank=True,
null=True
)
status = models.CharField(max_length=2, choices=STATUS_CHOICES)
sticky = models.BooleanField(default=False)
objects = PostManager()
def get_absolute_url(self):
"""Get the absolute url of a post"""
return reverse('blog:post', kwargs={'slug': self.slug})
class Meta:
ordering = ['-creation_date']
|
arpegio-dj/arpegio
|
arpegio/blog/models.py
|
Python
|
bsd-3-clause
| 1,334
|
# -*- coding: utf-8 -*-
import time
import os,sys
import itertools
import math
import argparse
import numpy as np
from multiprocessing import Pool
from hashlib import sha1
import random, struct
from random import sample,choice
from sklearn import metrics
#we truncate sha1 for now. We should probably replace this with a proper hash function.
M_PRIME = (1 << 89) - 1 #(x << n) is x shifted left by n bit
MAX_HASH = (1 << 64) - 1
NUM_PERM=100
random.seed(427)
A,B = np.array([(random.randint(1, M_PRIME),random.randint(0, M_PRIME)) for _ in range(NUM_PERM)]).T
#############
# functions #
#############
def set_permutations(numperm):
    # rebind the module-level permutation parameters, otherwise this function has no effect
    global NUM_PERM, A, B
    NUM_PERM = numperm
    A, B = np.array([(random.randint(1, M_PRIME), random.randint(0, M_PRIME)) for _ in range(NUM_PERM)]).T
def get_permuted_hashes(token):
# get a hash value
#abusing sha1 and truncating to 12 digit number
hv=int(sha1(token).hexdigest(),16)% (10 ** 12)
#do Carter and Wegman like hashing.
return np.bitwise_and((A * hv + B) % M_PRIME,MAX_HASH)
def get_clusters(fn):
with open(fn,'r') as f:
next(f)#skip header
for line in f:
a=line.split(',')
yield a[0],a[2]
def get_lsh(sig,nbands):
for i,band in enumerate(np.array_split(sig,nbands)):
yield sha1("ab" + str(band) + "ba"+str(i)).digest()
def get_bandwidth(n, tr):
"""
Threshold tr = (1/b) ** (1/r) where
b #bands
r #rows per band
n = b * r #elements in signature
"""
best = n, 1
minerr = float("inf")
for r in range(1, n + 1):
try:
b = 1. / (tr ** r)
        except (OverflowError, ZeroDivisionError):
return best
err = abs(n - b * r)
if err < minerr:
best = r
minerr = err
return best
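# Illustrative note (not from the original file): for a signature of
# NUM_PERM values and a target threshold t, get_bandwidth returns the number
# of rows per band r that makes b * r (with b = (1/t)**r bands) closest to n;
# compute_clusters below then derives the band count as ceil(NUM_PERM / r).
#
#     r = get_bandwidth(NUM_PERM, 0.5)
#     b = int(math.ceil(float(NUM_PERM) / float(r)))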
def connected(seed,lshdict,doc2lsh,t):
'''
Computes clusters based on the lsh bucket candidates.
We do not actually check the full connected component.
We only check for similar docs amongst the lsh candidates for each cluster member.
'''
cluster=set([seed])
#get candidates and flatten list
base=set([seed])
while len(base)>0:
s=base.pop()
#get candidates and flatten list
candidates=set(itertools.chain.from_iterable([lshdict[sig] for sig in doc2lsh[s]]))
m1=hashcorp[s]
for cand in candidates:
if cand in cluster:continue#don't check if we've already added this
m2=hashcorp[cand]
if jaccard(m1,m2) >=t:
cluster.add(cand)
base.add(cand)
#all candidates have been checked
return cluster
def jaccard(h1,h2):
'''
Compute jaccard similarity between two minhash signatures.
Make sure to only compute jaccard similarity for hashes created with same hash functions (i.e. same seed for random permutation)
'''
    return float(np.count_nonzero(h1 == h2)) / float(h2.size)
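# Example (follows directly from the definition above): for two equal-length
# signature arrays, jaccard() returns the fraction of positions at which they
# agree, so identical signatures score 1.0.
#
#     assert jaccard(np.array([1, 2, 3]), np.array([1, 2, 4])) == 2. / 3.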
def near_duplicates(seed,lshdict,doc2lsh,t):
cluster=set([seed])
#get candidates and flatten list
candidates=set(itertools.chain.from_iterable([lshdict[sig] for sig in doc2lsh[seed]]))
m1=hashcorp[seed]
for cand in candidates:
if cand in cluster:continue#don't check if we've already added this
m2=hashcorp[cand]
if jaccard(m2,m1) >=t:
cluster.add(cand)
#all candidates have been checked
return cluster
def compute_clusters(obj):
thr=obj[0]
bandwidth=get_bandwidth(NUM_PERM, thr)#r
bands=int(math.ceil(float(NUM_PERM)/float(bandwidth)))#b
print("starting calculations for threshold "+str(thr)+"\nnumber of lsh bands: "+str(bands))
sys.stdout.flush()
start_time = time.time()
doc_to_lsh={}
lsh_dict={}
for key,m in hashcorp.items():
#compute lsh
signatures = [sig for sig in get_lsh(m,bands)]
#store signatures for this document
doc_to_lsh[key]=signatures
#store lsh signature to key
for sig in signatures:
if sig in lsh_dict:
lsh_dict[sig].append(key)
else:
lsh_dict[sig]=[key]
print(("Calculating lsh signatures for threshold "+str(thr)+" took\n ---%s seconds ---\n" % (time.time() - start_time)))
sys.stdout.flush()
#compute connected components
start_time = time.time()
doc2cluster={}
count=0
for doc in hashcorp:
if doc not in doc2cluster:
cl=connected(doc,lsh_dict,doc_to_lsh,thr)
doc2cluster.update({i:count for i in cl })
count+=1
print(("Computing connected components for threshold: "+str(thr)+" took\n--- %s seconds ---\n" % (time.time() - start_time)))
print("write results to file")
start_time = time.time()
f=open(outdir+'/doc2cluster_'+str(thr)+'_'+suffix+'.csv','w')
f.write('line,cluster\n')
for key, value in doc2cluster.items():
f.write(str(key)+','+str(value)+'\n')
f.close()
print(("Writing results to files for threshold "+str(thr)+" took:\n--- %s seconds ---\n" % (time.time() - start_time)))
#Set up command line arguments
parser = argparse.ArgumentParser(description='Calculate connected components of documents with given threshold(s)')
parser.add_argument("-t", dest="threshold",type=float,help="threshold for ER", metavar="T")
parser.add_argument("-lt", dest="lt",type=float,help="lower threshold for ER", metavar="TL")
parser.add_argument("-ut", dest="ut",type=float,help="upper threshold for ER", metavar="TU")
parser.add_argument("-out", dest="out",help="output directory", metavar="OUT")
parser.add_argument("-steps", dest="steps",type=float,help="number of steps between lower and upper threshold", metavar="TSTEP")
parser.add_argument("-sigl", dest="num_permutations",type=int,help="minhash signature length", metavar="SIG")
parser.add_argument("-suff", dest="suffix",help="output file suffix", metavar="S")
parser.add_argument("-infile", dest="infile",help="input file",required=True, metavar="IF")
parser.add_argument('-header', dest='header', action='store_true')
parser.add_argument('-near_dups', dest='near_dups',help="Do near duplicate detection. If this is not set, connected components will be computed", action='store_true')
parser.add_argument("-p", dest="nump", required=False,type=int,help="number of processes for multithreading", metavar="NUMP")
parser.set_defaults(match=False)
parser.set_defaults(header=True)
parser.set_defaults(near_dups=True)
parser.set_defaults(threshold=None)
parser.set_defaults(num_permutations=100)
parser.set_defaults(lt=0.0)
parser.set_defaults(ut=1.0)
parser.set_defaults(steps=2)
parser.set_defaults(nump=1)
parser.set_defaults(suffix='')
parser.set_defaults(out='out')
if __name__ == "__main__":
#fetch command line arguments
args = parser.parse_args()
num_processes=args.nump
suffix=args.suffix
if NUM_PERM!=args.num_permutations:
set_permutations(args.num_permutations)
#create output directory if it does not exist
outdir=args.out
if not os.path.exists(outdir):
os.makedirs(outdir)
thresholds=[]
lt=args.lt
near_dups=args.near_dups
ut=args.ut
steps=args.steps
if args.threshold is not None:
thresholds=[args.threshold]
else:
if None in [lt,ut,steps]:
print("need lower threshold, upper threshold, and number of steps")
exit()
else:
thresholds=np.linspace(lt, ut, num=steps)
#load text. Flat file for now
print('load text')
start_time = time.time()
with open(args.infile,'r') as f:
if args.header:
next(f)
#TODO test robustness
#mycorpus=[(i,set(line.encode('utf8', 'ignore').lower().split())) for i,line in enumerate(f)]
mycorpus=[(i,set(line.lower().split())) for i,line in enumerate(f)]
print(("--- %s seconds ---" % (time.time() - start_time)))
print('Calculate minhash signatures')
start_time = time.time()
#prepare dictionary of hashes
hashcorp=dict.fromkeys([tup[0] for tup in mycorpus])
#compute hashes
for key,doc in mycorpus:
#compute minhash signature
hashvalues=np.empty(NUM_PERM)
hashvalues.fill(MAX_HASH)
for token in doc:
#np.minimum(get_permuted_hashes(token.encode('utf-8','ignore')), hashvalues)
            hashvalues = np.minimum(get_permuted_hashes(token), hashvalues)
hashcorp[key]=hashvalues
print(("--- %s seconds ---" % (time.time() - start_time)))
if num_processes> 1:
if len(thresholds)<num_processes:
num_processes=len(thresholds)
p=Pool(num_processes)
assignment=[ (x,) for x in thresholds]
p.map(compute_clusters,assignment)
else:
for x in thresholds:
compute_clusters((x,))
|
benbo/FastDocumentClusters
|
fast_document_clusters.py
|
Python
|
bsd-3-clause
| 8,883
|
# sqlalchemy/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import inspect
import sys
import sqlalchemy.exc as exceptions
from sqlalchemy.sql import (
alias,
and_,
asc,
between,
bindparam,
case,
cast,
collate,
delete,
desc,
distinct,
except_,
except_all,
exists,
extract,
func,
insert,
intersect,
intersect_all,
join,
literal,
literal_column,
modifier,
not_,
null,
or_,
outerjoin,
outparam,
over,
select,
subquery,
text,
tuple_,
type_coerce,
union,
union_all,
update,
)
from sqlalchemy.types import (
BLOB,
BOOLEAN,
BigInteger,
Binary,
Boolean,
CHAR,
CLOB,
DATE,
DATETIME,
DECIMAL,
Date,
DateTime,
Enum,
FLOAT,
Float,
INT,
INTEGER,
Integer,
Interval,
LargeBinary,
NCHAR,
NVARCHAR,
NUMERIC,
Numeric,
PickleType,
REAL,
SMALLINT,
SmallInteger,
String,
TEXT,
TIME,
TIMESTAMP,
Text,
Time,
TypeDecorator,
Unicode,
UnicodeText,
VARCHAR,
)
from sqlalchemy.schema import (
CheckConstraint,
Column,
ColumnDefault,
Constraint,
DDL,
DefaultClause,
FetchedValue,
ForeignKey,
ForeignKeyConstraint,
Index,
MetaData,
PassiveDefault,
PrimaryKeyConstraint,
Sequence,
Table,
ThreadLocalMetaData,
UniqueConstraint,
)
from sqlalchemy.engine import create_engine, engine_from_config
__all__ = sorted(name for name, obj in locals().items()
if not (name.startswith('_') or inspect.ismodule(obj)))
__version__ = '0.7.5'
del inspect, sys
from sqlalchemy import util as _sa_util
_sa_util.importlater.resolve_all()
|
intelie/pycollector
|
src/third/sqlalchemy/__init__.py
|
Python
|
bsd-3-clause
| 1,975
|
"""Models lslsl."""
from django.db import models
estado_civil = (
('Soltero', 'Soltero'),
('Casado', 'Casado'),
('Divorciado', 'Divorciado'),
('Viudo', 'Viudo'),
('Conviviente', 'Conviviente'),
)
sexo = (
('Femenino', 'Femenino'),
('Masculino', 'Masculino'),
)
ocupacion = (
('EstudianteFIA', 'Estudiante FIA'),
('EstudianteFCE', 'Estudiante FCE'),
('EstudianteSALUD', 'Estudiante SALUD'),
('EstudianteFACIHED', 'Estudiante FACIHED'),
('DocenteFIA', 'Docente FIA'),
('DocenteFCE', 'Docente FCE'),
('DocenteSALUD', 'Docente SALUD'),
('DocenteFACIHED', 'Docente FACIHED'),
('PersonalAdministrativo', 'Personal Administrativo'),
('EstudianteCAT', 'Estudiante CAT'),
('DocenteCAT', 'Docente CAT'),
('Visitas', 'Visitas'),
)
IMC = (
('PesoBajo', 'Peso Bajo'),
('PesoNormal', 'Peso Normal'),
('Sobrepeso', 'Sobre peso'),
('Obesidad', 'Obesidad'),
('ObesidadSevera', 'Obesidad Severa')
)
class Usuario(models.Model):
"""Class Model Usuario. """
nombre = models.CharField(max_length=60)
apellidos = models.CharField(max_length=60)
dni = models.CharField(max_length=10)
#image=models.ImageField(upload_to="", blank=True, null=True)
sexo = models.CharField(max_length=20, choices=sexo)
ocupacion = models.CharField(max_length=100)
telefono = models.CharField(max_length=10)
estado = models.BooleanField()
def __str__(self):
return self.nombre
class Departamento(models.Model):
codigo = models.IntegerField(unique=True)
nombre = models.CharField(max_length=100)
class Meta:
verbose_name = "Departamento"
verbose_name_plural = "Departamento"
def __str__(self):
return self.nombre
class Provincia(models.Model):
nombre = models.CharField(max_length=100)
departamento = models.ForeignKey(Departamento)
class Meta:
verbose_name = "Provincia"
verbose_name_plural = "Provincia"
def __str__(self):
return self.nombre
class Distrito(models.Model):
nombre = models.CharField(max_length=100)
provincia = models.ForeignKey(Provincia)
class Meta:
verbose_name = "Distrito"
verbose_name_plural = "Distrito"
def __str__(self):
return self.nombre
class Persona(models.Model):
nombres = models.CharField(max_length=40)
apellido_paterno = models.CharField(max_length=40)
apellido_materno = models.CharField(max_length=40)
departamento = models.ForeignKey(Departamento, blank=True, null=True)
provincia = models.ForeignKey(Provincia, blank=True, null=True)
distrito = models.ForeignKey(Distrito, blank=True, null=True)
dni = models.CharField(max_length=8, unique=True)
fecha_nacimiento = models.DateField()
codigo = models.CharField(max_length=9, unique=True,blank=True, null=True)
edad = models.IntegerField(blank=True, null=True)
estado_civil = models.CharField(max_length=20, choices=estado_civil)
sexo = models.CharField(max_length=19, choices=sexo)
telefono = models.IntegerField()
ocupacion = models.CharField(max_length=20, choices=ocupacion)
direccion_actual = models.CharField(max_length=100)
contacto = models.CharField(max_length=10)
es_estudiante = models.BooleanField(default=True)
es_matriculado = models.BooleanField(default=True)
class Meta:
verbose_name = "Persona"
verbose_name_plural = "Personas"
def __str__(self):
return "%s %s %s" %(self.nombres, self.apellido_paterno,self.apellido_materno)
class Historia(models.Model):
persona = models.OneToOneField(Persona)
numero = models.IntegerField(unique=True)
fecha_apertura = models.DateTimeField(auto_now_add=True)
estado = models.BooleanField(default=True)
class Meta:
verbose_name = "Historia"
verbose_name_plural = "Historias"
def __str__(self):
return "%s" %self.numero
class Consulta(models.Model):
# usuario = models.ForeignKey(Usuario)
fecha = models.DateTimeField(auto_now_add=True)
enfermedad_actual = models.TextField(blank=True, null=True)
examen_fisico = models.TextField(blank=True, null=True)
historia = models.ForeignKey(Historia)
hecho = models.BooleanField(default=False)
estado = models.BooleanField(default=True)
class Meta:
verbose_name = "Consulta"
verbose_name_plural = "Consultas"
def __str__(self):
return "%s" % self.historia.persona.nombres
class AntecedenteMedico(models.Model):
historia = models.ForeignKey(Historia)
antecedente_morbidos = models.TextField(blank=True, null=True, verbose_name='Antecedentes mórbidos')
antecedente_ginecoobstetrico = models.TextField(blank=True, null=True, verbose_name='Antecedentes ginecoobstétricos')
habito = models.TextField(blank=True, null=True, verbose_name='Hábitos')
antecedente_medicamento = models.TextField(blank=True, null=True, verbose_name='Antecedentes sobre uso de medicamentos.')
alergia = models.TextField(blank=True, null=True, verbose_name='Alergias')
antecedente_personal_social = models.TextField(blank=True, null=True, verbose_name='Antecedentes sociales y personales.')
atecedente_familiar = models.TextField(blank=True, null=True, verbose_name='Antecedentes familiares')
inmunizacion = models.TextField(blank=True, null=True, verbose_name='Inmunizaciones')
class Meta:
verbose_name = "AntecedenteMedico"
verbose_name_plural = "AntecedenteMedicos"
def __str__(self):
return self.alergia
class FuncionesVitales(models.Model):
frecuencia_cardiaca = models.IntegerField()
frecuencia_respiratoria = models.IntegerField()
presion_arterial = models.IntegerField()
temperatura = models.IntegerField()
peso = models.IntegerField()
talla = models.IntegerField()
masa_corporal = models.IntegerField()
diagnostico_mc = models.CharField(max_length=15, choices=IMC, default='PesoNormal')
consulta = models.ForeignKey(Consulta)
class Meta:
verbose_name = "Funciones Vitales"
verbose_name_plural = "Funciones Vitales"
def __str__(self):
return self.diagnostico_mc
class Diagnostico(models.Model):
codigo = models.CharField(max_length=10)
nombre = models.CharField(max_length=100)
class Meta:
verbose_name = "Diagnostico"
verbose_name_plural = "Diagnosticos"
def __str__(self):
return self.nombre
class DiagnosticoConsulta(models.Model):
diagnostico = models.ForeignKey(Diagnostico)
    consulta = models.ForeignKey(Consulta)
class Meta:
verbose_name = "Diagnostico por Consulta"
verbose_name_plural = "Diagnostico Consultas"
def __str__(self):
return "%s - %s" %(self.diagnostico,self.consulta)
class Producto(models.Model):
codigo = models.IntegerField(unique=True)
descripcion = models.CharField(max_length=100)
stock = models.IntegerField()
precio_compra = models.FloatField()
class Meta:
verbose_name = "Producto"
verbose_name_plural = "Productos"
def __str__(self):
return self.descripcion
class UnidadMedida(models.Model):
codigo = models.CharField(max_length=10)
nombre = models.CharField(max_length=50)
class Meta:
verbose_name = "Unidad de Medida"
verbose_name_plural = "Unidades de Medida"
def __str__(self):
return self.nombre
class Tratamiento(models.Model):
fecha = models.DateTimeField(auto_now_add=True)
recomendacion = models.TextField()
consulta = models.ForeignKey(Consulta)
class Meta:
verbose_name = "Tratamiento"
verbose_name_plural = "Tratamientos"
def __str__(self):
return "%s" % self.fecha
class DetalleReceta(models.Model):
precio_venta = models.FloatField(blank=True, null=True)
producto = models.ForeignKey(Producto)
cantidad = models.IntegerField(blank=True, null=True)
presentacion = models.ForeignKey(UnidadMedida)
importe = models.FloatField(blank=True, null=True)
dosis = models.IntegerField(blank=True, null=True)
periodo = models.IntegerField(blank=True, null=True)
tratamiento = models.ForeignKey(Tratamiento)
class Meta:
verbose_name = "Detalle de Receta"
verbose_name_plural = "Detalles de Receta"
def __str__(self):
return self.producto.descripcion
class Periodo(models.Model):
ciclo = models.CharField(unique=True, max_length=10)
fecha = models.DateField()
class Meta:
verbose_name = "Periodo"
verbose_name_plural = "Periodos"
def __str__(self):
return "%s" %self.ciclo
class Laboratorio(models.Model):
hemoglobina = models.IntegerField()
endocritos = models.IntegerField()
globulos_rojos = models.IntegerField()
globulos_blancos = models.IntegerField()
tipo_sangre = models.CharField(max_length=10)
periodo = models.ForeignKey(Periodo)
historia = models.ForeignKey(Historia)
class Meta:
verbose_name = "Prueba de Laboratorio"
verbose_name_plural = "Pruebas de Laboratorio"
    def __str__(self):
        return "%s" % self.hemoglobina
class ConsultaEmergencia(models.Model):
fecha = models.DateTimeField(auto_now_add=True)
historia = models.ForeignKey(Historia)
class Meta:
verbose_name = "Consulta por Emergencia"
verbose_name_plural = "Consultas por Emergencia"
def __str__(self):
        return self.historia.persona.nombres
class ReporteAtencion(models.Model):
    pacientes = models.ForeignKey(Consulta)
    mes = models.IntegerField()
    dia = models.IntegerField()
|
upeu-jul-20161-epis-ads2/MedicFast
|
apps/atencion/models.py
|
Python
|
bsd-3-clause
| 9,675
|
from django.conf import settings
from corehq.apps.commtrack.helpers import make_supply_point
from corehq.form_processor.abstract_models import AbstractSupplyInterface
from corehq.form_processor.backends.sql.dbaccessors import CaseAccessorSQL
from corehq.util.soft_assert import soft_assert
_supply_point_dynamically_created = soft_assert(
to='{}@{}'.format('skelly', 'dimagi.com'),
exponential_backoff=False,
)
class SupplyPointSQL(AbstractSupplyInterface):
@classmethod
def get_or_create_by_location(cls, location):
sp = SupplyPointSQL.get_by_location(location)
if not sp:
sp = make_supply_point(location.domain, location)
if not settings.UNIT_TESTING:
_supply_point_dynamically_created(False, 'supply_point_dynamically_created, {}, {}, {}'.format(
location.name,
sp.case_id,
location.domain,
))
return sp
@classmethod
def get_by_location(cls, location):
return location.linked_supply_point()
@staticmethod
def get_closed_and_open_by_location_id_and_domain(domain, location_id):
return CaseAccessorSQL.get_case_by_location(domain, location_id)
@staticmethod
def get_supply_point(supply_point_id):
return CaseAccessorSQL.get_case(supply_point_id)
@staticmethod
def get_supply_points(supply_point_ids):
return list(CaseAccessorSQL.get_cases(supply_point_ids))
|
qedsoftware/commcare-hq
|
corehq/form_processor/backends/sql/supply.py
|
Python
|
bsd-3-clause
| 1,490
|
from django.conf.urls.defaults import *
from django.views.generic.list_detail import object_detail, object_list
from models import Article, Category
from views import article_detail
article_info = {
'queryset': Article.objects.all(),
'template_object_name': 'article',
}
category_info = {
'queryset': Category.objects.all(),
'template_object_name': 'category',
}
urlpatterns = patterns('',
url(r'^$',
object_list,
article_info,
name='article-list'),
url(r'^categories/$',
object_list,
category_info,
name='category-list'),
url(r'^category/(?P<slug>[\w-]+)/$',
object_detail,
dict(category_info, slug_field='slug'),
name='category-detail'),
url(r'^(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<slug>[\w-]+)/$',
article_detail,
name='article-detail'),
)
|
nnrcschmdt/atix-django
|
atix/articles/urls.py
|
Python
|
bsd-3-clause
| 869
|
from .RFScore import rfscore
from .NNScore import nnscore
__all__ = ['rfscore', 'nnscore']
|
mwojcikowski/opendrugdiscovery
|
oddt/scoring/functions/__init__.py
|
Python
|
bsd-3-clause
| 92
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.sara_say import SaraSay
from sara_flexbe_states.Look_at_sound import LookAtSound
from sara_flexbe_states.get_speech import GetSpeech
from flexbe_states.wait_state import WaitState
from sara_flexbe_states.regex_tester import RegexTester
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Fri Oct 26 2018
@author: Jeffrey
'''
class test_look_at_soundSM(Behavior):
'''
test look at sound
'''
def __init__(self):
super(test_look_at_soundSM, self).__init__()
self.name = 'test_look_at_sound'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:310 y:409, x:130 y:365
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:17 y:314
_sm_group_0 = OperatableStateMachine(outcomes=['done'], output_keys=['Words'])
with _sm_group_0:
# x:84 y:76
OperatableStateMachine.add('speech',
GetSpeech(watchdog=8),
transitions={'done': 'wait', 'nothing': 'speech', 'fail': 'speech'},
autonomy={'done': Autonomy.Off, 'nothing': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'words': 'Words'})
# x:92 y:221
OperatableStateMachine.add('wait',
WaitState(wait_time=1),
transitions={'done': 'done'},
autonomy={'done': Autonomy.Off})
# x:30 y:365
_sm_group_2_1 = OperatableStateMachine(outcomes=['done'])
with _sm_group_2_1:
# x:32 y:131
OperatableStateMachine.add('test_sound',
LookAtSound(moveBase=True),
transitions={'done': 'test_sound'},
autonomy={'done': Autonomy.Off})
# x:30 y:365, x:337 y:100, x:230 y:365
_sm_look_and_wait_2 = ConcurrencyContainer(outcomes=['done'], output_keys=['Words'], conditions=[
('done', [('Group', 'done')]),
('done', [('Group_2', 'done')])
])
with _sm_look_and_wait_2:
# x:75 y:82
OperatableStateMachine.add('Group_2',
_sm_group_2_1,
transitions={'done': 'done'},
autonomy={'done': Autonomy.Inherit})
# x:28 y:173
OperatableStateMachine.add('Group',
_sm_group_0,
transitions={'done': 'done'},
autonomy={'done': Autonomy.Inherit},
remapping={'Words': 'Words'})
with _state_machine:
# x:60 y:65
OperatableStateMachine.add('say marco!',
SaraSay(sentence="Marco?", input_keys=[], emotion=1, block=True),
transitions={'done': 'Look and wait'},
autonomy={'done': Autonomy.Off})
# x:335 y:191
OperatableStateMachine.add('Look and wait',
_sm_look_and_wait_2,
transitions={'done': 'is polo!'},
autonomy={'done': Autonomy.Inherit},
remapping={'Words': 'Words'})
# x:281 y:85
OperatableStateMachine.add('hey',
SaraSay(sentence="Hey! You need to say polo!", input_keys=[], emotion=1, block=True),
transitions={'done': 'say marco!'},
autonomy={'done': Autonomy.Off})
# x:520 y:36
OperatableStateMachine.add('is polo!',
RegexTester(regex="[^o]*o[^o\ ]*o[^o]*"),
transitions={'true': 'say marco!', 'false': 'hey'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'text': 'Words', 'result': 'result'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
|
WalkingMachine/sara_behaviors
|
sara_flexbe_behaviors/src/sara_flexbe_behaviors/test_look_at_sound_sm.py
|
Python
|
bsd-3-clause
| 4,211
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
# Utility function to read README file
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='feincms-tinymce-image-browser',
version='0.1.1',
description="Provides a way to select images from the FeinCMS ImageLibrary in the TinyMCE WYSIWYG.",
author='Benjamin W Stookey',
author_email='ben.stookey@gmail.com',
url='https://github.com/jamstooks/feincms-tinymce-image-browser',
long_description=read("README.md"),
packages=[
'tinymce_browser',
'tinymce_browser.tests',
],
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Framework :: Django',
],
# test_suite='tests.main',
install_requires=['feincms'],
)
|
jamstooks/feincms-tinymce-image-browser
|
setup.py
|
Python
|
bsd-3-clause
| 1,084
|
# -*- coding: utf-8 -*-
#
# records.py
# csvdiff
#
from typing import Any, Dict, Tuple, Iterator, List, Sequence, TextIO
import csv
import sys
from . import error
Column = str
PrimaryKey = Tuple[str, ...]
Record = Dict[Column, Any]
Index = Dict[PrimaryKey, Record]
class InvalidKeyError(Exception):
pass
class SafeDictReader:
"""
A CSV reader that streams records but gives nice errors if lines fail to parse.
"""
def __init__(self, istream: TextIO, sep: str = ',') -> None:
# bump the built-in limits on field sizes
csv.field_size_limit(2**24)
self.reader = csv.DictReader(istream, delimiter=sep)
def __iter__(self) -> Iterator[Record]:
for lineno, r in enumerate(self.reader, 2):
if any(k is None for k in r):
error.abort('CSV parse error on line {}'.format(lineno))
yield dict(r)
@property
def fieldnames(self):
return self.reader._fieldnames
def load(file_or_stream: Any, sep: str = ',') -> SafeDictReader:
istream = (open(file_or_stream)
if not hasattr(file_or_stream, 'read')
else file_or_stream)
return SafeDictReader(istream, sep=sep)
def index(record_seq: Iterator[Record], index_columns: List[str]) -> Index:
if not index_columns:
        raise InvalidKeyError('must provide one or more columns to index on')
try:
obj = {
tuple(r[i] for i in index_columns): r
for r in record_seq
}
return obj
except KeyError as k:
raise InvalidKeyError('invalid column name {k} as key'.format(k=k))
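# Minimal usage sketch (the file name and column names are hypothetical):
# stream a CSV and index its records by a primary-key column so two files can
# be compared record by record.
#
#     reader = load('people.csv')
#     by_id = index(reader, ['id'])   # {('1',): {'id': '1', 'name': ...}, ...}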
def filter_ignored(index: Index, ignore_columns: List[Column]) -> Index:
for record in index.values():
# edit the record in-place
for column in ignore_columns:
del record[column]
return index
def save(records: Sequence[Record], fieldnames: List[Column], ostream: TextIO):
writer = csv.DictWriter(ostream, fieldnames)
writer.writeheader()
for r in records:
writer.writerow(r)
def sort(records: Sequence[Record]) -> List[Record]:
"Sort records into a canonical order, suitable for comparison."
return sorted(records, key=_record_key)
def _record_key(record: Record) -> List[Tuple[Column, str]]:
"An orderable representation of this record."
return sorted(record.items())
|
larsyencken/csvdiff
|
csvdiff/records.py
|
Python
|
bsd-3-clause
| 2,402
|
# proxy module
from pyface.xrc_dialog import *
|
enthought/etsproxy
|
enthought/pyface/xrc_dialog.py
|
Python
|
bsd-3-clause
| 47
|
from django.contrib.postgres.signals import register_hstore_handler
from django.db.migrations.operations.base import Operation
class CreateExtension(Operation):
reversible = True
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.vendor != 'postgresql':
return
schema_editor.execute("CREATE EXTENSION IF NOT EXISTS %s" % schema_editor.quote_name(self.name))
def database_backwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute("DROP EXTENSION %s" % schema_editor.quote_name(self.name))
def describe(self):
return "Creates extension %s" % self.name
class BtreeGinExtension(CreateExtension):
def __init__(self):
self.name = 'btree_gin'
class CITextExtension(CreateExtension):
def __init__(self):
self.name = 'citext'
class CryptoExtension(CreateExtension):
def __init__(self):
self.name = 'pgcrypto'
class HStoreExtension(CreateExtension):
def __init__(self):
self.name = 'hstore'
def database_forwards(self, app_label, schema_editor, from_state, to_state):
super().database_forwards(app_label, schema_editor, from_state, to_state)
        # Register hstore straight away, as it cannot be done before the
        # extension is installed; a subsequent data migration would use the
        # same connection.
register_hstore_handler(schema_editor.connection)
class TrigramExtension(CreateExtension):
def __init__(self):
self.name = 'pg_trgm'
class UnaccentExtension(CreateExtension):
def __init__(self):
self.name = 'unaccent'
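# Typical usage (sketch, not part of this module): these operations go in a
# migration's ``operations`` list (with ``from django.db import migrations``)
# so the extension exists before any model fields that depend on it, e.g.
#
#     class Migration(migrations.Migration):
#         dependencies = []
#         operations = [HStoreExtension(), TrigramExtension()]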
|
alexallah/django
|
django/contrib/postgres/operations.py
|
Python
|
bsd-3-clause
| 1,801
|
# -*- encoding: utf-8 -*-
from . import db
class Constellation(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=False)
name = db.Column(db.String(100), nullable=False)
region_id = db.Column(db.Integer, db.ForeignKey('region.id'))
|
Kyria/LazyBlacksmith
|
lazyblacksmith/models/sde/constellation.py
|
Python
|
bsd-3-clause
| 266
|
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements lucene-specific adapters for the defined interface.
"""
__version__ = "$Revision-Id:$"
|
DLR-SC/DataFinder
|
src/datafinder/persistence/adapters/lucene/__init__.py
|
Python
|
bsd-3-clause
| 1,810
|
from __future__ import absolute_import
from datetime import datetime, timedelta
from sentry.models import GroupHash
from sentry.testutils import TestCase, SnubaTestCase
from sentry.utils import snuba
class SnubaUtilTest(TestCase, SnubaTestCase):
def test_filter_keys_set(self):
snuba.raw_query(
start=datetime.now(),
end=datetime.now(),
filter_keys={"project_id": set([1]), "logger": set(["asdf"])},
aggregations=[["count()", "", "count"]],
)
def test_shrink_timeframe(self):
now = datetime.now()
year_ago = now - timedelta(days=365)
issues = None
assert snuba.shrink_time_window(issues, year_ago) == year_ago
issues = []
assert snuba.shrink_time_window(issues, year_ago) == year_ago
group1 = self.create_group()
group1.first_seen = now - timedelta(hours=1)
group1.last_seen = now
group1.save()
GroupHash.objects.create(project_id=group1.project_id, group=group1, hash="a" * 32)
group2 = self.create_group()
GroupHash.objects.create(project_id=group2.project_id, group=group2, hash="b" * 32)
issues = [group1.id]
assert snuba.shrink_time_window(issues, year_ago) == now - timedelta(hours=1, minutes=5)
issues = [group1.id, group2.id]
assert snuba.shrink_time_window(issues, year_ago) == year_ago
# with pytest.raises(snuba.QueryOutsideGroupActivityError):
# # query a group for a time range before it had any activity
# snuba.raw_query(
# start=group1.first_seen - timedelta(days=1, hours=1),
# end=group1.first_seen - timedelta(days=1),
# filter_keys={
# 'project_id': [group1.project_id],
# 'issue': [group1.id],
# },
# aggregations=[
# ['count()', '', 'count'],
# ],
# )
def test_override_options(self):
assert snuba.OVERRIDE_OPTIONS == {"consistent": False}
with snuba.options_override({"foo": 1}):
assert snuba.OVERRIDE_OPTIONS == {"foo": 1, "consistent": False}
with snuba.options_override({"foo": 2}):
assert snuba.OVERRIDE_OPTIONS == {"foo": 2, "consistent": False}
assert snuba.OVERRIDE_OPTIONS == {"foo": 1, "consistent": False}
assert snuba.OVERRIDE_OPTIONS == {"consistent": False}
def test_valid_orderby(self):
assert snuba.valid_orderby("event.type")
assert snuba.valid_orderby("project.id")
assert snuba.valid_orderby(["event.type", "-id"])
assert not snuba.valid_orderby("project.name")
assert not snuba.valid_orderby("issue_count")
extra_fields = ["issue_count", "event_count"]
assert snuba.valid_orderby(["issue_count", "-timestamp"], extra_fields)
assert snuba.valid_orderby("issue_count", extra_fields)
assert not snuba.valid_orderby(["invalid", "issue_count"], extra_fields)
assert not snuba.valid_orderby(["issue_count", "invalid"], extra_fields)
|
mvaled/sentry
|
tests/snuba/test_util.py
|
Python
|
bsd-3-clause
| 3,144
|
__author__ = 'Afief'
from datetime import datetime
from peewee import CharField, TextField, BooleanField, ForeignKeyField, \
DateField
from apps.models import db
from apps.models.auth import User
class Phile(db.Model):
filename = CharField(max_length=100)
filetype = CharField(max_length=100)
filepath = TextField()
class Post(db.Model):
judul = CharField(max_length=100)
konten = TextField()
date_created = DateField(default=datetime.now)
publik = BooleanField(default=True)
author = ForeignKeyField(User)
class Meta:
order_by = ('-date_created',)
class MataKuliah(db.Model):
kode = CharField(max_length=5)
judul = CharField(max_length=100)
dosen = ForeignKeyField(User)
class Meta:
order_by = ('kode',)
class Tugas(db.Model):
judul = CharField(max_length=100)
keterangan = TextField(null=True)
mata_kuliah = ForeignKeyField(MataKuliah)
tanggal_dibuat = DateField(default=datetime.now)
tanggal_terakhir = DateField()
class Meta:
order_by = ('-id',)
class TugasFile(db.Model):
tugas = ForeignKeyField(Tugas)
phile = ForeignKeyField(Phile)
class KumpulTugas(db.Model):
tugas = ForeignKeyField(Tugas)
mahasiswa = ForeignKeyField(User)
tanggal_mengumpulkan = DateField(default=datetime.now)
phile = ForeignKeyField(Phile)
class Meta:
order_by = ('-tanggal_mengumpulkan',)
|
ap13p/elearn
|
apps/models/others.py
|
Python
|
bsd-3-clause
| 1,428
|
from twisted.internet.defer import inlineCallbacks, returnValue
from vumi.config import Config, ConfigBool, ConfigList
from go.vumitools.utils import MessageMetadataHelper
class OptOutHelperConfig(Config):
case_sensitive = ConfigBool(
"Whether case sensitivity should be enforced when checking message "
"content for opt outs",
default=False)
keywords = ConfigList(
"List of the keywords which count as opt outs",
default=())
class OptOutHelper(object):
def __init__(self, vumi_api, config):
self.vumi_api = vumi_api
self.config = OptOutHelperConfig(config)
self.optout_keywords = set([
self.casing(word) for word in self.config.keywords])
def casing(self, word):
if not self.config.case_sensitive:
return word.lower()
return word
def keyword(self, message):
keyword = (message['content'] or '').strip()
return self.casing(keyword)
@inlineCallbacks
def _optout_disabled(self, account, message):
msg_mdh = MessageMetadataHelper(self.vumi_api, message)
if account.disable_optouts:
returnValue(True)
elif msg_mdh.tag is not None:
tagpool_metadata = yield msg_mdh.get_tagpool_metadata()
returnValue(tagpool_metadata.get('disable_global_opt_out', False))
else:
returnValue(False)
@inlineCallbacks
def _is_optout(self, account, message):
if (yield self._optout_disabled(account, message)):
returnValue(False)
else:
returnValue(self.keyword(message) in self.optout_keywords)
@inlineCallbacks
def process_message(self, account, message):
helper_metadata = message['helper_metadata']
if 'optout' not in helper_metadata:
optout_metadata = {'optout': False}
helper_metadata['optout'] = optout_metadata
if (yield self._is_optout(account, message)):
optout_metadata['optout'] = True
optout_metadata['optout_keyword'] = self.keyword(message)
returnValue(message)
@staticmethod
def is_optout_message(message):
return message['helper_metadata'].get('optout', {}).get('optout')
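# Minimal usage sketch (configuration values are illustrative): the helper is
# constructed once with the account-level opt-out settings and then annotates
# each inbound message's helper_metadata. process_message is an
# @inlineCallbacks function, so the yield below assumes a Twisted context.
#
#     helper = OptOutHelper(vumi_api, {'keywords': ['stop', 'end'],
#                                      'case_sensitive': False})
#     message = yield helper.process_message(account, message)
#     if OptOutHelper.is_optout_message(message):
#         ...  # route to the opt-out handler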
|
praekelt/vumi-go
|
go/vumitools/opt_out/utils.py
|
Python
|
bsd-3-clause
| 2,270
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.six import string_types
from django.utils.translation import ugettext_lazy as _
import likert_field.forms as forms
@python_2_unicode_compatible
class LikertField(models.IntegerField):
"""A Likert field is simply stored as an IntegerField"""
description = _('Likert item field')
def __init__(self, *args, **kwargs):
"""
LikertField stores items with no answer as NULL
By default responses are optional, so 'blank' is True
"""
if kwargs.get('null', True):
kwargs['null'] = True
if kwargs.get('blank', True):
kwargs['blank'] = True
super(LikertField, self).__init__(*args, **kwargs)
def __str__(self):
return "%s" % force_text(self.description)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
The field expects a number as a string (ie. '2'). Unscored fields are
empty strings and are stored as NULL
"""
if value is None:
return None
if isinstance(value, string_types) and len(value) == 0:
return None
value = int(value)
if value < 0:
value = 0
return value
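    # Behaviour of get_prep_value, restated as examples (derived from the code
    # above): unanswered items become NULL, answers arrive as strings, and
    # negative scores are clamped to 0.
    #
    #     get_prep_value('')    -> None
    #     get_prep_value(None)  -> None
    #     get_prep_value('3')   -> 3
    #     get_prep_value(-2)    -> 0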
def formfield(self, **kwargs):
defaults = {
'min_value': 0,
'form_class': forms.LikertFormField
}
defaults.update(kwargs)
return super(LikertField, self).formfield(**defaults)
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], ["^likert_field\.models\.LikertField"])
|
kelvinwong-ca/django-likert-field
|
likert_field/models.py
|
Python
|
bsd-3-clause
| 1,847
|
#!/usr/bin/env python
from wsgi import *
from django.contrib.auth.models import User
try:
wunki = User.objects.get(username='wunki')
except User.DoesNotExist:
pass
else:
wunki.is_staff = True
wunki.is_superuser = True
wunki.save()
|
pjdelport/django-userena
|
demo/superuser.py
|
Python
|
bsd-3-clause
| 251
|
AR[AR%2==0] = 0.
AR
|
jorisvandenbossche/DS-python-data-analysis
|
notebooks/python_recap/_solutions/05-numpy75.py
|
Python
|
bsd-3-clause
| 19
|