| code (string, lengths 2–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
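The rows below come from a tabular code dataset with the six columns above. As a minimal sketch (the dataset's actual identifier is not stated in this dump, so the name below is a hypothetical stand-in), such a table could be streamed like this:

```python
# Minimal sketch of streaming rows with these columns via Hugging Face
# `datasets`. The dataset name is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)
for row in ds.take(3):
    print(row["repo_name"], row["path"], row["language"], row["license"], row["size"])
    print(row["code"][:200])  # first 200 characters of the file
```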
from .base import ObjectBase
class Capture(ObjectBase):
@classmethod
def get_resource_class(cls, client):
from ..resources.captures import Captures
return Captures(client)
@property
def id(self):
return self._get_property("id")
@property
def mode(self):
return self._get_property("mode")
@property
def amount(self):
return self._get_property("amount")
@property
def settlement_amount(self):
return self._get_property("settlementAmount")
@property
def payment_id(self):
return self._get_property("paymentId")
@property
def shipment_id(self):
return self._get_property("shipmentId")
@property
def settlement_id(self):
return self._get_property("settlementId")
@property
def created_at(self):
return self._get_property("createdAt")
@property
def payment(self):
"""Return the payment for this capture."""
        # TODO Use the embedded payment data, if available.
return self.client.payments.get(self.payment_id)
@property
def shipment(self):
"""Return the shipment for this capture."""
url = self._get_link("shipment")
if url:
return self.client.shipments.from_url(url)
@property
def settlement(self):
"""Return the settlement for this capture."""
return self.client.settlements.get(self.settlement_id)
| mollie/mollie-api-python | mollie/api/objects/capture.py | Python | bsd-2-clause | 1,458 |
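A hedged usage sketch for the `Capture` object above. `ObjectBase`'s constructor is not shown in this row, so the `(data, client)` form below is an assumption, and the payload values are invented for illustration:

```python
# Sketch only: assumes ObjectBase.__init__(self, data, client), which this
# file does not show. The payload values are illustrative, not real.
payload = {
    "id": "cpt_4qqhO89gsT",
    "mode": "live",
    "amount": {"value": "35.95", "currency": "EUR"},
    "createdAt": "2018-08-02T09:29:56+00:00",
}
capture = Capture(payload, None)  # no client: linked lookups won't work
print(capture.id, capture.mode, capture.amount, capture.created_at)
```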
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 26 20:17:06 2014
@author: stuart
"""
import os
import tempfile
import datetime
import astropy.table
import astropy.time
import astropy.units as u
import pytest
from sunpy.time import parse_time
from sunpy.net.jsoc import JSOCClient, JSOCResponse
from sunpy.net.vso.vso import Results
import sunpy.net.jsoc.attrs as attrs
client = JSOCClient()
def test_jsocresponse_double():
j1 = JSOCResponse(table=astropy.table.Table(data=[[1,2,3,4]]))
j1.append(astropy.table.Table(data=[[1,2,3,4]]))
assert isinstance(j1, JSOCResponse)
assert all(j1.table == astropy.table.vstack([astropy.table.Table(data=[[1,2,3,4]]),
astropy.table.Table(data=[[1,2,3,4]])]))
def test_jsocresponse_single():
j1 = JSOCResponse(table=None)
assert len(j1) == 0
j1.append(astropy.table.Table(data=[[1,2,3,4]]))
assert all(j1.table == astropy.table.Table(data=[[1,2,3,4]]))
assert len(j1) == 4
def test_payload():
start = parse_time('2012/1/1T00:00:00')
end = parse_time('2012/1/1T00:00:45')
payload = client._make_query_payload(start, end, 'hmi.M_42s', notify='@')
payload_expected = {
'ds': '{0}[{1}-{2}]'.format('hmi.M_42s',
start.strftime("%Y.%m.%d_%H:%M:%S_TAI"),
end.strftime("%Y.%m.%d_%H:%M:%S_TAI")),
'format': 'json',
'method': 'url',
'notify': '@',
'op': 'exp_request',
'process': 'n=0|no_op',
'protocol': 'FITS,compress Rice',
'requestor': 'none',
'filenamefmt': '{0}.{{T_REC:A}}.{{CAMERA}}.{{segment}}'.format('hmi.M_42s')
}
assert payload == payload_expected
def test_payload_nocompression():
start = parse_time('2012/1/1T00:00:00')
end = parse_time('2012/1/1T00:00:45')
payload = client._make_query_payload(start, end, 'hmi.M_42s',
compression=None, notify='jsoc@cadair.com')
payload_expected = {
'ds':'{0}[{1}-{2}]'.format('hmi.M_42s', start.strftime("%Y.%m.%d_%H:%M:%S_TAI"),
end.strftime("%Y.%m.%d_%H:%M:%S_TAI")),
'format':'json',
'method':'url',
'notify':'jsoc@cadair.com',
'op':'exp_request',
'process':'n=0|no_op',
'protocol':'FITS, **NONE**',
'requestor':'none',
'filenamefmt':'{0}.{{T_REC:A}}.{{CAMERA}}.{{segment}}'.format('hmi.M_42s')
}
assert payload == payload_expected
def test_payload_protocol():
start = parse_time('2012/1/1T00:00:00')
end = parse_time('2012/1/1T00:00:45')
payload = client._make_query_payload(start, end, 'hmi.M_42s', protocol='as-is',
notify='jsoc@cadair.com')
payload_expected = {
'ds':'{0}[{1}-{2}]'.format('hmi.M_42s', start.strftime("%Y.%m.%d_%H:%M:%S_TAI"),
end.strftime("%Y.%m.%d_%H:%M:%S_TAI")),
'format':'json',
'method':'url',
'notify':'jsoc@cadair.com',
'op':'exp_request',
'process':'n=0|no_op',
'protocol':'as-is',
'requestor':'none',
'filenamefmt':'{0}.{{T_REC:A}}.{{CAMERA}}.{{segment}}'.format('hmi.M_42s')
}
assert payload == payload_expected
def test_process_time_string():
start = client._process_time('2012/1/1T00:00:00')
assert start == datetime.datetime(year=2012, month=1, day=1, second=34)
def test_process_time_datetime():
start = client._process_time(datetime.datetime(year=2012, month=1, day=1))
assert start == datetime.datetime(year=2012, month=1, day=1, second=34)
def test_process_time_astropy():
start = client._process_time(astropy.time.Time('2012-01-01T00:00:00', format='isot', scale='utc'))
assert start == datetime.datetime(year=2012, month=1, day=1, second=34)
def test_process_time_astropy_tai():
start = client._process_time(astropy.time.Time('2012-01-01T00:00:00', format='isot', scale='tai'))
assert start == datetime.datetime(year=2012, month=1, day=1, second=0)
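# Note on the second=34 expectations above: _process_time converts times to
# TAI, and TAI-UTC was 34 s at the start of 2012, so UTC midnight maps to
# 00:00:34 TAI. The scale='tai' input above is already TAI, hence second=0.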
@pytest.mark.online
def test_status_request():
r = client._request_status('none')
assert r.json() == {u'error': u'requestid none is not an acceptable ID for the external export system (acceptable format is JSOC_YYYYMMDD_NNN_X_IN or JSOC_YYYYMMDD_NNN).',
u'status': 4}
def test_empty_jsoc_response():
Jresp = JSOCResponse()
assert Jresp.table is None
assert Jresp.query_args is None
assert Jresp.requestIDs is None
assert str(Jresp) == 'None'
assert repr(Jresp) == 'None'
assert len(Jresp) == 0
@pytest.mark.online
def test_query():
Jresp = client.query(attrs.Time('2012/1/1T00:00:00', '2012/1/1T00:01:30'),
attrs.Series('hmi.M_45s'),attrs.Sample(90*u.second))
assert isinstance(Jresp, JSOCResponse)
assert len(Jresp) == 2
@pytest.mark.online
def test_post_pass():
responses = client.query(attrs.Time('2012/1/1T00:00:00', '2012/1/1T00:00:45'),
attrs.Series('hmi.M_45s'), attrs.Notify('jsoc@cadair.com'))
aa = client.request_data(responses, return_resp=True)
tmpresp = aa[0].json()
assert tmpresp['status'] == 2
assert tmpresp['protocol'] == 'FITS,compress Rice'
assert tmpresp['method'] == 'url'
@pytest.mark.online
def test_post_wavelength():
responses = client.query(attrs.Time('2010/07/30T13:30:00','2010/07/30T14:00:00'),attrs.Series('aia.lev1_euv_12s'),
attrs.Wavelength(193*u.AA)|attrs.Wavelength(335*u.AA), attrs.Notify('jsoc@cadair.com'))
aa = client.request_data(responses, return_resp=True)
tmpresp = aa[0].json()
assert tmpresp['status'] == 2
assert tmpresp['protocol'] == 'FITS,compress Rice'
assert tmpresp['method'] == 'url'
assert tmpresp['rcount'] == 302
@pytest.mark.online()
def test_post_wave_series():
with pytest.raises(TypeError):
client.query(attrs.Time('2012/1/1T00:00:00', '2012/1/1T00:00:45'),
attrs.Series('hmi.M_45s')|attrs.Series('aia.lev1_euv_12s'),
attrs.Wavelength(193*u.AA)|attrs.Wavelength(335*u.AA))
@pytest.mark.online
def test_post_fail(recwarn):
res = client.query(attrs.Time('2012/1/1T00:00:00', '2012/1/1T00:00:45'),
attrs.Series('none'), attrs.Notify('jsoc@cadair.com'))
client.request_data(res, return_resp=True)
w = recwarn.pop(Warning)
assert issubclass(w.category, Warning)
assert "Query 0 returned status 4 with error Series none is not a valid series accessible from hmidb2." == str(w.message)
assert w.filename
assert w.lineno
@pytest.mark.online
def test_request_status_fail():
resp = client._request_status('none')
assert resp.json() == {u'status': 4, u'error': u"requestid none is not an acceptable ID for the external export system (acceptable format is JSOC_YYYYMMDD_NNN_X_IN or JSOC_YYYYMMDD_NNN)."}
resp = client._request_status(['none'])
assert resp.json() == {u'status': 4, u'error': u"requestid none is not an acceptable ID for the external export system (acceptable format is JSOC_YYYYMMDD_NNN_X_IN or JSOC_YYYYMMDD_NNN)."}
@pytest.mark.online
#@pytest.mark.xfail
def test_wait_get():
responses = client.query(attrs.Time('2012/1/1T1:00:36', '2012/1/1T01:00:38'),
attrs.Series( 'hmi.M_45s'), attrs.Notify('jsoc@cadair.com'))
path = tempfile.mkdtemp()
res = client.get(responses, path=path)
assert isinstance(res, Results)
assert res.total == 1
@pytest.mark.online
def test_get_request():
responses = client.query(attrs.Time('2012/1/1T1:00:36', '2012/1/1T01:00:38'),
attrs.Series('hmi.M_45s'), attrs.Notify('jsoc@cadair.com'))
bb = client.request_data(responses)
path = tempfile.mkdtemp()
aa = client.get_request(bb, path=path)
assert isinstance(aa, Results)
@pytest.mark.online
def test_results_filenames():
responses = client.query(attrs.Time('2014/1/1T1:00:36', '2014/1/1T01:01:38'),
attrs.Series('hmi.M_45s'), attrs.Notify('jsoc@cadair.com'))
path = tempfile.mkdtemp()
aa = client.get(responses, path=path)
assert isinstance(aa, Results)
files = aa.wait()
assert len(files) == len(responses)
for hmiurl in aa.map_:
assert os.path.basename(hmiurl) == os.path.basename(aa.map_[hmiurl]['path'])
@pytest.mark.online
def test_invalid_query():
with pytest.raises(ValueError):
resp = client.query(attrs.Time('2012/1/1T01:00:00', '2012/1/1T01:00:45'))
| Alex-Ian-Hamilton/sunpy | sunpy/net/jsoc/tests/test_jsoc.py | Python | bsd-2-clause | 8,635 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
"""
Example:
$ python pydy_double_pendulum.py --plot --nt 200
"""
import numpy as np
from pyodesys.symbolic import SymbolicSys
from pyodesys.util import stack_1d_on_left
def get_equations(m_val, g_val, l_val):
    # This function body is copied from:
# http://www.pydy.org/examples/double_pendulum.html
# Retrieved 2015-09-29
from sympy import symbols
from sympy.physics.mechanics import (
dynamicsymbols, ReferenceFrame, Point, Particle, KanesMethod
)
q1, q2 = dynamicsymbols('q1 q2')
q1d, q2d = dynamicsymbols('q1 q2', 1)
u1, u2 = dynamicsymbols('u1 u2')
u1d, u2d = dynamicsymbols('u1 u2', 1)
l, m, g = symbols('l m g')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = N.orientnew('B', 'Axis', [q2, N.z])
A.set_ang_vel(N, u1 * N.z)
B.set_ang_vel(N, u2 * N.z)
O = Point('O')
P = O.locatenew('P', l * A.x)
R = P.locatenew('R', l * B.x)
O.set_vel(N, 0)
P.v2pt_theory(O, N, A)
R.v2pt_theory(P, N, B)
ParP = Particle('ParP', P, m)
ParR = Particle('ParR', R, m)
kd = [q1d - u1, q2d - u2]
FL = [(P, m * g * N.x), (R, m * g * N.x)]
BL = [ParP, ParR]
KM = KanesMethod(N, q_ind=[q1, q2], u_ind=[u1, u2], kd_eqs=kd)
try:
(fr, frstar) = KM.kanes_equations(bodies=BL, loads=FL)
except TypeError:
(fr, frstar) = KM.kanes_equations(FL, BL)
kdd = KM.kindiffdict()
mm = KM.mass_matrix_full
fo = KM.forcing_full
qudots = mm.inv() * fo
qudots = qudots.subs(kdd)
qudots.simplify()
# Edit:
depv = [q1, q2, u1, u2]
subs = list(zip([m, g, l], [m_val, g_val, l_val]))
return zip(depv, [expr.subs(subs) for expr in qudots])
def main(m=1, g=9.81, l=1, q1=.1, q2=.2, u1=0, u2=0, tend=10., nt=200,
savefig='None', plot=False, savetxt='None', integrator='scipy',
dpi=100, kwargs="", verbose=False):
assert nt > 1
kwargs = dict(eval(kwargs) if kwargs else {})
odesys = SymbolicSys(get_equations(m, g, l), params=())
tout = np.linspace(0, tend, nt)
y0 = [q1, q2, u1, u2]
xout, yout, info = odesys.integrate(
tout, y0, integrator=integrator, **kwargs)
if verbose:
print(info)
if savetxt != 'None':
        np.savetxt(savetxt, stack_1d_on_left(xout, yout))
if plot:
import matplotlib.pyplot as plt
odesys.plot_result(xout, yout)
if savefig != 'None':
plt.savefig(savefig, dpi=dpi)
else:
plt.show()
if __name__ == '__main__':
try:
import argh
argh.dispatch_command(main)
except ImportError:
import sys
if len(sys.argv) > 1:
import warnings
warnings.warn("Ignoring parameters run "
"'pip install --user argh' to fix.")
main()
| bjodah/pyodesys | examples/pydy_double_pendulum.py | Python | bsd-2-clause | 2,949 |
from django.conf import settings
from django.forms import widgets
class GoogleMapsAddressWidget(widgets.TextInput):
"""a widget that will place a google map right after the #id_address field"""
template_name = "django_google_maps/widgets/map_widget.html"
class Media:
css = {
'all': ('django_google_maps/css/google-maps-admin.css', )
}
js = (
'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js',
'https://maps.google.com/maps/api/js?key={}&libraries=places'.format(
settings.GOOGLE_MAPS_API_KEY),
'django_google_maps/js/google-maps-admin.js',
)
| madisona/django-google-maps | django_google_maps/widgets.py | Python | bsd-2-clause | 671 |
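A hedged sketch of how this widget is typically attached to a form field; the model and field names are assumptions for illustration, not taken from the file above:

```python
# Hypothetical admin form using the widget above; `Place` and `address`
# are invented names.
from django import forms

from django_google_maps.widgets import GoogleMapsAddressWidget


class PlaceAdminForm(forms.ModelForm):
    class Meta:
        fields = "__all__"
        widgets = {
            # renders the map right after the #id_address input
            "address": GoogleMapsAddressWidget,
        }

# In a hypothetical admin.py:
# @admin.register(Place)
# class PlaceAdmin(admin.ModelAdmin):
#     form = PlaceAdminForm
```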
#!/usr/bin/env python
"""
This module contains symbolic implementations of VEX operations.
"""
import re
import sys
import collections
import itertools
import operator
import logging
l = logging.getLogger("angr.engines.vex.irop")
import pyvex
import claripy
#
# The more sane approach
#
def op_attrs(p):
m = re.match(r'^Iop_' \
r'(?P<generic_name>\D+?)??' \
r'(?P<from_type>I|F|D|V)??' \
r'(?P<from_signed>U|S)??' \
r'(?P<from_size>\d+)??' \
r'(?P<from_signed_back>U|S)??' \
# this screws up CmpLE: r'(?P<e_flag>E)??' \
r'('
r'(?P<from_side>HL|HI|L|LO)??' \
r'(?P<conversion>to|as)' \
r'(?P<to_type>Int|I|F|D|V)??' \
r'(?P<to_size>\d+)??' \
r'(?P<to_signed>U|S)??' \
r')??'
r'(?P<vector_info>\d+U?S?F?0?x\d+)??' \
r'(?P<rounding_mode>_R(Z|P|N|M))?$', \
p)
if not m:
l.debug("Unmatched operation: %s", p)
return None
else:
l.debug("Matched operation: %s", p)
attrs = m.groupdict()
attrs['from_signed'] = attrs['from_signed_back'] if attrs['from_signed'] is None else attrs['from_signed']
attrs.pop('from_signed_back', None)
if attrs['generic_name'] == 'CmpOR':
assert attrs['from_type'] == 'D'
attrs['generic_name'] = 'CmpORD'
attrs['from_type'] = None
# fix up vector stuff
vector_info = attrs.pop('vector_info', None)
if vector_info:
vm = re.match(r'^(?P<vector_size>\d+)?' \
r'(?P<vector_signed>U|S)?' \
r'(?P<vector_type>F|D)?' \
r'(?P<vector_zero>0)?' \
r'x' \
r'(?P<vector_count>\d+)?$', \
vector_info)
attrs.update(vm.groupdict())
for k,v in attrs.items():
if v is not None and v != "":
l.debug("... %s: %s", k, v)
return attrs
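# Illustrative worked example of the regex above (derived by reading the
# pattern, not from an official reference): op_attrs('Iop_DivModS64to32')
# yields generic_name='DivMod', from_signed='S', from_size='64',
# conversion='to', to_size='32', with the remaining groups None.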
all_operations = pyvex.enum_IROp_fromstr.keys()
operations = { }
classified = set()
unclassified = set()
unsupported = set()
explicit_attrs = {
'Iop_64x4toV256': {
'_generic_name': '64x4',
'_to_size': 256,
},
'Iop_Yl2xF64': {
'_generic_name': 'Yl2x',
'_to_size': 64,
},
'Iop_Yl2xp1F64': {
'_generic_name': 'Yl2xp1',
'_to_size': 64,
},
}
def make_operations():
for p in all_operations:
if p in ('Iop_INVALID', 'Iop_LAST'):
continue
if p in explicit_attrs:
attrs = explicit_attrs[p]
else:
attrs = op_attrs(p)
if attrs is None:
unclassified.add(p)
else:
classified.add(p)
try:
operations[p] = SimIROp(p, **attrs)
except SimOperationError:
unsupported.add(p)
l.debug("%d matched (%d supported) and %d unmatched operations", len(classified), len(operations), len(unclassified))
arithmetic_operation_map = {
'Add': '__add__',
'Sub': '__sub__',
'Mul': '__mul__',
'Div': '__div__',
'Neg': 'Neg',
'Abs': 'Abs',
}
shift_operation_map = {
'Shl': '__lshift__',
'Shr': 'LShR',
'Sar': '__rshift__',
}
bitwise_operation_map = {
'Xor': '__xor__',
'Or': '__or__',
'And': '__and__',
'Not': '__invert__',
}
rm_map = {
0: claripy.fp.RM_RNE,
1: claripy.fp.RM_RTN,
2: claripy.fp.RM_RTP,
3: claripy.fp.RM_RTZ,
}
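# These keys follow VEX's IRRoundingMode encoding: 0 = round to nearest
# (ties to even), 1 = toward -inf, 2 = toward +inf, 3 = toward zero.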
generic_names = set()
conversions = collections.defaultdict(list)
unsupported_conversions = [ ]
add_operations = [ ]
other_operations = [ ]
vector_operations = [ ]
fp_ops = set()
common_unsupported_generics = collections.Counter()
def supports_vector(f):
f.supports_vector = True
return f
class SimIROp(object):
"""
A symbolic version of a Vex IR operation.
"""
def __init__(self, name, **attrs):
l.debug("Creating SimIROp(%s)", name)
self.name = name
self.op_attrs = attrs
self._generic_name = None
self._from_size = None
self._from_side = None
self._from_type = None
self._from_signed = None
self._to_size = None
self._to_type = None
self._to_signed = None
self._conversion = None
self._vector_size = None
self._vector_signed = None
self._vector_type = None
self._vector_zero = None
self._vector_count = None
self._rounding_mode = None
for k,v in self.op_attrs.items():
if v is not None and ('size' in k or 'count' in k):
v = int(v)
setattr(self, '_%s'%k, v)
# determine the output size
#pylint:disable=no-member
self._output_type = pyvex.typeOfIROp(name)
#pylint:enable=no-member
self._output_size_bits = size_bits(self._output_type)
l.debug("... VEX says the output size should be %s", self._output_size_bits)
size_check = self._to_size is None or (self._to_size*2 if self._generic_name == 'DivMod' else self._to_size) == self._output_size_bits
if not size_check:
raise SimOperationError("VEX output size doesn't match detected output size")
#
# Some categorization
#
generic_names.add(self._generic_name)
if self._conversion is not None:
conversions[(self._from_type, self._from_signed, self._to_type, self._to_signed)].append(self)
if len({self._vector_type, self._from_type, self._to_type} & {'F', 'D'}) != 0:
# print self.op_attrs
self._float = True
if len({self._vector_type, self._from_type, self._to_type} & {'D'}) != 0:
l.debug('... aborting on BCD!')
# fp_ops.add(self.name)
raise UnsupportedIROpError("BCD ops aren't supported")
else:
self._float = False
#
# Now determine the operation
#
self._calculate = None
# is it explicitly implemented?
if hasattr(self, '_op_' + name):
self._calculate = getattr(self, '_op_' + name)
# if the generic name is None and there's a conversion present, this is a standard
# widening or narrowing or sign-extension
elif self._generic_name is None and self._conversion:
# convert int to float
if self._float and self._from_type == 'I':
self._calculate = self._op_int_to_fp
# convert float to differently-sized float
elif self._from_type == 'F' and self._to_type == 'F':
self._calculate = self._op_fp_to_fp
elif self._from_type == 'F' and self._to_type == 'I':
self._calculate = self._op_fp_to_int
# this concatenates the args into the high and low halves of the result
elif self._from_side == 'HL':
l.debug("... using simple concat")
self._calculate = self._op_concat
# this just returns the high half of the first arg
elif self._from_size > self._to_size and self._from_side == 'HI':
l.debug("... using hi half")
self._calculate = self._op_hi_half
# this just returns the high half of the first arg
elif self._from_size > self._to_size and self._from_side in ('L', 'LO'):
l.debug("... using lo half")
self._calculate = self._op_lo_half
elif self._from_size > self._to_size and self._from_side is None:
l.debug("... just extracting")
self._calculate = self._op_extract
elif self._from_size < self._to_size and self.is_signed:
l.debug("... using simple sign-extend")
self._calculate = self._op_sign_extend
elif self._from_size < self._to_size and not self.is_signed:
l.debug("... using simple zero-extend")
self._calculate = self._op_zero_extend
else:
l.error("%s is an unexpected conversion operation configuration", self)
assert False
# other conversions
elif self._conversion and self._generic_name != 'Round' and self._generic_name != 'Reinterp':
if self._generic_name == "DivMod":
l.debug("... using divmod")
self._calculate = self._op_divmod
else:
unsupported_conversions.append(self.name)
common_unsupported_generics[self._generic_name] += 1
# generic bitwise
elif self._generic_name in bitwise_operation_map:
l.debug("... using generic mapping op")
assert self._from_side is None
self._calculate = self._op_mapped
# generic mapping operations
elif self._generic_name in arithmetic_operation_map or self._generic_name in shift_operation_map:
l.debug("... using generic mapping op")
assert self._from_side is None
if self._float and self._vector_zero:
self._calculate = self._op_float_op_just_low
elif self._float and self._vector_count is None:
self._calculate = self._op_float_mapped
elif not self._float and self._vector_count is not None:
self._calculate = self._op_vector_mapped
else:
self._calculate = self._op_mapped
# TODO: clean up this mess
# specifically-implemented generics
elif self._float and hasattr(self, '_op_fgeneric_%s' % self._generic_name):
l.debug("... using generic method")
calculate = getattr(self, '_op_fgeneric_%s' % self._generic_name)
if self._vector_size is not None and \
not hasattr(calculate, 'supports_vector'):
# unsupported vector ops
vector_operations.append(name)
else:
self._calculate = calculate
elif not self._float and hasattr(self, '_op_generic_%s' % self._generic_name):
l.debug("... using generic method")
calculate = getattr(self, '_op_generic_%s' % self._generic_name)
if self._vector_size is not None and \
not hasattr(calculate, 'supports_vector'):
# unsupported vector ops
vector_operations.append(name)
else:
self._calculate = calculate
else:
common_unsupported_generics[self._generic_name] += 1
other_operations.append(name)
# if we're here and calculate is None, we don't support this
if self._calculate is None:
l.debug("... can't support operations")
raise UnsupportedIROpError("no calculate function identified for %s" % self.name)
def __repr__(self):
return "<SimIROp %s>" % self.name
def _dbg_print_attrs(self):
print "Operation: %s" % self.name
for k,v in self.op_attrs.items():
if v is not None and v != "":
print "... %s: %s" % (k, v)
def calculate(self, *args):
if not all(isinstance(a, claripy.ast.Base) for a in args):
import ipdb; ipdb.set_trace()
raise SimOperationError("IROp needs all args as claripy expressions")
if not self._float:
args = tuple(arg.to_bv() for arg in args)
try:
return self.extend_size(self._calculate(args))
except (TypeError, ValueError, SimValueError, claripy.ClaripyError):
e_type, value, traceback = sys.exc_info()
raise SimOperationError, ("%s._calculate() raised exception" % self.name, e_type, value), traceback
except ZeroDivisionError:
raise SimOperationError("divide by zero!")
def extend_size(self, o):
cur_size = o.size()
if cur_size < self._output_size_bits:
l.debug("Extending output of %s from %d to %d bits", self.name, cur_size, self._output_size_bits)
ext_size = self._output_size_bits - cur_size
if self._to_signed == 'S' or (self._from_signed == 'S' and self._to_signed is None):
return claripy.SignExt(ext_size, o)
else:
return claripy.ZeroExt(ext_size, o)
elif cur_size > self._output_size_bits:
__import__('ipdb').set_trace()
raise SimOperationError('output of %s is too big', self.name)
else:
return o
@property
def is_signed(self):
return self._from_signed == 'S' or self._vector_signed == 'S'
#
# The actual operation handlers go here.
#
#pylint:disable=no-self-use,unused-argument
def _op_mapped(self, args):
if self._from_size is not None:
sized_args = [ ]
for a in args:
s = a.size()
if s == self._from_size:
sized_args.append(a)
elif s < self._from_size:
if self.is_signed:
sized_args.append(claripy.SignExt(self._from_size - s, a))
else:
sized_args.append(claripy.ZeroExt(self._from_size - s, a))
elif s > self._from_size:
raise SimOperationError("operation %s received too large an argument" % self.name)
else:
sized_args = args
if self._generic_name in bitwise_operation_map:
o = bitwise_operation_map[self._generic_name]
elif self._generic_name in arithmetic_operation_map:
o = arithmetic_operation_map[self._generic_name]
elif self._generic_name in shift_operation_map:
o = shift_operation_map[self._generic_name]
else:
raise SimOperationError("op_mapped called with invalid mapping, for %s" % self.name)
return getattr(claripy.ast.BV, o)(*sized_args)
def _translate_rm(self, rm_num):
if not rm_num.symbolic:
return rm_map[rm_num._model_concrete.value]
else:
l.warning("symbolic rounding mode found, using default")
return claripy.fp.RM.default()
def _op_float_mapped(self, args):
NO_RM = { 'Neg', 'Abs' }
op = getattr(claripy, 'fp' + self._generic_name)
if self._generic_name in NO_RM:
return op(*args)
rm = self._translate_rm(args[0])
return op(rm, *args[1:])
def _op_vector_mapped(self, args):
chopped_args = ([claripy.Extract((i + 1) * self._vector_size - 1, i * self._vector_size, a) for a in args]
for i in reversed(xrange(self._vector_count)))
return claripy.Concat(*(self._op_mapped(ca) for ca in chopped_args))
def _op_float_op_just_low(self, args):
chopped = [arg[(self._vector_size - 1):0].raw_to_fp() for arg in args]
result = getattr(claripy, 'fp' + self._generic_name)(claripy.fp.RM.default(), *chopped).to_bv()
return claripy.Concat(args[0][(args[0].length - 1):self._vector_size], result)
def _op_concat(self, args):
return claripy.Concat(*args)
def _op_hi_half(self, args):
return claripy.Extract(args[0].size()-1, args[0].size()/2, args[0])
def _op_lo_half(self, args):
return claripy.Extract(args[0].size()/2 - 1, 0, args[0])
def _op_extract(self, args):
return claripy.Extract(self._to_size - 1, 0, args[0])
def _op_sign_extend(self, args):
return claripy.SignExt(self._to_size - args[0].size(), args[0])
def _op_zero_extend(self, args):
return claripy.ZeroExt(self._to_size - args[0].size(), args[0])
def vector_args(self, args):
"""
Yields each of the individual lane pairs from the arguments, in
        order from most significant to least significant.
"""
for i in reversed(range(self._vector_count)):
pieces = []
for vec in args:
pieces.append(vec[(i+1) * self._vector_size - 1 : i * self._vector_size])
yield pieces
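    # Illustrative example: with _vector_size=8 and _vector_count=2, args of
    # (0xAB12, 0xCD34) yield [0xAB, 0xCD] first (the high lanes), then
    # [0x12, 0x34] (the low lanes).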
def _op_generic_Mull(self, args):
op1, op2 = args
op1 = self.extend_size(op1)
op2 = self.extend_size(op2)
return op1 * op2
def _op_generic_Clz(self, args):
"""Count the leading zeroes"""
wtf_expr = claripy.BVV(self._from_size, self._from_size)
for a in range(self._from_size):
bit = claripy.Extract(a, a, args[0])
wtf_expr = claripy.If(bit==1, claripy.BVV(self._from_size-a-1, self._from_size), wtf_expr)
return wtf_expr
def _op_generic_Ctz(self, args):
"""Count the trailing zeroes"""
wtf_expr = claripy.BVV(self._from_size, self._from_size)
for a in reversed(range(self._from_size)):
bit = claripy.Extract(a, a, args[0])
wtf_expr = claripy.If(bit == 1, claripy.BVV(a, self._from_size), wtf_expr)
return wtf_expr
def generic_minmax(self, args, cmp_op):
res_comps = []
for i in reversed(range(self._vector_count)):
a_comp = claripy.Extract((i+1) * self._vector_size - 1,
i * self._vector_size,
args[0])
b_comp = claripy.Extract((i+1) * self._vector_size - 1,
i * self._vector_size,
args[1])
res_comps.append(claripy.If(cmp_op(a_comp, b_comp),
a_comp, b_comp))
return claripy.Concat(*res_comps)
@supports_vector
def _op_generic_Min(self, args):
return self.generic_minmax(args, claripy.SLT if self.is_signed else claripy.ULT)
@supports_vector
def _op_generic_Max(self, args):
return self.generic_minmax(args, claripy.SGT if self.is_signed else claripy.UGT)
@supports_vector
def _op_generic_GetMSBs(self, args):
size = self._vector_count * self._vector_size
bits = [claripy.Extract(i, i, args[0]) for i in range(size - 1, 6, -8)]
return claripy.Concat(*bits)
@supports_vector
def _op_generic_InterleaveLO(self, args):
s = self._vector_size
c = self._vector_count
dst_vector = [ args[0][(i+1)*s-1:i*s] for i in xrange(c/2) ]
src_vector = [ args[1][(i+1)*s-1:i*s] for i in xrange(c/2) ]
return claripy.Concat(*itertools.chain.from_iterable(reversed(zip(dst_vector, src_vector))))
def generic_compare(self, args, comparison):
if self._vector_size is not None:
res_comps = []
for i in reversed(range(self._vector_count)):
a_comp = claripy.Extract((i+1) * self._vector_size - 1,
i * self._vector_size,
args[0])
b_comp = claripy.Extract((i+1) * self._vector_size - 1,
i * self._vector_size,
args[1])
res_comps.append(claripy.If(comparison(a_comp, b_comp),
claripy.BVV(-1, self._vector_size),
claripy.BVV(0, self._vector_size)))
return claripy.Concat(*res_comps)
else:
return claripy.If(comparison(args[0], args[1]), claripy.BVV(1, 1), claripy.BVV(0, 1))
@supports_vector
def _op_generic_CmpEQ(self, args):
return self.generic_compare(args, operator.eq)
_op_generic_CasCmpEQ = _op_generic_CmpEQ
def _op_generic_CmpNE(self, args):
return self.generic_compare(args, operator.ne)
_op_generic_ExpCmpNE = _op_generic_CmpNE
_op_generic_CasCmpNE = _op_generic_CmpNE
@supports_vector
def _op_generic_CmpNEZ(self, args):
assert len(args) == 1
args = [args[0], claripy.BVV(0, args[0].size())]
return self.generic_compare(args, operator.ne) # TODO: Is this the correct action for scalars?
@supports_vector
def _op_generic_CmpGT(self, args):
return self.generic_compare(args, claripy.SGT if self.is_signed else claripy.UGT)
_op_generic_CasCmpGT = _op_generic_CmpGT
@supports_vector
def _op_generic_CmpGE(self, args):
return self.generic_compare(args, claripy.SGE if self.is_signed else claripy.UGE)
_op_generic_CasCmpGE = _op_generic_CmpGE
@supports_vector
def _op_generic_CmpLT(self, args):
return self.generic_compare(args, claripy.SLT if self.is_signed else claripy.ULT)
_op_generic_CasCmpLT = _op_generic_CmpLT
@supports_vector
def _op_generic_CmpLE(self, args):
return self.generic_compare(args, claripy.SLE if self.is_signed else claripy.ULE)
_op_generic_CasCmpLE = _op_generic_CmpLE
def _op_generic_CmpORD(self, args):
x = args[0]
y = args[1]
s = self._from_size
cond = x < y if self.is_signed else claripy.ULT(x, y)
return claripy.If(x == y, claripy.BVV(0x2, s), claripy.If(cond, claripy.BVV(0x8, s), claripy.BVV(0x4, s)))
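    # The constants mirror the PowerPC-style flag encoding that Iop_CmpORD
    # models: 0x8 = less than, 0x4 = greater than, 0x2 = equal.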
def generic_shift_thing(self, args, op):
if self._vector_size is not None:
shifted = []
if args[1].length != self._vector_size:
shift_by = args[1].zero_extend(self._vector_size - args[1].length)
else:
shift_by = args[1]
for i in reversed(range(self._vector_count)):
left = claripy.Extract((i+1) * self._vector_size - 1,
i * self._vector_size,
args[0])
shifted.append(op(left, shift_by))
return claripy.Concat(*shifted)
else:
raise SimOperationError("you done fucked")
@supports_vector
def _op_generic_ShlN(self, args):
return self.generic_shift_thing(args, operator.lshift)
@supports_vector
def _op_generic_ShrN(self, args):
return self.generic_shift_thing(args, claripy.LShR)
@supports_vector
def _op_generic_SarN(self, args):
return self.generic_shift_thing(args, operator.rshift)
@supports_vector
def _op_generic_HAdd(self, args):
"""
Halving add, for some ARM NEON instructions.
"""
components = []
for a, b in self.vector_args(args):
if self.is_signed:
a = a.sign_extend(self._vector_size)
b = b.sign_extend(self._vector_size)
else:
a = a.zero_extend(self._vector_size)
b = b.zero_extend(self._vector_size)
components.append((a + b)[self._vector_size:1])
return claripy.Concat(*components)
@supports_vector
def _op_generic_HSub(self, args):
"""
Halving subtract, for some ARM NEON instructions.
"""
components = []
for a, b in self.vector_args(args):
if self.is_signed:
a = a.sign_extend(self._vector_size)
b = b.sign_extend(self._vector_size)
else:
a = a.zero_extend(self._vector_size)
b = b.zero_extend(self._vector_size)
components.append((a - b)[self._vector_size:1])
return claripy.Concat(*components)
@supports_vector
def _op_generic_QAdd(self, args):
"""
Saturating add.
"""
components = []
for a, b in self.vector_args(args):
top_a = a[self._vector_size-1]
top_b = b[self._vector_size-1]
res = a + b
top_r = res[self._vector_size-1]
if self.is_signed:
big_top_r = (~top_r).zero_extend(self._vector_size-1)
cap = (claripy.BVV(-1, self._vector_size)/2) + big_top_r
cap_cond = ((~(top_a ^ top_b)) & (top_a ^ top_r)) == 1
else:
cap = claripy.BVV(-1, self._vector_size)
cap_cond = claripy.ULT(res, a)
components.append(claripy.If(cap_cond, cap, res))
return claripy.Concat(*components)
@supports_vector
def _op_generic_QSub(self, args):
"""
Saturating subtract.
"""
components = []
for a, b in self.vector_args(args):
top_a = a[self._vector_size-1]
top_b = b[self._vector_size-1]
res = a - b
top_r = res[self._vector_size-1]
if self.is_signed:
big_top_r = (~top_r).zero_extend(self._vector_size-1)
cap = (claripy.BVV(-1, self._vector_size)/2) + big_top_r
cap_cond = ((top_a ^ top_b) & (top_a ^ top_r)) == 1
else:
cap = claripy.BVV(0, self._vector_size)
cap_cond = claripy.UGT(res, a)
components.append(claripy.If(cap_cond, cap, res))
return claripy.Concat(*components)
def _op_divmod(self, args):
if self.is_signed:
quotient = (args[0].SDiv(claripy.SignExt(self._from_size - self._to_size, args[1])))
remainder = (args[0].SMod(claripy.SignExt(self._from_size - self._to_size, args[1])))
quotient_size = self._to_size
remainder_size = self._to_size
return claripy.Concat(
claripy.Extract(remainder_size - 1, 0, remainder),
claripy.Extract(quotient_size - 1, 0, quotient)
)
else:
quotient = (args[0] / claripy.ZeroExt(self._from_size - self._to_size, args[1]))
remainder = (args[0] % claripy.ZeroExt(self._from_size - self._to_size, args[1]))
quotient_size = self._to_size
remainder_size = self._to_size
return claripy.Concat(
claripy.Extract(remainder_size - 1, 0, remainder),
claripy.Extract(quotient_size - 1, 0, quotient)
)
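    # Illustrative example: an unsigned DivMod64to32 with args[0] = 7 (64-bit)
    # and args[1] = 2 (32-bit) gives quotient 3 and remainder 1, so the 64-bit
    # result is Concat(remainder, quotient) = 0x00000001_00000003.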
#pylint:enable=no-self-use,unused-argument
# FP!
def _op_int_to_fp(self, args):
rm_exists = self._from_size != 32 or self._to_size != 64
rm = self._translate_rm(args[0] if rm_exists else claripy.BVV(0, 32))
arg = args[1 if rm_exists else 0]
return arg.signed_to_fp(rm, claripy.fp.FSort.from_size(self._output_size_bits))
def _op_fp_to_fp(self, args):
rm_exists = self._from_size != 32 or self._to_size != 64
rm = self._translate_rm(args[0] if rm_exists else claripy.BVV(0, 32))
arg = args[1 if rm_exists else 0].raw_to_fp()
return arg.raw_to_fp().to_fp(rm, claripy.fp.FSort.from_size(self._output_size_bits))
def _op_fp_to_int(self, args):
rm = self._translate_rm(args[0])
arg = args[1].raw_to_fp()
if self._to_signed == 'S':
return claripy.fpToSBV(rm, arg, self._to_size)
else:
return claripy.fpToUBV(rm, arg, self._to_size)
def _op_fgeneric_Cmp(self, args): #pylint:disable=no-self-use
a, b = args[0].raw_to_fp(), args[1].raw_to_fp()
return claripy.ite_cases((
(claripy.fpLT(a, b), claripy.BVV(0x01, 32)),
(claripy.fpGT(a, b), claripy.BVV(0x00, 32)),
(claripy.fpEQ(a, b), claripy.BVV(0x40, 32)),
), claripy.BVV(0x45, 32))
def _op_fgeneric_Reinterp(self, args):
if self._to_type == 'I':
return args[0].to_bv()
elif self._to_type == 'F':
return args[0].raw_to_fp()
else:
raise SimOperationError("unsupport Reinterp _to_type")
@supports_vector
def _op_fgeneric_Round(self, args):
if self._vector_size is not None:
rm = {
'RM': claripy.fp.RM_RTN,
'RP': claripy.fp.RM_RTP,
'RN': claripy.fp.RM_RNE,
'RZ': claripy.fp.RM_RTZ,
}[self._rounding_mode]
rounded = []
for i in reversed(range(self._vector_count)):
#pylint:disable=no-member
left = claripy.Extract(
(i+1) * self._vector_size - 1, i * self._vector_size, args[0]
).raw_to_fp()
rounded.append(claripy.fpToSBV(rm, left, self._vector_size))
return claripy.Concat(*rounded)
else:
# note: this a bad solution because it will cut off high values
# TODO: look into fixing this
rm = self._translate_rm(args[0])
rounded_bv = claripy.fpToSBV(rm, args[1].raw_to_fp(), args[1].length)
return claripy.fpToFP(claripy.fp.RM_RNE, rounded_bv, claripy.fp.FSort.from_size(args[1].length))
def _op_generic_pack_StoU_saturation(self, args, src_size, dst_size):
"""
Generic pack with unsigned saturation.
        Split args into chunks of src_size signed bits, saturate each into an unsigned chunk of dst_size bits,
        then concatenate the chunks, giving a BV of len(args) * len(args[0]) * dst_size / src_size bits.
"""
if src_size <= 0 or dst_size <= 0:
raise SimOperationError("Can't pack from or to zero or negative size" % self.name)
result = None
max_value = claripy.BVV(-1, dst_size).zero_extend(src_size - dst_size) #max value for unsigned saturation
min_value = claripy.BVV(0, src_size) #min unsigned value always 0
for v in args:
for src_value in v.chop(src_size):
dst_value = self._op_generic_StoU_saturation(src_value, min_value, max_value)
                dst_value = dst_value[dst_size - 1:0]  # truncate the saturated value to dst_size bits
if result is None:
result = dst_value
else:
result = self._op_concat((result, dst_value))
return result
def _op_generic_StoU_saturation(self, value, min_value, max_value): #pylint:disable=no-self-use
"""
Return unsigned saturated BV from signed BV.
Min and max value should be unsigned.
"""
return claripy.If(
claripy.SGT(value, max_value),
max_value,
claripy.If(claripy.SLT(value, min_value), min_value, value))
def _op_Iop_64x4toV256(self, args) :
return self._op_concat(args)
def _op_Iop_QNarrowBin16Sto8Ux16(self, args):
"""
        PACKUSWB: pack with unsigned saturation, two 128-bit operand version.
        VPACKUSWB: pack with unsigned saturation, three 128-bit operand version.
"""
return self._op_generic_pack_StoU_saturation(args, 16, 8)
def _op_Iop_QNarrowBin16Sto8Ux8(self, args):
"""
        PACKUSWB: pack with unsigned saturation, two 64-bit operand version.
"""
return self._op_generic_pack_StoU_saturation(args, 16, 8)
#def _op_Iop_Yl2xF64(self, args):
# rm = self._translate_rm(args[0])
# arg2_bv = args[2].to_bv()
# # IEEE754 double looks like this:
# # SEEEEEEEEEEEFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
# # thus, we extract the exponent bits, re-bias them, then
# # (signed) convert them back into an FP value for the integer
# # part of the log. then we make the approximation that log2(x)
# # = x - 1 for 1.0 <= x < 2.0 to account for the mantissa.
# # the bias for doubles is 1023
# arg2_exp = (arg2_bv[62:52] - 1023).signed_to_fp(rm, claripy.fp.FSORT_DOUBLE)
# arg2_mantissa = claripy.Concat(claripy.BVV(int('001111111111', 2), 12), arg2_bv[51:0]).raw_to_fp()
# # this is the hacky approximation:
# log2_arg2_mantissa = claripy.fpSub(rm, arg2_mantissa, claripy.FPV(1.0, claripy.fp.FSORT_DOUBLE))
# return claripy.fpMul(rm, args[1].raw_to_fp(), claripy.fpAdd(rm, arg2_exp, log2_arg2_mantissa))
#def _op_Iop_Yl2xp1F64(self, args):
# rm_raw, arg1, arg2 = args
# rm = self._translate_rm(rm_raw)
# arg2_p1 = claripy.fpAdd(rm, arg2.raw_to_fp(), claripy.FPV(1.0, claripy.fp.FSORT_DOUBLE))
# return self._op_Iop_Yl2xF64((rm_raw, arg1, arg2_p1))
@staticmethod
def pow(rm, arg, n):
out = claripy.FPV(1.0, arg.sort)
for _ in xrange(n):
out = claripy.fpMul(rm, arg, out)
return out
#def _op_Iop_SinF64(self, args):
# rm, arg = args
# rm = self._translate_rm(rm)
# rounds = 15
# accumulator = claripy.FPV(0.0, arg.sort)
# factorialpart = 1.0
# for i in xrange(1, rounds + 1):
# term = claripy.fpDiv(rm, self.pow(rm, arg, 2*i - 1), claripy.FPV(float(factorialpart), arg.sort))
# factorialpart *= ((i*2) + 1) * (i*2)
# if i % 2 == 1:
# accumulator = claripy.fpAdd(rm, accumulator, term)
# else:
# accumulator = claripy.fpSub(rm, accumulator, term)
# return accumulator
#def _op_Iop_CosF64(self, args):
# rm, arg = args
# rm = self._translate_rm(rm)
# rounds = 20
# accumulator = claripy.FPV(1.0, arg.sort)
# factorialpart = 2.0
# for i in xrange(1, rounds + 1):
# term = claripy.fpDiv(rm, self.pow(rm, arg, 2*i), claripy.FPV(float(factorialpart), arg.sort))
# factorialpart *= (i*2 + 1) * (i*2 + 2)
# if i % 2 == 1:
# accumulator = claripy.fpSub(rm, accumulator, term)
# else:
# accumulator = claripy.fpAdd(rm, accumulator, term)
# return accumulator
#
# Op Handler
#
#from . import old_irop
def translate(state, op, s_args):
if op in operations:
try:
irop = operations[op]
if irop._float and not options.SUPPORT_FLOATING_POINT in state.options:
raise UnsupportedIROpError("floating point support disabled")
return irop.calculate( *s_args)
except ZeroDivisionError:
if state.mode == 'static' and len(s_args) == 2 and state.se.is_true(s_args[1] == 0):
# Monkeypatch the dividend to another value instead of 0
s_args[1] = state.se.BVV(1, s_args[1].size())
return operations[op].calculate( *s_args)
else:
raise
except SimOperationError:
l.warning("IROp error (for operation %s)", op, exc_info=True)
if options.BYPASS_ERRORED_IROP in state.options:
return state.se.Unconstrained("irop_error", operations[op]._output_size_bits)
else:
raise
l.error("Unsupported operation: %s", op)
raise UnsupportedIROpError("Unsupported operation: %s" % op)
from . import size_bits
from ...errors import UnsupportedIROpError, SimOperationError, SimValueError
from ... import sim_options as options
make_operations()
| Ruide/angr-dev | angr/angr/engines/vex/irop.py | Python | bsd-2-clause | 34,780 |
from setuptools import setup, find_packages
setup(
name = 'evernote',
version = '1.22',
author = 'Evernote Corporation',
author_email = 'en-support@evernote.com',
url = 'http://www.evernote.com/about/developer/api/',
description = 'Python bindings to the Evernote API.',
packages = find_packages('lib'),
package_dir = {'': 'lib'},
)
| zapier/evernote-sdk-python-old | setup.py | Python | bsd-2-clause | 366 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import timezone
from parler.managers import TranslatableManager, TranslatableQuerySet
class PostQuerySet(TranslatableQuerySet):
def published(self, **kwargs):
return self.filter(date_published__lte=timezone.now(), **kwargs)
class PostManager(TranslatableManager):
queryset_class = PostQuerySet
def published(self, request, **kwargs):
queryset = self.public(**kwargs).published()
if hasattr(request, 'user') and request.user.is_authenticated:
if request.user.is_staff:
queryset = queryset | self.draft(**kwargs)
queryset = queryset | self.private(request.user, **kwargs)
return queryset
def draft(self, **kwargs):
return self.get_queryset().filter(status=0, **kwargs)
def private(self, user, **kwargs):
return self.get_queryset().filter(status=1, author=user, **kwargs)
def public(self, **kwargs):
return self.get_queryset().filter(status=2, **kwargs)
def hidden(self, **kwargs):
return self.get_queryset().filter(status=3, **kwargs)
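# Status values implied by the filters above (inferred from this file, not
# from external docs): 0 = draft, 1 = private, 2 = public, 3 = hidden.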
| dinoperovic/djangocms-blogit | blogit/managers.py | Python | bsd-3-clause | 1,157 |
from django.db import models
from django.contrib import admin
class Company(models.Model):
company = models.CharField(u'Компания', max_length=100)
logo = models.ImageField(u'Логотип', upload_to='logos/', blank=True)
def __unicode__(self):
return self.company
class Meta:
verbose_name = u'компания'
verbose_name_plural = u'компании'
class Date(models.Model):
date = models.DateField(u'Дата')
def __unicode__(self):
return u'Цены на топливо за %s' % self.date.strftime('%d.%m.%Y')
class Meta:
verbose_name = u'цены'
verbose_name_plural = u'цены на топливо'
class Price(models.Model):
company = models.ForeignKey(Company, verbose_name=u'Компания')
date = models.ForeignKey(Date, related_name='price')
a98 = models.FloatField(u'A98')
a95 = models.FloatField(u'A95')
a92 = models.FloatField(u'A92')
a76 = models.FloatField(u'A76')
dt = models.FloatField(u'ДТ')
class PriceInline(admin.TabularInline):
model = Price
extra = 5
class DateAdmin(admin.ModelAdmin):
inlines = [PriceInline]
class CompanyAdmin(admin.ModelAdmin):
pass
admin.site.register(Date, DateAdmin)
admin.site.register(Company, CompanyAdmin)
| AlexStarov/site_news | Apps/fuel/models.py | Python | bsd-3-clause | 1,375 |
import os
import glob
import re
import shutil
import sys
import six
import nbgrader.apps
from textwrap import dedent
from clear_docs import run, clear_notebooks
def autogen_command_line(root):
"""Generate command line documentation."""
header = dedent(
"""
``{}``
========================
::
"""
)
apps = [
'AssignApp',
'AutogradeApp',
'CollectApp',
'ExtensionApp',
'FeedbackApp',
'FetchApp',
'FormgradeApp',
'ListApp',
'NbGraderApp',
'ReleaseApp',
'SubmitApp',
'ValidateApp'
]
print('Generating command line documentation')
orig_stdout = sys.stdout
for app in apps:
cls = getattr(nbgrader.apps, app)
buf = sys.stdout = six.StringIO()
cls().print_help(True)
buf.flush()
helpstr = buf.getvalue()
helpstr = "\n".join([" " + x for x in helpstr.split("\n")])
name = cls.name.replace(" ", "-")
destination = os.path.join(root, 'command_line_tools/{}.rst'.format(name))
with open(destination, 'w') as f:
f.write(header.format(cls.name.replace("-", " ")))
f.write(helpstr)
sys.stdout = orig_stdout
def autogen_config(root):
"""Generate an example configuration file"""
header = dedent(
"""
Configuration options
=====================
These options can be set in ``nbgrader_config.py``, or at the command line when you start it.
"""
)
print('Generating example configuration file')
config = nbgrader.apps.NbGraderApp().document_config_options()
destination = os.path.join(root, 'config_options.rst')
with open(destination, 'w') as f:
f.write(header)
f.write(config)
def build_notebooks(root):
"""Execute notebooks and convert them to rst"""
print("Executing and converting notebooks in '{}'...".format(os.path.abspath(root)))
cwd = os.getcwd()
os.chdir(root)
# hack to convert links to ipynb files to html
for filename in sorted(glob.glob('user_guide/*.ipynb')):
run([
sys.executable, '-m', 'jupyter', 'nbconvert',
'--to', 'rst',
'--execute',
'--FilesWriter.build_directory=user_guide',
filename
])
filename = os.path.splitext(filename)[0] + '.rst'
with open(filename, 'r') as fh:
source = fh.read()
source = re.sub(r"<([^><]*)\.ipynb>", r"<\1.html>", source)
with open(filename, 'w') as fh:
fh.write(source)
# convert examples to html
for dirname, dirnames, filenames in os.walk('user_guide'):
if dirname == 'user_guide':
continue
if dirname == 'user_guide/images':
continue
build_directory = os.path.join('extra_files', dirname)
if not os.path.exists(build_directory):
os.makedirs(build_directory)
for filename in sorted(filenames):
if filename.endswith('.ipynb'):
run([
sys.executable, '-m', 'jupyter', 'nbconvert',
'--to', 'html',
"--FilesWriter.build_directory='{}'".format(build_directory),
os.path.join(dirname, filename)
])
else:
shutil.copy(
os.path.join(dirname, filename),
os.path.join(build_directory, filename))
os.chdir(cwd)
if __name__ == "__main__":
root = os.path.abspath(os.path.dirname(__file__))
clear_notebooks(root)
build_notebooks(root)
autogen_command_line(root)
autogen_config(root)
| dementrock/nbgrader | docs/source/build_docs.py | Python | bsd-3-clause | 3,741 |
from __future__ import print_function, division, absolute_import #, unicode_literals # not casa compatible
from builtins import bytes, dict, object, range, map, input#, str # not casa compatible
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
from io import open
import os.path
import subprocess
import shutil
from time import sleep
from elasticsearch import Elasticsearch, RequestError, TransportError, helpers, NotFoundError
from urllib3.connection import ConnectionError, NewConnectionError
import logging
logging.getLogger('elasticsearch').setLevel(30)
logger = logging.getLogger(__name__)
logger.setLevel(20)
# eventually should be updated to search.realfast.io/api with auth
es = Elasticsearch(['realfast.nrao.edu:9200'], timeout=60, max_retries=3, retry_on_timeout=True)
###
# Indexing stuff
###
def indexscan(config=None, inmeta=None, sdmfile=None, sdmscan=None,
sdmsubscan=1, bdfdir=None, preferences=None, datasource=None,
indexprefix='new'):
""" Index properties of scan.
Uses data source (config, sdm, etc.) to define metadata object.
"""
from rfpipe.metadata import make_metadata
from numpy import degrees
from realfast import heuristics
meta = make_metadata(inmeta=inmeta, config=config, sdmfile=sdmfile,
sdmscan=sdmscan, bdfdir=bdfdir)
if meta.datasource is None:
if datasource is not None:
meta.datasource = datasource
elif config is not None:
meta.datasource = 'vys'
elif (sdmfile is not None) and (sdmscan is not None):
meta.datasource = 'sdm'
else:
logger.warn("Could not determine datasource for indexing.")
# define dict for scan properties to index
scandict = {}
scandict['datasetId'] = meta.datasetId
scandict['scanId'] = meta.scanId
# scandict['projid'] = 'Unknown'
scandict['scanNo'] = int(meta.scan)
scandict['subscanNo'] = int(meta.subscan)
scandict['source'] = meta.source
ra, dec = degrees(meta.radec)
scandict['ra'] = float(ra)
scandict['dec'] = float(dec)
scandict['startTime'] = float(meta.starttime_mjd)
scandict['stopTime'] = float(meta.endtime_mjd)
scandict['datasource'] = meta.datasource
scandict['scan_intent'] = meta.intent # assumes ,-delimited string
scandict['inttime'] = meta.inttime
band = heuristics.reffreq_to_band(meta.spw_reffreq)
scandict['band'] = band
# if preferences provided, it will connect them by a unique name
if preferences:
scandict['prefsname'] = preferences.name
scandict['searchtype'] = preferences.searchtype
scandict['fftmode'] = preferences.fftmode
# push scan info with unique id of scanId
index = indexprefix+'scans'
res = pushdata(scandict, index=index, Id=meta.scanId,
command='index')
if res == 1:
logger.info('Indexed scanId {0} to {1}'
.format(meta.scanId, index))
else:
logger.warn('Scan config not indexed for {0}'.format(meta.scanId))
if preferences:
indexprefs(preferences, indexprefix=indexprefix)
def indexscanstatus(scanId, indexprefix='new', **kwargs):
""" Update status fields for scanId
    Can set the fields 'nsegment', 'pending', 'finished', and 'errors'.
"""
index = indexprefix+'scans'
allowed = ['nsegment', 'pending', 'finished', 'errors']
fieldlist = [field for (field, value) in iteritems(kwargs)
if field in allowed]
valuelist = [int(value) for (field, value) in iteritems(kwargs)
if field in allowed]
try:
res = update_fields(index, fieldlist, valuelist, scanId)
if res:
logger.info("Updated processing status for {0}: {1}"
.format(scanId, list(zip(fieldlist, valuelist))))
else:
logger.warn("Update of status for scan {0} failed".format(scanId))
except TransportError:
logger.warn("Could not update fields due to version conflict. Skipping this update.")
def indexprefs(preferences, indexprefix='new'):
""" Index preferences with id equal to hash of contents.
indexprefix allows specification of set of indices ('test', 'new').
Use indexprefix='new' for production.
"""
index = indexprefix+'preferences'
res = pushdata(preferences.ordered, index=index,
Id=preferences.name, command='index')
if res == 1:
logger.info('Indexed preference {0} to {1}'
.format(preferences.name, index))
else:
logger.debug('Preferences not indexed for {0}'.format(preferences.name))
def indexcands(candcollection, scanId, tags=None, url_prefix=None,
indexprefix='new'):
""" Takes candidate collection and pushes to index
Connects to preferences via hashed name
    scanId is added to associate a cand with a given scan.
Assumes scanId is defined as:
datasetId dot scanNo dot subscanNo.
tags is a comma-delimited string used to fill tag field in index.
indexprefix allows specification of set of indices ('test', 'new').
Use indexprefix='new' for production.
"""
from numpy import degrees, cos, radians
if tags is None:
tags = ''
index = indexprefix+'cands'
# create new tag string with standard format to fill in blanks
allowed_tags = ["rfi", "bad", "noise", "interesting", "astrophysical",
"mock"]
tagstr = ','.join([tag for tag in tags.split(',') if tag in allowed_tags])
candarr = candcollection.array
prefs = candcollection.prefs
candmjd = candcollection.candmjd
canddm = candcollection.canddm
canddt = candcollection.canddt
cluster = candcollection.cluster
clustersize = candcollection.clustersize
snrtot = candcollection.snrtot
st = candcollection.state
res = 0
for i in range(len(candarr)):
# get features. use .item() to cast to default types
canddict = dict(list(zip(candarr.dtype.names, candarr[i].item())))
# get reference ra, dec
segment = canddict['segment']
pc0 = st.get_pc(segment)
ra_ctr, dec_ctr = st.get_radec(pc=pc0)
# fill optional fields
canddict['scanId'] = scanId
datasetId, scan, subscan = scanId.rsplit('.', 2)
canddict['datasetId'] = datasetId
canddict['scan'] = int(scan)
canddict['subscan'] = int(subscan)
canddict['source'] = candcollection.metadata.source
canddict['tags'] = tagstr
canddict['tagcount'] = 0
canddict['candmjd'] = float(candmjd[i])
canddict['canddm'] = float(canddm[i])
canddict['canddt'] = float(canddt[i])
canddict['cluster'] = int(cluster[i])
canddict['clustersize'] = int(clustersize[i])
canddict['snrtot'] = float(snrtot[i])
canddict['ra'] = degrees(ra_ctr + canddict['l1']/cos(dec_ctr))
canddict['dec'] = degrees(dec_ctr + canddict['m1'])
canddict['png_url'] = ''
if prefs.name:
canddict['prefsname'] = prefs.name
# create id
uniqueid = candid(datadict=canddict)
canddict['candId'] = uniqueid
candidate_png = 'cands_{0}.png'.format(uniqueid)
canddict['png_url'] = os.path.join(url_prefix, indexprefix, candidate_png)
# assert os.path.exists(os.path.join(prefs.workdir, candidate_png)), "Expected png {0} for candidate.".format(candidate_png)
res += pushdata(canddict, index=index,
Id=uniqueid, command='index')
if res >= 1:
logger.debug('Indexed {0} cands for {1} to {2}'.format(res, scanId,
index))
else:
logger.debug('No cands indexed for {0}'.format(scanId))
return res
def indexmock(scanId, mocks=None, acc=None, indexprefix='new'):
""" Takes simulated_transient as used in state and pushes to index.
Assumes 1 mock in list for now.
indexprefix allows specification of set of indices ('test', 'new').
Use indexprefix='new' for production.
Option to submit mocks as tuple or part of analyze_cc future.
"""
from distributed import Future
# for realtime use
if mocks is None and acc is not None:
if isinstance(acc, Future):
ncands, mocks = acc.result()
else:
ncands, mocks = acc
if mocks is not None:
if len(mocks[0]) != 7:
logger.warn("mocks not in expected format ({0})".format(mocks))
index = indexprefix+'mocks'
mockdict = {}
mockdict['scanId'] = scanId
(seg, i0, dm, dt, amp, l, m) = mocks[0] # assume 1 mock
# TODO: support possible ampslope
mockdict['segment'] = int(seg)
mockdict['integration'] = int(i0)
mockdict['dm'] = float(dm)
mockdict['dt'] = float(dt)
mockdict['amp'] = float(amp)
mockdict['l'] = float(l)
mockdict['m'] = float(m)
res = pushdata(mockdict, Id=scanId, index=index,
command='index')
if res >= 1:
logger.info('Indexed {0} mocks for {1} to {2}'.format(res, scanId,
index))
if mocks is None or res == 0:
logger.info('No mocks indexed for {0}'.format(scanId))
def indexnoises(scanId, noises=None, noisefile=None, indexprefix='new'):
""" Takes noises as list or from noisefile and pushes to index.
    scanId is added to associate a cand with a given scan.
indexprefix allows specification of set of indices ('test', 'new').
"""
index = indexprefix+'noises'
doc_type = index.rstrip('s')
if noisefile is not None and noises is None:
from rfpipe.candidates import iter_noise
if os.path.exists(noisefile):
logger.info("Reading noises from {0}".format(noisefile))
noises = list(iter_noise(noisefile))
assert isinstance(noises, list)
count = 0
for noise in noises:
startmjd, deltamjd, segment, integration, noiseperbl, zerofrac, imstd = noise
Id = '{0}.{1}.{2}'.format(scanId, segment, integration)
if not es.exists(index=index, doc_type=doc_type, id=Id):
noisedict = {}
noisedict['scanId'] = str(scanId)
noisedict['startmjd'] = float(startmjd)
noisedict['deltamjd'] = float(deltamjd)
noisedict['segment'] = int(segment)
noisedict['integration'] = int(integration)
noisedict['noiseperbl'] = float(noiseperbl)
noisedict['zerofrac'] = float(zerofrac)
noisedict['imstd'] = float(imstd)
count += pushdata(noisedict, Id=Id, index=index,
command='index')
else:
logger.warn("noise index {0} already exists".format(Id))
if count:
logger.info('Indexed {0} noises for {1} to {2}'
.format(count, scanId, index))
if not count:
logger.info('No noises indexed for {0}'.format(scanId))
###
# Managing elasticsearch documents
###
def pushdata(datadict, index, Id=None, command='index', force=False):
""" Pushes dict to index, which can be:
candidates, scans, preferences, or noises
Assuming one elasticsearch doc_type per index (less the s)
Id for scan should be scanId, while for preferences should be hexdigest
Command can be 'index' or 'delete'.
To update, index with existing key and force=True.
"""
assert isinstance(datadict, dict)
# only one doc_type per index and its name is derived from index
doc_type = index.rstrip('s')
logger.debug('Pushing to index {0} with Id {1}'.format(index, Id))
res = 0
try:
if command == 'index':
if force:
res = es.index(index=index, doc_type=doc_type, id=Id,
body=datadict)
else:
if not es.exists(index=index, doc_type=doc_type, id=Id):
try:
res = es.index(index=index, doc_type=doc_type,
id=Id, body=datadict)
except RequestError:
logger.warn("Id {0} and data {1} not indexed due to request error."
.format(Id, datadict))
else:
logger.warn('Id={0} already exists in index {1}'
.format(Id, index))
elif command == 'delete':
if es.exists(index=index, doc_type=doc_type, id=Id):
res = es.delete(index=index, doc_type=doc_type, id=Id)
else:
logger.warn('Id={0} not in index'.format(Id))
if res:
return res['_shards']['successful']
else:
return res
except (ConnectionError, NewConnectionError):
logger.warn("ConnectionError during push to index. Elasticsearch down?")
def candid(datadict=None, cc=None):
""" Returns id string for given data dict
Assumes scanId is defined as:
datasetId dot scanNum dot subscanNum
"""
if datadict is not None and cc is None:
scanId = datadict['scanId']
segment = datadict['segment']
integration = datadict['integration']
dmind = datadict['dmind']
dtind = datadict['dtind']
return ('{0}_seg{1}-i{2}-dm{3}-dt{4}'
.format(scanId, segment, integration, dmind, dtind))
elif cc is not None and datadict is None:
scanId = cc.metadata.scanId
return ['{0}_seg{1}-i{2}-dm{3}-dt{4}'
.format(scanId, segment, integration, dmind, dtind)
for segment, integration, dmind, dtind, beamnum in cc.locs]
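# Illustrative Id produced by the format above: a candidate in scan
# 'test.1.1' with segment 0, integration 100, dmind 5, dtind 1 becomes
# 'test.1.1_seg0-i100-dm5-dt1'.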
def update_field(index, field, value, Id=None, **kwargs):
""" Replace an index's field with a value.
Option to work on single Id or query the index with kwargs.
Use with caution.
"""
doc_type = index.rstrip('s')
if Id is None:
query = {"script": {"inline": "ctx._source.{0}='{1}'".format(field, value),
"lang": "painless"}}
query['retry_on_conflict'] = 5
if len(kwargs):
searchquery = {"match": kwargs}
else:
searchquery = {"match_all": {}}
query["query"] = searchquery
resp = es.update_by_query(body=query, doc_type=doc_type, index=index,
conflicts="proceed")
else:
query = {"doc": {field: value}}
resp = es.update(id=Id, body=query, doc_type=doc_type, index=index)
return resp['_shards']['successful']
def update_fields(index, fieldlist, valuelist, Id):
""" Updates multiple fields together for single Id.
Safer than update_field, which can produce doc conflicts if run rapidly.
"""
doc_type = index.rstrip('s')
doc = es.get(index, doc_type, Id)
doc['_source'].update(dict(zip(fieldlist, valuelist)))
res = es.index(index, doc_type, body=doc['_source'], id=Id)
return res['_shards']['successful']
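

# Hedged sketch: update two fields together for one doc, which avoids the
# version conflicts that repeated update_field calls can trigger. Index name,
# Id, and values are hypothetical; not called anywhere.
def _example_update_fields():
    """ Illustrative only. """
    return update_fields('testcands', ['tags', 'tagcount'], ['rfi', 0],
                         Id='test.1.1_seg0-i10-dm5-dt1')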
def remove_tags(prefix, **kwargs):
""" Removes tags applied in portal
Can use keyword args to select subset of all candidates in cands index
"""
Ids = get_ids(prefix+"cands", **kwargs)
logger.info("Removing tags from {0} candidates in {1}".format(len(Ids), prefix+"cands"))
for Id in Ids:
doc = get_doc(prefix+"cands", Id)
tagnames = [key for key in doc['_source'].keys() if '_tags' in key]
if len(tagnames):
print("Removing tags {0} for Id {1}".format(tagnames, Id))
for tagname in tagnames:
resp = es.update(prefix+"cands", prefix+"cand", Id, {"script": 'ctx._source.remove("' + tagname + '")'})
resp = es.update(prefix+"cands", prefix+"cand", Id, {"script": 'ctx._source.tagcount = 0'})
def remove_ids(index, Ids=None, check=True, **kwargs):
""" Gets Ids from an index
doc_type derived from index name (one per index)
Can optionally pass key-value pairs of field-string to search.
Must match exactly (e.g., "scanId"="test.1.1")
"""
if Ids is None:
# delete all Ids in index
if not len(kwargs):
logger.warn("No Ids or query kwargs. Clearing all Ids in {0}"
.format(index))
Ids = get_ids(index, **kwargs)
if check:
confirm = input("Press any key to confirm removal of {0} ids from {1}."
.format(len(Ids), index))
else:
confirm = 'yes'
res = 0
if confirm.lower() in ['y', 'yes']:
for Id in Ids:
res += pushdata({}, index, Id, command='delete')
logger.info("Removed {0} docs from index {1}".format(res, index))
return res
def get_ids(index, *args, **kwargs):
""" Gets Ids from an index
doc_type derived from index name (one per index)
Can optionally pass arg for string query.
Can optionall pass query_string=<search field query>.
Can optionally pass key-value pairs of field-string to search.
Must match exactly (e.g., "scanId"="test.1.1")
"""
# only one doc_type per index and its name is derived from index
doc_type = index.rstrip('s')
if 'field' in kwargs:
field = kwargs.pop('field')
else:
field = 'false'
if len(args):
wildcard = '*' + '* *'.join(args) + '*'
logger.debug("Using arg as wildcard")
query = {"query":{"query_string": {"query": wildcard}}}
elif "query_string" in kwargs:
query = {"query":{"query_string": {"query": kwargs["query_string"]}}}
logger.info("Using query_string kwargs only")
elif len(kwargs):
query = {"query": {"match": kwargs}, "_source": field}
else:
query = {"query": {"match_all": {}}, "_source": field}
res = helpers.scan(es, index=index, doc_type=doc_type, query=query)
if field == 'false':
return [hit['_id'] for hit in res]
else:
return [(hit['_id'], hit['_source'][field]) for hit in res]
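

# Hedged sketch of the three query styles supported by get_ids. The index,
# field names, and values are hypothetical; not called anywhere.
def _example_get_ids():
    """ Illustrative only. """
    ids1 = get_ids('testcands', 'test.1.1')                # wildcard arg
    ids2 = get_ids('testcands', query_string='snr1:>10')   # lucene-style query
    ids3 = get_ids('testcands', scanId='test.1.1')         # exact field match
    return ids1, ids2, ids3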
def get_doc(index, Id):
""" Get Id from index
"""
doc_type = index.rstrip('s')
doc = es.get(index=index, doc_type=doc_type, id=Id)
return doc
###
# Using index
###
def create_preference(index, Id):
""" Get doc from index with Id and create realfast preference object
"""
from rfpipe.preferences import Preferences
doc = get_doc(index, Id)
prefs = doc['_source']
return Preferences(**prefs)
###
# Managing docs between indexprefixes
###
def move_dataset(indexprefix1, indexprefix2, datasetId=None, scanId=None, force=False):
""" Given two index prefixes, move a datasetId or scanId and all associated docs over.
This will delete the original documents in indexprefix1.
If indexprefix2 is None, then datasetId is removed from indexprefix1.
"""
iddict0 = {indexprefix1+'cands': [], indexprefix1+'scans': [],
indexprefix1+'mocks': [], indexprefix1+'noises': [],
indexprefix1+'preferences': []}
if scanId is None and datasetId is not None:
scanids = get_ids(indexprefix1 + 'scans', datasetId=datasetId)
else:
scanids = [scanId]
for scanId in scanids:
iddict = copy_all_docs(indexprefix1, indexprefix2, scanId=scanId, force=force)
for k, v in iddict.items():
for Id in v:
if Id not in iddict0[k]:
iddict0[k].append(Id)
count = sum([len(v) for k, v in iteritems(iddict0)])
if not force:
if (datasetId is not None):
confirm = input("Remove dataset {0} from {1} with {2} ids in all indices?"
.format(datasetId, indexprefix1, count))
        elif (scanId is not None):
confirm = input("Remove scanId {0} from {1} with {2} ids in all indices? {3}"
.format(scanId, indexprefix1, count, iddict0))
else:
confirm = 'y'
# first remove Ids
if confirm.lower() in ['y', 'yes']:
for k, v in iddict0.items():
if k != indexprefix1 + 'preferences':
remove_ids(k, v, check=False)
# remove old cand pngs
if k == indexprefix1 + 'cands':
for Id in v:
candplot1 = ('/lustre/aoc/projects/fasttransients/realfast/plots/{0}/cands_{1}.png'
.format(indexprefix1, Id))
if os.path.exists(candplot1):
os.remove(candplot1)
# remove old summary htmls
if k == indexprefix1 + 'scans':
logger.info("{0} {1}".format(k, v))
for Id in v:
summary1 = ('/lustre/aoc/projects/fasttransients/realfast/plots/{0}/cands_{1}.html'
.format(indexprefix1, Id))
if os.path.exists(summary1):
os.remove(summary1)
# test whether other scans are using prefsname
prefsnames = iddict0[indexprefix1 + 'preferences']
for prefsname in prefsnames:
if not len(get_ids(indexprefix1 + 'scans', prefsname=prefsname)):
remove_ids(indexprefix1 + 'preferences', [prefsname], check=False)
else:
logger.info("prefsname {0} is referred to in {1}. Not deleting"
.format(Id, k))
# TODO: remove png and html files after last move
def copy_all_docs(indexprefix1, indexprefix2, candId=None, scanId=None, force=False):
""" Move docs from 1 to 2 that are associated with a candId or scanId.
scanId includes multiple candIds, which will all be moved.
Specifying a candId will only move the candidate, scanId, and associated products (not all cands).
Associated docs include scanId, preferences, mocks, etc.
If scanId provided, all docs moved.
If candId provided, only that one will be selected from all in scanId.
If indexprefix2 is None, then the dict of all docs in indexprefix1 is returned.
"""
if candId is not None:
logger.info("Finding docs with candId {0}".format(candId))
elif scanId is not None:
logger.info("Finding docs with scanId {0}".format(scanId))
iddict = find_docids(indexprefix1, candId=candId, scanId=scanId)
if indexprefix2 is not None:
assert os.path.exists('/lustre/aoc/projects/fasttransients/realfast/plots'), 'Only works on AOC lustre'
for k, v in iddict.items():
for Id in v:
                if (candId is not None) and (candId != Id) and (k == indexprefix1 + 'cands'):
                    continue  # only move the specified candidate from the cands index
try:
result = copy_doc(k, k.replace(indexprefix1, indexprefix2), Id, force=force)
except NotFoundError:
logger.warn("Id {0} not found in {1}".format(Id, k))
# update png_url to new prefix and move plot
if (k == indexprefix1+'cands') and result:
png_url = get_doc(index=indexprefix1+'cands', Id=Id)['_source']['png_url']
update_field(indexprefix2+'cands', 'png_url',
png_url.replace(indexprefix1, indexprefix2),
Id=Id)
candplot1 = ('/lustre/aoc/projects/fasttransients/realfast/plots/{0}/cands_{1}.png'
.format(indexprefix1, Id))
candplot2 = ('/lustre/aoc/projects/fasttransients/realfast/plots/{0}/cands_{1}.png'
.format(indexprefix2, Id))
if os.path.exists(candplot1):
shutil.move(candplot1, candplot2)
logger.info("Updated png_url field and moved plot for {0} from {1} to {2}"
.format(Id, indexprefix1,
indexprefix2))
# if os.path.exists(candplot1):
# os.remove(candplot1)
# else:
# logger.warn("Problem updating or moving png_url {0} from {1} to {2}"
# .format(Id, indexprefix1,
# indexprefix2))
else:
logger.warn("Could not find file {0}".format(candplot1))
elif not result:
logger.info("Did not copy {0} from {1} to {2}"
.format(Id, indexprefix1, indexprefix2))
# copy summary html file
if k == indexprefix1+'scans':
summary1 = ('/lustre/aoc/projects/fasttransients/realfast/plots/{0}/cands_{1}.html'
.format(indexprefix1, v[0]))
summary2 = ('/lustre/aoc/projects/fasttransients/realfast/plots/{0}/cands_{1}.html'
.format(indexprefix2, v[0]))
if os.path.exists(summary1):
success = shutil.copy(summary1, summary2)
return iddict
def candid_bdf(indexprefix, candId, bdfdir='/lustre/evla/wcbe/data/realfast'):
""" Given candId in indexprefix, list the bdfname, if it exists.
"""
doc = get_doc(indexprefix+'cands', Id=candId)
if 'sdmname' in doc['_source']:
sdmname = doc['_source']['sdmname']
logger.info("CandId {0} has sdmname {1}".format(candId, sdmname))
bdfint = sdmname.split('_')[-1]
bdfname = os.path.join(bdfdir, 'uid____evla_realfastbdf_' + bdfint)
if os.path.exists(bdfname):
return bdfname
else:
logger.warn("No bdf found for {0}".format(sdmname))
return None
else:
logger.warn("No SDM found for {0}".format(candId))
return None
def remove_dataset(indexprefix, datasetId=None, scanId=None, force=False):
""" Use dataset or scanId to remove bdfs, indexed data, and plots/html.
On the CBE, this will remove bdfs, while at the AOC, it manages the rest.
"""
if os.path.exists('/lustre/aoc/projects/fasttransients/realfast/plots'):
logger.info("On the AOC, removing scanId from index and plots/html")
move_dataset(indexprefix, None, datasetId=datasetId, scanId=scanId, force=force)
else:
logger.info("On the CBE, removing bdfs")
if scanId is not None:
Ids = get_ids(indexprefix + 'cands', scanId=scanId)
Id = scanId
elif datasetId is not None:
Ids = get_ids(indexprefix + 'cands', datasetId=datasetId)
Id = datasetId
confirm = input("Remove bdfs associated with {0} candidates in {1}?".format(len(Ids), Id))
if confirm.lower() in ['y', 'yes']:
remove_bdfs(indexprefix, Ids)
def remove_bdfs(indexprefix, candIds):
""" Given candIds, look up bdf name in indexprefix and then remove bdf
"""
for Id in candIds:
bdfname = candid_bdf(indexprefix, Id)
if bdfname is not None:
os.remove(bdfname)
logger.info('Removed {0}'.format(bdfname))
def find_docids(indexprefix, candId=None, scanId=None):
""" Find docs associated with a candId or scanId.
Finds relations based on scanId, which ties all docs together.
Returns a dict with keys of the index name and values of the related ids.
scanId includes multiple candIds, which will all be moved.
Specifying a candId will only move the candidate, scanId, and associated products (not all cands).
A full index set has:
- cands indexed by candId (has scanId field)
- scans indexed by scanId
- preferences indexed by preferences name (in scans index)
- mocks indexed by scanId (has scanId field)
- noises indexed by noiseId (has scanId field)
"""
docids = {}
# option 1: give a candId to get scanId and then other docs
if candId is not None and scanId is None:
scanId = candId.split("_seg")[0]
# option 2: use scanId given as argument or from above
if scanId is not None:
# use scanId to get ids with one-to-many mapping
for ind in ['cands', 'mocks', 'noises']:
index = indexprefix + ind
try:
ids = get_ids(index, scanId=scanId)
# if searching by candId, then only define that one
if ((candId is not None) and (ind == 'cands')):
docids[index] = [candId]
else:
docids[index] = ids
except NotFoundError:
logger.warn("Id {0} not found in {1}".format(scanId, index))
# get prefsname from scans index
try:
index = indexprefix + 'scans'
docids[index] = [scanId]
prefsname = es.get(index=index, doc_type=index.rstrip('s'), id=scanId)['_source']['prefsname']
index = indexprefix + 'preferences'
docids[index] = [prefsname]
except NotFoundError:
logger.warn("Id {0} not found in {1}".format(scanId, index))
return docids
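

# For reference, find_docids returns a dict keyed by full index name, e.g.
# (values hypothetical):
# {'newcands': ['<candId>', ...], 'newscans': ['<scanId>'],
#  'newmocks': ['<mockId>', ...], 'newnoises': ['<noiseId>', ...],
#  'newpreferences': ['<prefsname>']}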
def audit_indexprefix(indexprefix):
""" Confirm that all candids map to scanids, prefnames, and pngs.
Confirm that scanids mocks, noises.
Also test that candids have plots and summaryplots.
"""
import requests
scanIds = get_ids(indexprefix+'scans')
candIds = get_ids(indexprefix+'cands')
mockIds = get_ids(indexprefix+'mocks')
noiseIds = get_ids(indexprefix+'noises')
failed = 0
for candId in candIds:
doc = get_doc(indexprefix+'cands', candId)
# 1) is candId tied to scanId?
candIdscanId = doc['_source']['scanId']
if candIdscanId not in scanIds:
failed += 1
logger.warn("candId {0} has scanId {1} that is not in {2}"
.format(candId, candIdscanId, indexprefix+'scans'))
# 2) Is candId prefs indexed?
prefsname = doc['_source']['prefsname']
if prefsname not in get_ids(indexprefix+'preferences'):
failed += 1
logger.warn("candId {0} has prefsname {1} that is not in {2}"
.format(candId, prefsname, indexprefix+'preferences'))
# 3) Is candId png_url in right place?
png_url = doc['_source']['png_url']
if requests.get(png_url).status_code != 200:
failed += 1
logger.warn("candId {0} png_url {1} is not accessible"
.format(candId, png_url))
# 4) Does candId have summary plot?
summary_url = ('http://realfast.nrao.edu/plots/{0}/cands_{1}.html'
.format(indexprefix, candIdscanId))
if requests.get(summary_url).status_code != 200:
failed += 1
logger.warn("candId {0} summary plot {1} is not accessible"
.format(candId, summary_url))
logger.info("{0} of {1} candIds have issues".format(failed, len(candIds)))
failed = 0
for scanId in scanIds:
doc = get_doc(indexprefix+'scans', scanId)
# 5) Is scanId prefs indexed?
prefsname = doc['_source']['prefsname']
if prefsname not in get_ids(indexprefix+'preferences'):
failed += 1
logger.warn("scanId {0} has prefsname {1} that is not in {2}"
.format(scanId, prefsname, indexprefix+'preferences'))
logger.info("{0} of {1} scanIds have issues".format(failed, len(scanIds)))
failed = 0
for mockId in mockIds:
doc = get_doc(indexprefix+'mocks', mockId)
# 6) is mockId tied to scanId?
mockIdscanId = doc['_source']['scanId']
if mockIdscanId not in scanIds:
failed += 1
logger.warn("mockId {0} has scanId {1} that is not in {2}"
.format(mockId, mockIdscanId, indexprefix+'scans'))
logger.info("{0} of {1} mockIds have issues".format(failed, len(mockIds)))
failed = 0
for noiseId in noiseIds:
doc = get_doc(indexprefix+'noises', noiseId)
# 7) is noiseId tied to scanId?
noiseIdscanId = doc['_source']['scanId']
if noiseIdscanId not in scanIds:
failed += 1
logger.warn("noiseId {0} has scanId {1} that is not in {2}"
.format(noiseId, noiseIdscanId, indexprefix+'scans'))
logger.info("{0} of {1} noiseIds have issues".format(failed, len(noiseIds)))
def move_consensus(indexprefix1='new', indexprefix2='final', match='identical',
consensustype='majority', nop=3, newtags=None, consensus=None,
datasetId=None, candId=None, ignoretags=' ', force=False):
""" Given candids, copies relevant docs from indexprefix1 to indexprefix2.
newtags will append to the new "tags" field for all moved candidates.
Default tags field will contain the user consensus tag.
Can optionally define consensus elsewhere and pass it in.
"""
if consensus is None:
consensus = get_consensus(indexprefix=indexprefix1, nop=nop, match=match,
consensustype=consensustype, newtags=newtags,
datasetId=datasetId, candId=candId, ignoretags=ignoretags)
logger.info("Moving {0} consensus candidates from {1} to {2}"
.format(len(consensus), indexprefix1, indexprefix2))
datasetIds = []
for candId, tags in iteritems(consensus):
scanId = candId.split('_seg')[0]
datasetIds.append('.'.join(scanId.split('.')[:-2]))
# check remaining docs
iddict = copy_all_docs(indexprefix1, indexprefix2, candId=candId, force=force)
# set tags field
update_field(indexprefix2+'cands', 'tags',
consensus[candId]['tags'], Id=candId)
res = pushdata({}, indexprefix1+'cands', candId, command='delete')
# wait for transfer to complete
sleep(3)
# (re)move any datasets with no associated cands
index = indexprefix1+'cands'
datasetIds = set(datasetIds)
logger.info("Checking whether datasetIds {0} remain in {1}".format(datasetIds, index))
for datasetId in datasetIds:
ncands = len(get_ids(index, datasetId=datasetId))
if not ncands:
logger.info("No cands from dataset {0} remain. Removing dataset from {1}"
.format(datasetId, indexprefix1))
move_dataset(indexprefix1, indexprefix2, datasetId, force=force)
else:
logger.info("{0} candidates remain for dataset {1}".format(ncands, datasetId))
def get_consensus(indexprefix='new', nop=3, consensustype='majority',
res='consensus', match='identical', newtags=None,
datasetId=None, ignoretags=' ', candId=None):
""" Get candidtes with consensus over at least nop user tag fields.
Argument consensustype: "absolute" (all agree), "majority" (most agree).
Returns dicts with either consensus and noconsensus candidates (can add "tags" too).
match defines how tags are compared:
- "identical" => string match,
- "bad" => find rfi, instrumental, delete, unsure/noise in tags
-- bad+absolute => all must be in bad list
-- bad+majority => majority must be "delete".
"notify" => notify found (only implemented for absolute).
newtags is a comma-delimited string that sets tags to apply to all.
Can optionally only consider candidates in datasetId.
Can optionally ignore tags from specific users with ignoretags.
"""
assert consensustype in ["absolute", "majority"]
assert res in ["consensus", "noconsensus"]
if indexprefix == 'final':
logger.warn("Looking at final indices, which should not be modified.")
index = indexprefix+'cands'
doc_type = index.rstrip('s')
ids = []
for ni in range(nop, 10): # do not expect more than 10 voters
idlist = get_ids(index=index, tagcount=ni)
logger.info("Got {0} cands with tagcount={1}".format(len(idlist), ni))
ids += idlist
assert match in ['identical', 'bad', 'notify']
badlist = ["rfi", "instrumental", "delete", "unsure/noise"]
consensus = {}
noconsensus = {}
for Id in ids:
# select only datasetId candidates, if desired
if datasetId is not None and datasetId not in Id:
continue
if candId is not None and candId != Id:
continue
tagsdict = gettags(indexprefix=indexprefix, Id=Id, ignore=ignoretags)
logger.debug("Id {0} has {1} tags".format(Id, len(tagsdict)))
tagslist = list(tagsdict.values())
# add Id and tags to dict according to consensus opinion
if consensustype == 'absolute':
if match == 'identical':
if all([tagslist[0] == val for val in tagslist]):
tagsdict['tags'] = tagslist[0]
if newtags is not None:
tagsdict['tags'] += ','+newtags
consensus[Id] = tagsdict
else:
noconsensus[Id] = tagsdict
elif match == 'bad':
if all([tag in badlist for tags in tagslist for tag in tags.split(',')]):
tagsdict['tags'] = tagslist[0]
if newtags is not None:
tagsdict['tags'] += ','+newtags
consensus[Id] = tagsdict
else:
noconsensus[Id] = tagsdict
elif match == 'notify':
if all(['notify' in tags for tags in tagslist]):
tagsdict['tags'] = tagslist[0]
if newtags is not None:
tagsdict['tags'] += ','+newtags
consensus[Id] = tagsdict
else:
noconsensus[Id] = tagsdict
elif consensustype == 'majority':
# break out all tags (could be multiple per user)
alltags = [tag for tags in tagslist for tag in tags.split(',')]
# sort by whether tag is agreed upon by majority
consensus_tags = []
noconsensus_tags = []
for tag in alltags:
if (tag in consensus_tags) or (tag in noconsensus_tags):
continue
if (alltags.count(tag) >= len(tagslist)//2+1):
if match == 'identical':
consensus_tags.append(tag)
elif (match == 'bad') and (tag == 'delete'):
consensus_tags.append(tag)
else:
noconsensus_tags.append(tag)
else:
noconsensus_tags.append(tag)
if newtags is not None:
for newtag in newtags.split(','):
consensus_tags.append(newtag)
if res == 'consensus' and len(consensus_tags):
tagsdict['tags'] = ','.join(consensus_tags)
consensus[Id] = tagsdict
elif res == 'noconsensus' and len(noconsensus_tags):
tagsdict['tags'] = ','.join(noconsensus_tags)
noconsensus[Id] = tagsdict
else:
logger.exception("consensustype {0} not recognized"
.format(consensustype))
logger.info("Consensus found for {0} candidates in prefix {1}"
.format(len(consensus), indexprefix))
if res == 'consensus':
logger.info("Returning consensus candidates")
return consensus
elif res == 'noconsensus':
logger.info("Returning candidates without consensus")
return noconsensus
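

# Hedged usage sketch for get_consensus: majority vote over at least three
# taggers, appending a hypothetical 'archived' tag to every consensus
# candidate. Illustrative only; not called anywhere.
def _example_get_consensus():
    """ Illustrative only. """
    return get_consensus(indexprefix='new', nop=3, consensustype='majority',
                         match='identical', newtags='archived')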
def resolve_consensus(indexprefix='new', nop=3, consensustype='majority',
match='identical', datasetId=None):
""" Step through noconsensus candidates and decide their fate.
"""
nocon = get_consensus(indexprefix=indexprefix, nop=nop,
consensustype=consensustype, res='noconsensus',
                          match=match, datasetId=datasetId)
baseurl = 'http://realfast.nrao.edu/plots/' + indexprefix
con = {}
try:
for k,v in nocon.items():
logger.info("Candidate {0}:".format(k))
logger.info("\tpng_url\t{0}/cands_{1}.png".format(baseurl, k))
tags = v['tags'].split(',')
logger.info("\tUser tags\t{0}".format([(vk, vv) for (vk, vv) in v.items() if '_tags' in vk]))
selection = input("Set consensus tags (<int>,<int> or <cr> to skip): {0}".format(list(enumerate(tags))))
if selection:
selection = selection.replace(',', ' ').split()
selection = reversed(sorted([int(sel) for sel in selection])) # preserves order used in web app
newtags = []
for sel in selection:
newtags.append(tags[sel])
v['tags'] = ','.join(newtags)
con[k] = v
except KeyboardInterrupt:
logger.info("Escaping loop")
return con
return con
def gettags(indexprefix, Id, ignore=' '):
""" Get cand Id in for indexprefix
return dict with all tags.
"""
index = indexprefix+'cands'
doc_type = index.rstrip('s')
doc = es.get(index=index, doc_type=doc_type, id=Id)
tagsdict = dict(((k, v) for (k, v) in doc['_source'].items() if ('_tags' in k) and (ignore not in k)))
return tagsdict
def copy_doc(index1, index2, Id, deleteorig=False, force=False):
""" Take doc in index1 with Id and move to index2
Default is to copy, but option exists to "move" by deleting original.
using force=True will override ban on operating from final indices.
"""
if not force:
assert 'final' not in index1
doc_type1 = index1.rstrip('s')
doc_type2 = index2.rstrip('s')
doc = es.get(index=index1, doc_type=doc_type1, id=Id)
res = es.index(index=index2, doc_type=doc_type2, id=Id,
body=doc['_source'])
if res['_shards']['successful']:
if deleteorig:
res = es.delete(index=index1, doc_type=doc_type1, id=Id)
else:
logger.warn("Move of {0} from index {1} to {2} failed".format(Id,
index1,
index2))
return res['_shards']['successful']
###
# Set up indices
###
def create_indices(indexprefix):
""" Create standard set of indices,
cands, scans, preferences, mocks, noises
"""
body = {"settings": {
"index": {
"number_of_replicas": 0
},
"analysis": {
"analyzer": {
"default": {"tokenizer": "whitespace"}
}
}
},
}
body_preferences = body.copy()
body_preferences['mappings'] = {indexprefix+"preference": {
"properties": {
"flaglist": {"type": "text"},
"calcfeatures": {"type": "text"}
}
}
}
indices = ['scans', 'cands', 'preferences', 'mocks', 'noises']
for index in indices:
fullindex = indexprefix+index
if es.indices.exists(index=fullindex):
confirm = input("Index {0} exists. Delete?".format(fullindex))
if confirm.lower() in ['y', 'yes']:
es.indices.delete(index=fullindex)
if index != 'preferences':
es.indices.create(index=fullindex, body=body)
else:
es.indices.create(index=fullindex, body=body_preferences)
def reset_indices(indexprefix, deleteindices=False):
""" Remove entries from set of indices with a given indexprefix.
indexprefix allows specification of set of indices ('test', 'new').
Use indexprefix='new' for production.
deleteindices will delete indices, too.
*BE SURE YOU KNOW WHAT YOU ARE DOING*
"""
logger.warn("Erasing all docs from indices with prefix {0}"
.format(indexprefix))
for index in [indexprefix+'noises', indexprefix+'mocks',
indexprefix+'cands', indexprefix+'scans',
indexprefix+'preferences']:
res = remove_ids(index)
if deleteindices:
es.indices.delete(index)
logger.info("Removed {0} index".format(index))
def rsync(original, new):
""" Uses subprocess.call to rsync from 'filename' to 'new'
If new is directory, copies original in.
If new is new file, copies original to that name.
"""
assert os.path.exists(original), 'Need original file!'
res = subprocess.call(["rsync", "-a", original.rstrip('/'), new.rstrip('/')])
return int(res == 0)
|
caseyjlaw/vlart
|
realfast/elastic.py
|
Python
|
bsd-3-clause
| 45,120
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Daniel Zhang (張道博)'
__copyright__ = 'Copyright (c) 2013, University of Hawaii Smart Energy Project'
__license__ = 'https://raw.github' \
'.com/Hawaii-Smart-Energy-Project/Maui-Smart-Grid/master/BSD' \
'-LICENSE.txt'
from msg_db_connector import MSGDBConnector
from msg_db_util import MSGDBUtil
import psycopg2
import psycopg2.extras
class MECODBReader(object):
"""
Read records from a database.
"""
def __init__(self, testing = False):
"""
Constructor.
:param testing: True if in testing mode.
"""
self.connector = MSGDBConnector()
self.conn = MSGDBConnector(testing).connectDB()
self.dbUtil = MSGDBUtil()
self.dbName = self.dbUtil.getDBName(self.connector.dictCur)
def selectRecord(self, conn, table, keyName, keyValue):
"""
Read a record in the database given a table name, primary key name,
and value for the key.
:param conn DB connection
:param table DB table name
:param keyName DB column name for primary key
:param keyValue Value to be matched
:returns: Row containing record data.
"""
print "selectRecord:"
sql = """SELECT * FROM "%s" WHERE %s = %s""" % (
table, keyName, keyValue)
dcur = conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
self.dbUtil.executeSQL(dcur, sql)
row = dcur.fetchone()
return row
def readingAndMeterCounts(self):
"""
Retrieve the reading and meter counts.
:returns: Multiple lists containing the retrieved data.
"""
sql = """SELECT "Day", "Reading Count",
"Meter Count" FROM count_of_readings_and_meters_by_day"""
dcur = self.conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
self.dbUtil.executeSQL(dcur, sql)
rows = dcur.fetchall()
dates = []
meterCounts = []
readingCounts = []
for row in rows:
dates.append(row[0])
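            # NB: reading count divided by meter count (readings per meter);
            # under Python 2 this is integer division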
readingCounts.append(row[1] / row[2])
meterCounts.append(row[2])
return dates, readingCounts, meterCounts
|
Hawaii-Smart-Energy-Project/Maui-Smart-Grid
|
src/meco_db_read.py
|
Python
|
bsd-3-clause
| 2,247
|
from django.utils import timezone
from test_plus import TestCase
from .factories import WineFactory, ReviewFactory
class TestWine(TestCase):
wine = WineFactory()
def test__str__(self):
self.assertEqual(
self.wine.__str__(),
'Alvaro Palacios Finca Dofi 1991'
)
class TestReview(TestCase):
review = ReviewFactory()
timestamp = timezone.now().strftime("%Y-%m-%d %H:%M:%S")
def test__str__(self):
self.assertEqual(
self.review.__str__(),
'user-0' + self.timestamp
)
|
REBradley/WineArb
|
winearb/reviews/tests/test_models.py
|
Python
|
bsd-3-clause
| 569
|
# BSD Licence
# Copyright (c) 2012, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
'''
Functions for manipulating URLs.
Created on 27 Sep 2011
@author: rwilkinson
'''
import urlparse
def get_base_url(url):
"""Returns the base part of a URL, i.e., excluding any characters after the last /.
@param url - URL to process
@return base URL
"""
parts = url.rpartition('/')
    return parts[0] + parts[1]  # rpartition always returns a 3-tuple
def get_server_url(url):
"""Returns the part of a URL specifying the server, i.e., the schema and net location.
@param url - URL to process
@return server URL
"""
parts = urlparse.urlparse(url)
return urlparse.urlunparse((parts.scheme, parts.netloc, '', '', '', ''))
def is_relative(url):
"""Determines whether a URL or URL fragment is relative, i.e., does not start with a scheme and
net location and does not start with a /.
@param url - URL to process
@return True if URL is relative, else False
"""
parts = urlparse.urlparse(url)
return not (parts.scheme or parts.netloc or url.startswith('/'))
def is_server_relative(url):
"""Determines whether a URL or URL fragment is relative to a server location, i.e., does not
start with a scheme and net location and but does start with a /.
@param url - URL to process
@return True if URL is relative to server, else False
"""
parts = urlparse.urlparse(url)
return not (parts.scheme or parts.netloc) and url.startswith('/')
def make_url(base_url, url):
"""Combines a URL with a base URL as follows:
If the URL is absolute (contains a scheme and net location) return the URL.
If the URL is relative (does not contain a scheme and net location and does not begin with a /)
return the base URL followed by the URL.
    If the URL is relative to the server location (does not contain a scheme and net
location and begins with a /) return the scheme and net location from the base URL followed
by the URL.
@param base_url - base URL
@param url - URL
@return combined URL
"""
if is_relative(url):
return_url = base_url + url
elif is_server_relative(url):
return_url = get_server_url(base_url) + url
else:
return_url = url
return return_url
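
# Hedged examples of make_url behaviour; the URLs below are hypothetical.
# Illustrative only; not called anywhere.
def _example_make_url():
    base = 'http://example.org/thredds/catalog/'
    rel = make_url(base, 'dataset.xml')              # base + 'dataset.xml'
    srv = make_url(base, '/other/cat.xml')           # 'http://example.org/other/cat.xml'
    absu = make_url(base, 'http://elsewhere.org/x')  # returned unchanged
    return rel, srv, absu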
|
cedadev/dapbench
|
dapbench/thredds/lib/url_util.py
|
Python
|
bsd-3-clause
| 2,448
|
"""Compiler for Netkit"""
import os
import autonetkit
import autonetkit.config
import autonetkit.log as log
import autonetkit.plugins.naming as naming
from autonetkit.compilers.platform.platform_base import PlatformCompiler
import string
import itertools
from autonetkit.ank_utils import alphabetical_sort as alpha_sort
from autonetkit.compilers.device.quagga import QuaggaCompiler
from autonetkit.nidb import config_stanza
class NetkitCompiler(PlatformCompiler):
"""Netkit Platform Compiler"""
@staticmethod
def index_to_int_id(index):
"""Maps interface index to ethx e.g. eth0, eth1, ..."""
return "eth%s" % index
def compile(self):
log.info("Compiling Netkit for %s" % self.host)
g_phy = self.anm['phy']
quagga_compiler = QuaggaCompiler(self.nidb, self.anm)
# TODO: this should be all l3 devices not just routers
for phy_node in g_phy.l3devices(host=self.host, syntax='quagga'):
folder_name = naming.network_hostname(phy_node)
nidb_node = self.nidb.node(phy_node)
nidb_node.add_stanza("render")
#TODO: order by folder and file template src/dst
nidb_node.render.base = os.path.join("templates","quagga")
nidb_node.render.template = os.path.join("templates",
"netkit_startup.mako")
nidb_node.render.dst_folder = os.path.join("rendered",
self.host, "netkit")
nidb_node.render.base_dst_folder = os.path.join("rendered",
self.host, "netkit", folder_name)
nidb_node.render.dst_file = "%s.startup" % folder_name
nidb_node.render.custom = {
'abc': 'def.txt'
}
# allocate zebra information
nidb_node.add_stanza("zebra")
if nidb_node.is_router():
nidb_node.zebra.password = "1234"
hostname = folder_name
if hostname[0] in string.digits:
hostname = "r" + hostname
nidb_node.hostname = hostname # can't have . in quagga hostnames
nidb_node.add_stanza("ssh")
nidb_node.ssh.use_key = True # TODO: make this set based on presence of key
# Note this could take external data
int_ids = itertools.count(0)
for interface in nidb_node.physical_interfaces:
numeric_id = int_ids.next()
interface.numeric_id = numeric_id
interface.id = self.index_to_int_id(numeric_id)
# and allocate tap interface
nidb_node.add_stanza("tap")
nidb_node.tap.id = self.index_to_int_id(int_ids.next())
quagga_compiler.compile(nidb_node)
if nidb_node.bgp:
nidb_node.bgp.debug = True
static_routes = []
nidb_node.zebra.static_routes = static_routes
# and lab.conf
self.allocate_tap_ips()
self.lab_topology()
def allocate_tap_ips(self):
"""Allocates TAP IPs"""
settings = autonetkit.config.settings
lab_topology = self.nidb.topology[self.host]
from netaddr import IPNetwork
address_block = IPNetwork(settings.get("tapsn")
or "172.16.0.0/16").iter_hosts() # added for backwards compatibility
lab_topology.tap_host = address_block.next()
lab_topology.tap_vm = address_block.next() # for tunnel host
for node in sorted(self.nidb.l3devices(host=self.host)):
node.tap.ip = address_block.next()
def lab_topology(self):
# TODO: replace name/label and use attribute from subgraph
lab_topology = self.nidb.topology[self.host]
lab_topology.render_template = os.path.join("templates",
"netkit_lab_conf.mako")
lab_topology.render_dst_folder = os.path.join("rendered",
self.host, "netkit")
lab_topology.render_dst_file = "lab.conf"
lab_topology.description = "AutoNetkit Lab"
lab_topology.author = "AutoNetkit"
lab_topology.web = "www.autonetkit.org"
host_nodes = list(
self.nidb.nodes(host=self.host, platform="netkit"))
if not len(host_nodes):
log.debug("No Netkit hosts for %s" % self.host)
# also need collision domains for this host
cd_nodes = self.nidb.nodes("broadcast_domain", host=self.host)
host_nodes += cd_nodes
subgraph = self.nidb.subgraph(host_nodes, self.host)
lab_topology.machines = " ".join(alpha_sort(naming.network_hostname(phy_node)
for phy_node in subgraph.l3devices()))
lab_topology.config_items = []
for node in sorted(subgraph.l3devices()):
for interface in node.physical_interfaces:
broadcast_domain = str(interface.ipv4_subnet).replace("/", ".")
#netkit lab.conf uses 1 instead of eth1
numeric_id = interface.numeric_id
stanza = config_stanza(
device=naming.network_hostname(node),
key=numeric_id,
value=broadcast_domain,
)
lab_topology.config_items.append(stanza)
lab_topology.tap_ips = []
for node in subgraph:
if node.tap:
stanza = config_stanza(
device=naming.network_hostname(node),
id=node.tap.id.replace("eth", ""), # strip ethx -> x
ip=node.tap.ip,
)
lab_topology.tap_ips.append(stanza)
lab_topology.tap_ips = sorted(lab_topology.tap_ips, key = lambda x: x.ip)
lab_topology.config_items = sorted(lab_topology.config_items, key = lambda x: x.device)
|
sysbot/autonetkit
|
autonetkit/compilers/platform/netkit.py
|
Python
|
bsd-3-clause
| 5,735
|
#!/usr/bin/env python2.7
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing
import os
import subprocess
import sys
import argparse
import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
os.makedirs('reports')
port_server_port = 32766
start_port_server.start_port_server(port_server_port)
def fnize(s):
out = ''
for c in s:
if c in '<>, /':
if len(out) and out[-1] == '_': continue
out += '_'
else:
out += c
return out
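# e.g. (hypothetical benchmark name) fnize('BM_PingPong<TCP, NOP>/0/0')
# -> 'BM_PingPong_TCP_NOP_0_0'; runs of separator chars collapse to one '_'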
# index html
index_html = """
<html>
<head>
<title>Microbenchmark Results</title>
</head>
<body>
"""
def heading(name):
global index_html
index_html += "<h1>%s</h1>\n" % name
def link(txt, tgt):
global index_html
index_html += "<p><a href=\"%s\">%s</a></p>\n" % (tgt, txt)
def text(txt):
global index_html
index_html += "<p><pre>%s</pre></p>\n" % txt
def collect_latency(bm_name, args):
"""generate latency profiles"""
benchmarks = []
profile_analysis = []
cleanup = []
heading('Latency Profiles: %s' % bm_name)
subprocess.check_call(
['make', bm_name,
'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
for line in subprocess.check_output(['bins/basicprof/%s' % bm_name,
'--benchmark_list_tests']).splitlines():
link(line, '%s.txt' % fnize(line))
benchmarks.append(
jobset.JobSpec(['bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' % line],
environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}))
profile_analysis.append(
jobset.JobSpec([sys.executable,
'tools/profiling/latency_profile/profile_analyzer.py',
'--source', '%s.trace' % fnize(line), '--fmt', 'simple',
'--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=None))
cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
# periodically flush out the list of jobs: profile_analysis jobs at least
# consume upwards of five gigabytes of ram in some cases, and so analysing
# hundreds of them at once is impractical -- but we want at least some
# concurrency or the work takes too long
if len(benchmarks) >= min(4, multiprocessing.cpu_count()):
# run up to half the cpu count: each benchmark can use up to two cores
# (one for the microbenchmark, one for the data flush)
jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
benchmarks = []
profile_analysis = []
cleanup = []
# run the remaining benchmarks that weren't flushed
if len(benchmarks):
jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
def collect_perf(bm_name, args):
"""generate flamegraphs"""
heading('Flamegraphs: %s' % bm_name)
subprocess.check_call(
['make', bm_name,
'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
benchmarks = []
profile_analysis = []
cleanup = []
for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
'--benchmark_list_tests']).splitlines():
link(line, '%s.svg' % fnize(line))
benchmarks.append(
jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
'-g', '-F', '997',
'bins/mutrace/%s' % bm_name,
'--benchmark_filter=^%s$' % line,
'--benchmark_min_time=10']))
profile_analysis.append(
jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
environ = {
'PERF_BASE_NAME': fnize(line),
'OUTPUT_DIR': 'reports',
'OUTPUT_FILENAME': fnize(line),
}))
cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
# periodically flush out the list of jobs: temporary space required for this
# processing is large
if len(benchmarks) >= 20:
      # run benchmarks one at a time (maxjobs=1) while perf records each one
jobset.run(benchmarks, maxjobs=1,
add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
benchmarks = []
profile_analysis = []
cleanup = []
# run the remaining benchmarks that weren't flushed
if len(benchmarks):
jobset.run(benchmarks, maxjobs=1,
add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
def collect_summary(bm_name, args):
heading('Summary: %s' % bm_name)
subprocess.check_call(
['make', bm_name,
'CONFIG=counters', '-j', '%d' % multiprocessing.cpu_count()])
cmd = ['bins/counters/%s' % bm_name,
'--benchmark_out=out.json',
'--benchmark_out_format=json']
if args.summary_time is not None:
cmd += ['--benchmark_min_time=%d' % args.summary_time]
text(subprocess.check_output(cmd))
if args.bigquery_upload:
with open('out.csv', 'w') as f:
f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.json']))
subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', 'out.csv'])
collectors = {
'latency': collect_latency,
'perf': collect_perf,
'summary': collect_summary,
}
argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument('-c', '--collect',
choices=sorted(collectors.keys()),
nargs='+',
default=sorted(collectors.keys()),
help='Which collectors should be run against each benchmark')
argp.add_argument('-b', '--benchmarks',
default=['bm_fullstack'],
nargs='+',
type=str,
help='Which microbenchmarks should be run')
argp.add_argument('--bigquery_upload',
default=False,
action='store_const',
const=True,
help='Upload results from summary collection to bigquery')
argp.add_argument('--summary_time',
default=None,
type=int,
help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args()
for bm_name in args.benchmarks:
for collect in args.collect:
collectors[collect](bm_name, args)
index_html += "</body>\n</html>\n"
with open('reports/index.html', 'w') as f:
f.write(index_html)
|
infinit/grpc
|
tools/run_tests/run_microbenchmark.py
|
Python
|
bsd-3-clause
| 8,954
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
class Migration(SchemaMigration):
def forwards(self, orm):
db_engine = settings.DATABASES['default']['ENGINE']
if db_engine.rfind('mysql') == -1:
# Adding model 'Country'
db.create_table('atlas_country', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('country_code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=2, db_index=True)),
('border', self.gf('django.contrib.gis.db.models.fields.MultiPolygonField')(blank=True, null=True, geography=True)),
))
db.send_create_signal('atlas', ['Country'])
# Adding model 'Region'
db.create_table('atlas_region', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('code', self.gf('django.db.models.fields.CharField')(max_length=2, db_index=True)),
('border', self.gf('django.contrib.gis.db.models.fields.MultiPolygonField')(blank=True, null=True, geography=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['atlas.Country'])),
))
db.send_create_signal('atlas', ['Region'])
# Adding unique constraint on 'Region', fields ['country', 'code']
db.create_unique('atlas_region', ['country_id', 'code'])
# Adding model 'City'
db.create_table('atlas_city', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
('coordinates', self.gf('atlas.fields.CoordinateField')(blank=True, null=True, geography=True)),
('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['atlas.Region'], null=True, blank=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['atlas.Country'])),
))
db.send_create_signal('atlas', ['City'])
# Adding model 'Location'
db.create_table('atlas_location', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
('coordinates', self.gf('atlas.fields.CoordinateField')(blank=True, null=True, geography=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['atlas.Country'])),
('city', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['atlas.City'], null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(max_length=1024, null=True, blank=True)),
('photo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['photologue.Photo'], null=True, blank=True)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['category.Category'], null=True, blank=True)),
))
db.send_create_signal('atlas', ['Location'])
# create MySQL tables without spatial indices so that it will work with InnoDB
else:
if not db.dry_run:
sql = """
SET FOREIGN_KEY_CHECKS=0;
CREATE TABLE `atlas_country` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(50) NOT NULL, `country_code` varchar(2) NOT NULL UNIQUE, `border` MULTIPOLYGON NULL);
CREATE TABLE `atlas_region` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(128) NOT NULL, `code` varchar(2) NOT NULL, `border` MULTIPOLYGON NULL, `country_id` integer NOT NULL);
ALTER TABLE `atlas_region` ADD CONSTRAINT `atlas_region_country_id_545200d9bb67aa36_uniq` UNIQUE (`country_id`, `code`);
CREATE TABLE `atlas_city` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(128) NOT NULL, `coordinates` POINT NULL, `region_id` integer NULL, `country_id` integer NOT NULL);
CREATE TABLE `atlas_location` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(128) NOT NULL, `coordinates` POINT NULL, `country_id` integer NOT NULL, `city_id` integer NULL, `description` longtext NULL, `photo_id` integer NULL, `category_id` integer NULL);
CREATE INDEX `atlas_region_65da3d2c` ON `atlas_region` (`code`);
ALTER TABLE `atlas_region` ADD CONSTRAINT `country_id_refs_id_688446d03ef69106` FOREIGN KEY (`country_id`) REFERENCES `atlas_country` (`id`);
CREATE INDEX `atlas_region_534dd89` ON `atlas_region` (`country_id`);
CREATE INDEX `atlas_city_52094d6e` ON `atlas_city` (`name`);
ALTER TABLE `atlas_city` ADD CONSTRAINT `region_id_refs_id_4877f50311a5997e` FOREIGN KEY (`region_id`) REFERENCES `atlas_region` (`id`);
CREATE INDEX `atlas_city_f6a8b032` ON `atlas_city` (`region_id`);
ALTER TABLE `atlas_city` ADD CONSTRAINT `country_id_refs_id_51bbc9cfa50a0b7d` FOREIGN KEY (`country_id`) REFERENCES `atlas_country` (`id`);
CREATE INDEX `atlas_city_534dd89` ON `atlas_city` (`country_id`);
CREATE INDEX `atlas_location_52094d6e` ON `atlas_location` (`name`);
ALTER TABLE `atlas_location` ADD CONSTRAINT `country_id_refs_id_3a0bfa099c9ea063` FOREIGN KEY (`country_id`) REFERENCES `atlas_country` (`id`);
CREATE INDEX `atlas_location_534dd89` ON `atlas_location` (`country_id`);
ALTER TABLE `atlas_location` ADD CONSTRAINT `city_id_refs_id_136a507ad0769b15` FOREIGN KEY (`city_id`) REFERENCES `atlas_city` (`id`);
CREATE INDEX `atlas_location_586a73b5` ON `atlas_location` (`city_id`);
ALTER TABLE `atlas_location` ADD CONSTRAINT `photo_id_refs_id_764ca670382ba838` FOREIGN KEY (`photo_id`) REFERENCES `photologue_photo` (`id`);
CREATE INDEX `atlas_location_7c6c8bb1` ON `atlas_location` (`photo_id`);
ALTER TABLE `atlas_location` ADD CONSTRAINT `category_id_refs_id_71ba6eba4d7f8101` FOREIGN KEY (`category_id`) REFERENCES `category_category` (`id`);
CREATE INDEX `atlas_location_42dc49bc` ON `atlas_location` (`category_id`);
SET FOREIGN_KEY_CHECKS=1;"""
for s in sql.split(';'):
if s:
db.execute(s + ';')
db.send_create_signal('atlas', ['Country'])
db.send_create_signal('atlas', ['Region'])
db.send_create_signal('atlas', ['City'])
db.send_create_signal('atlas', ['Location'])
def backwards(self, orm):
# Removing unique constraint on 'Region', fields ['country', 'code']
db.delete_unique('atlas_region', ['country_id', 'code'])
# Deleting model 'Country'
db.delete_table('atlas_country')
# Deleting model 'Region'
db.delete_table('atlas_region')
# Deleting model 'City'
db.delete_table('atlas_city')
# Deleting model 'Location'
db.delete_table('atlas_location')
models = {
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'atlas.city': {
'Meta': {'ordering': "('name',)", 'object_name': 'City'},
'coordinates': ('atlas.fields.CoordinateField', [], {'blank': 'True', 'null': 'True', 'geography': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Region']", 'null': 'True', 'blank': 'True'})
},
'atlas.country': {
'Meta': {'ordering': "('name',)", 'object_name': 'Country'},
'border': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'blank': 'True', 'null': 'True', 'geography': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'atlas.location': {
'Meta': {'object_name': 'Location'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.City']", 'null': 'True', 'blank': 'True'}),
'coordinates': ('atlas.fields.CoordinateField', [], {'blank': 'True', 'null': 'True', 'geography': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['photologue.Photo']", 'null': 'True', 'blank': 'True'})
},
'atlas.region': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('country', 'code'),)", 'object_name': 'Region'},
'border': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'blank': 'True', 'null': 'True', 'geography': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'photologue.photo': {
'Meta': {'ordering': "['-date_added']", 'object_name': 'Photo'},
'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photo_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'tags': ('photologue.models.TagField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'title_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
}
}
complete_apps = ['atlas']
|
praekelt/django-atlas
|
atlas/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 13,923
|
"""Endpoint Device Types Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
class EndPointDeviceTypes(APIClassTemplate):
"""The EndPointDeviceTypes Object in the FMC."""
VALID_JSON_DATA = [
"id",
"name",
"type",
"fqName",
"iseId",
"overrides",
"overridable",
]
VALID_FOR_KWARGS = VALID_JSON_DATA + []
URL_SUFFIX = "/object/endpointdevicetypes"
def __init__(self, fmc, **kwargs):
"""
Initialize EndPointDeviceTypes object.
:param fmc: (object) FMC object
:param kwargs: Any other values passed during instantiation.
:return: None
"""
super().__init__(fmc, **kwargs)
logging.debug("In __init__() for EndPointDeviceTypes class.")
self.parse_kwargs(**kwargs)
def post(self):
"""POST method for API for EndPointDeviceTypes not supported."""
logging.info("POST method for API for EndPointDeviceTypes not supported.")
pass
def put(self):
"""PUT method for API for EndPointDeviceTypes not supported."""
logging.info("PUT method for API for EndPointDeviceTypes not supported.")
pass
def delete(self):
"""DELETE method for API for EndPointDeviceTypes not supported."""
logging.info("DELETE method for API for EndPointDeviceTypes not supported.")
pass
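
# Hedged usage sketch (the fmc connection object and device-type name below
# are hypothetical). With POST/PUT/DELETE unsupported, the object is
# effectively read-only via methods inherited from APIClassTemplate, e.g.:
# obj = EndPointDeviceTypes(fmc=fmc1, name='Apple-iPad')
# obj.get()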
|
daxm/fmcapi
|
fmcapi/api_objects/object_services/endpointdevicetypes.py
|
Python
|
bsd-3-clause
| 1,425
|
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db.models import get_model
from oscar.forms.fields import ExtendedURLField
Item = get_model('product', 'Item')
# Different model types for each type of promotion
class AbstractPromotion(models.Model):
"""
Abstract base promotion that defines the interface
that subclasses must implement.
"""
class Meta:
abstract = True
def template_name(self):
"""
Returns the template to use to render this
promotion.
"""
return 'promotions/%s.html' % self.__class__.__name__.lower()
def template_context(self, *args, **kwargs):
return {}
class RawHTML(AbstractPromotion):
"""
Simple promotion - just raw HTML
"""
name = models.CharField(_("Name"), max_length=128)
body = models.TextField(_("HTML"))
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name_plural = 'Raw HTML'
def __unicode__(self):
return self.name
class Image(AbstractPromotion):
"""
An image promotion is simply a named image which has an optional
link to another part of the site (or another site).
This can be used to model both banners and pods.
"""
name = models.CharField(_("Name"), max_length=128)
link_url = ExtendedURLField(blank=True, null=True, help_text="""This is
where this promotion links to""")
image = models.ImageField(upload_to=settings.OSCAR_PROMOTION_FOLDER, blank=True, null=True)
date_created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.name
class MultiImage(AbstractPromotion):
"""
A multi-image promotion is simply a collection of image promotions
that are rendered in a specific way. This models things like
rotating banners.
"""
name = models.CharField(_("Name"), max_length=128)
images = models.ManyToManyField('promotions.Image', null=True, blank=True)
def __unicode__(self):
return self.name
class AbstractProductList(AbstractPromotion):
"""
Abstract superclass for promotions which are essentially a list
of products.
"""
name = models.CharField(_("Title"), max_length=255)
description = models.TextField(null=True, blank=True)
link_url = ExtendedURLField(blank=True, null=True)
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
def __unicode__(self):
return self.name
class HandPickedProductList(AbstractProductList):
"""
A hand-picked product list is a list of manually selected
products.
"""
products = models.ManyToManyField('product.Item', through='OrderedProduct', blank=True, null=True)
class OrderedProduct(models.Model):
list = models.ForeignKey('promotions.HandPickedProductList')
product = models.ForeignKey('product.Item')
display_order = models.PositiveIntegerField(default=0)
class Meta:
ordering = ('display_order',)
class AutomaticProductList(AbstractProductList):
BESTSELLING, RECENTLY_ADDED = ('Bestselling', 'RecentlyAdded')
METHOD_CHOICES = (
(BESTSELLING, _("Bestselling products")),
(RECENTLY_ADDED, _("Recently added products")),
)
method = models.CharField(max_length=128, choices=METHOD_CHOICES)
num_products = models.PositiveSmallIntegerField(default=4)
def get_products(self):
if self.method == self.BESTSELLING:
return Item.objects.all().order_by('-score')[:self.num_products]
return Item.objects.all().order_by('-date_created')[:self.num_products]
class OrderedProductList(models.Model):
tabbed_block = models.ForeignKey('promotions.TabbedBlock')
list = models.ForeignKey('promotions.HandPickedProductList')
display_order = models.PositiveIntegerField(default=0)
class Meta:
ordering = ('display_order',)
class TabbedBlock(AbstractPromotion):
name = models.CharField(_("Title"), max_length=255)
tabs = models.ManyToManyField('promotions.HandPickedProductList', through='OrderedProductList', null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True)
# Linking models
class LinkedPromotion(models.Model):
# We use generic foreign key to link to a promotion model
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
position = models.CharField(_("Position"), max_length=100, help_text="Position on page")
display_order = models.PositiveIntegerField(default=0)
clicks = models.PositiveIntegerField(default=0)
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
ordering = ['-clicks']
def record_click(self):
self.clicks += 1
self.save()
class PagePromotion(LinkedPromotion):
"""
A promotion embedded on a particular page.
"""
page_url = ExtendedURLField(max_length=128, db_index=True)
def __unicode__(self):
return u"%s on %s" % (self.content_object, self.page_url)
def get_link(self):
return reverse('promotions:page-click', kwargs={'page_promotion_id': self.id})
class KeywordPromotion(LinkedPromotion):
"""
A promotion linked to a specific keyword.
This can be used on a search results page to show promotions
linked to a particular keyword.
"""
keyword = models.CharField(_("Keyword"), max_length=200)
def get_link(self):
return reverse('promotions:keyword-click', kwargs={'keyword_promotion_id': self.id})
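# Usage sketch (illustrative, not part of the original module): how a view
# might resolve the promotions placed on the current page and render each one
# with the template returned by template_name().
#
#     def promotions_for_page(request):
#         promos = PagePromotion.objects.filter(page_url=request.path)
#         return [(p.content_object.template_name(),
#                  p.content_object.template_context())
#                 for p in promos]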
|
aykut/django-oscar
|
oscar/apps/promotions/models.py
|
Python
|
bsd-3-clause
| 6,141
|
from ... import options as opts
optimize_flags = {
opts.OptimizeValue.disable : '-O0',
opts.OptimizeValue.size : '-Osize',
opts.OptimizeValue.speed : '-O3',
opts.OptimizeValue.linktime: '-flto',
}
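# Usage sketch (illustrative, not part of the original module): translating a
# list of OptimizeValue options into the cc-family flag strings above.
#
#     def flags_for(values):
#         return [optimize_flags[v] for v in values]
#
#     # flags_for([opts.OptimizeValue.speed, opts.OptimizeValue.linktime])
#     # -> ['-O3', '-flto']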
|
jimporter/bfg9000
|
bfg9000/tools/cc/flags.py
|
Python
|
bsd-3-clause
| 219
|
__author__ = "paul.tunison@kitware.com"
import mock
import nose.tools as ntools
import os
import unittest
from smqtk.representation.data_element.file_element import DataFileElement
from smqtk.tests import TEST_DATA_DIR
class TestDataFileElement (unittest.TestCase):
def test_init_filepath_abs(self):
fp = '/foo.txt'
d = DataFileElement(fp)
ntools.assert_equal(d._filepath, fp)
def test_init_relFilepath_normal(self):
# relative paths should be stored as absolute within the element
fp = 'foo.txt'
d = DataFileElement(fp)
ntools.assert_equal(d._filepath,
os.path.join(os.getcwd(), fp))
def test_content_type(self):
d = DataFileElement('foo.txt')
ntools.assert_equal(d.content_type(), 'text/plain')
@mock.patch('smqtk.representation.data_element.DataElement.write_temp')
def test_writeTempOverride(self, mock_DataElement_wt):
# no manual directory, should return the base filepath
expected_filepath = '/path/to/file.txt'
d = DataFileElement(expected_filepath)
fp = d.write_temp()
ntools.assert_false(mock_DataElement_wt.called)
ntools.assert_equal(expected_filepath, fp)
@mock.patch('smqtk.representation.data_element.DataElement.write_temp')
def test_writeTempOverride_sameDir(self, mock_DataElement_wt):
expected_filepath = '/path/to/file.txt'
target_dir = '/path/to'
d = DataFileElement(expected_filepath)
fp = d.write_temp(temp_dir=target_dir)
ntools.assert_false(mock_DataElement_wt.called)
ntools.assert_equal(fp, expected_filepath)
@mock.patch("smqtk.representation.data_element.file_element.osp.isfile")
@mock.patch('smqtk.representation.data_element.file_utils.safe_create_dir')
@mock.patch('fcntl.fcntl') # global
@mock.patch('os.close') # global
@mock.patch('os.open') # global
@mock.patch('__builtin__.open')
def test_writeTempOverride_diffDir(self, mock_open, mock_os_open,
mock_os_close, mock_fcntl, mock_scd,
mock_isfile):
source_filepath = '/path/to/file.png'
target_dir = '/some/other/dir'
d = DataFileElement(source_filepath)
fp = d.write_temp(temp_dir=target_dir)
# Custom side-effect for os.path.isfile for simulated files
simulate = True
def osp_isfile_side_effect(path):
if simulate and path == fp:
return True
else:
return False
mock_isfile.side_effect = osp_isfile_side_effect
ntools.assert_not_equal(fp, source_filepath)
ntools.assert_equal(os.path.dirname(fp), target_dir)
# subsequent call to write temp should not invoke creation of a new file
fp2 = d.write_temp()
ntools.assert_equal(fp2, source_filepath)
# request in same dir should return same path as first request with that
# directory
fp3 = d.write_temp(target_dir)
ntools.assert_equal(fp, fp3)
# request different target dir
target2 = '/even/different/path'
fp4 = d.write_temp(target2)
ntools.assert_equal(os.path.dirname(fp4), target2)
ntools.assert_not_equal(fp, fp4)
ntools.assert_equal(len(d._temp_filepath_stack), 2)
# Restore normal os.path.isfile functionality
simulate = False
def test_cleanTemp(self):
# a write temp and clean temp should not affect original file
source_file = os.path.join(TEST_DATA_DIR, 'test_file.dat')
ntools.assert_true(os.path.isfile(source_file))
d = DataFileElement(source_file)
d.write_temp()
ntools.assert_equal(len(d._temp_filepath_stack), 0)
d.clean_temp()
ntools.assert_true(os.path.isfile(source_file))
def test_fromConfig(self):
fp = os.path.join(TEST_DATA_DIR, "Lenna.png")
c = {
"filepath": fp
}
df = DataFileElement.from_config(c)
ntools.assert_equal(df._filepath, fp)
def test_toConfig(self):
fp = os.path.join(TEST_DATA_DIR, "Lenna.png")
df = DataFileElement(fp)
c = df.get_config()
ntools.assert_equal(c['filepath'], fp)
def test_configuration(self):
fp = os.path.join(TEST_DATA_DIR, "Lenna.png")
default_config = DataFileElement.get_default_config()
ntools.assert_equal(default_config,
{'filepath': None})
default_config['filepath'] = fp
inst1 = DataFileElement.from_config(default_config)
ntools.assert_equal(default_config, inst1.get_config())
inst2 = DataFileElement.from_config(inst1.get_config())
ntools.assert_equal(inst1, inst2)
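# Round-trip sketch of the configuration behavior exercised above
# (illustrative, not part of the original test module):
#
#     elem = DataFileElement('/path/to/file.png')
#     cfg = elem.get_config()              # {'filepath': '/path/to/file.png'}
#     clone = DataFileElement.from_config(cfg)
#     assert clone == elem                 # equality checked in test_configuration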
|
kfieldho/SMQTK
|
python/smqtk/tests/representation/DataElement/test_DataFileElement.py
|
Python
|
bsd-3-clause
| 4,839
|
"""Sparse accessor"""
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import find_common_type
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.arrays.sparse.array import SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype
class BaseAccessor:
_validation_msg = "Can only use the '.sparse' accessor with Sparse data."
def __init__(self, data=None):
self._parent = data
self._validate(data)
def _validate(self, data):
raise NotImplementedError
@delegate_names(
SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
)
class SparseAccessor(BaseAccessor, PandasDelegate):
"""
    Accessor for sparse data in a Series, with conversion to and from
    other sparse matrix data types.
"""
def _validate(self, data):
if not isinstance(data.dtype, SparseDtype):
raise AttributeError(self._validation_msg)
def _delegate_property_get(self, name, *args, **kwargs):
return getattr(self._parent.array, name)
def _delegate_method(self, name, *args, **kwargs):
if name == "from_coo":
return self.from_coo(*args, **kwargs)
elif name == "to_coo":
return self.to_coo(*args, **kwargs)
else:
raise ValueError
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a Series with sparse values from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : Series
A Series with sparse values.
Examples
--------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(
... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 2.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> ss = pd.Series.sparse.from_coo(A)
>>> ss
0 2 1.0
3 2.0
1 0 3.0
dtype: Sparse[float64, nan]
"""
from pandas import Series
from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series
result = coo_to_sparse_series(A, dense_index=dense_index)
result = Series(result.array, index=result.index, copy=False)
return result
def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
>>> s.index = pd.MultiIndex.from_tuples(
... [
... (1, 2, "a", 0),
... (1, 2, "a", 1),
... (1, 1, "b", 0),
... (1, 1, "b", 1),
... (2, 1, "b", 0),
... (2, 1, "b", 1)
... ],
... names=["A", "B", "C", "D"],
... )
>>> s
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: float64
>>> ss = s.astype("Sparse")
>>> ss
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: Sparse[float64, nan]
>>> A, rows, columns = ss.sparse.to_coo(
... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 3.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo
A, rows, columns = sparse_series_to_coo(
self._parent, row_levels, column_levels, sort_labels=sort_labels
)
return A, rows, columns
def to_dense(self):
"""
Convert a Series from sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
        Series
A Series with the same values, stored as a dense array.
Examples
--------
>>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
>>> series
0 0
1 1
2 0
dtype: Sparse[int64, 0]
>>> series.sparse.to_dense()
0 0
1 1
2 0
dtype: int64
"""
from pandas import Series
return Series(
self._parent.array.to_dense(),
index=self._parent.index,
name=self._parent.name,
)
class SparseFrameAccessor(BaseAccessor, PandasDelegate):
"""
DataFrame accessor for sparse data.
.. versionadded:: 0.25.0
"""
def _validate(self, data):
dtypes = data.dtypes
if not all(isinstance(t, SparseDtype) for t in dtypes):
raise AttributeError(self._validation_msg)
@classmethod
def from_spmatrix(cls, data, index=None, columns=None):
"""
Create a new DataFrame from a scipy sparse matrix.
.. versionadded:: 0.25.0
Parameters
----------
data : scipy.sparse.spmatrix
Must be convertible to csc format.
index, columns : Index, optional
Row and column labels to use for the resulting DataFrame.
Defaults to a RangeIndex.
Returns
-------
DataFrame
Each column of the DataFrame is stored as a
:class:`arrays.SparseArray`.
Examples
--------
>>> import scipy.sparse
>>> mat = scipy.sparse.eye(3)
>>> pd.DataFrame.sparse.from_spmatrix(mat)
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas._libs.sparse import IntIndex
from pandas import DataFrame
data = data.tocsc()
index, columns = cls._prep_index(data, index, columns)
n_rows, n_columns = data.shape
# We need to make sure indices are sorted, as we create
# IntIndex with no input validation (i.e. check_integrity=False ).
# Indices may already be sorted in scipy in which case this adds
# a small overhead.
data.sort_indices()
indices = data.indices
indptr = data.indptr
array_data = data.data
dtype = SparseDtype(array_data.dtype, 0)
arrays = []
for i in range(n_columns):
sl = slice(indptr[i], indptr[i + 1])
idx = IntIndex(n_rows, indices[sl], check_integrity=False)
arr = SparseArray._simple_new(array_data[sl], idx, dtype)
arrays.append(arr)
return DataFrame._from_arrays(
arrays, columns=columns, index=index, verify_integrity=False
)
def to_dense(self):
"""
Convert a DataFrame with sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
DataFrame
A DataFrame with the same values stored as dense arrays.
Examples
--------
>>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
>>> df.sparse.to_dense()
A
0 0
1 1
2 0
"""
from pandas import DataFrame
data = {k: v.array.to_dense() for k, v in self._parent.items()}
return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.25.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. By numpy.find_common_type convention, mixing int64 and
        uint64 will result in a float64 dtype.
"""
import_optional_dependency("scipy")
from scipy.sparse import coo_matrix
dtype = find_common_type(self._parent.dtypes.to_list())
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, datas = [], [], []
for col, name in enumerate(self._parent):
s = self._parent[name]
row = s.array.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
datas.append(s.array.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
datas = np.concatenate(datas)
return coo_matrix((datas, (rows, cols)), shape=self._parent.shape)
@property
def density(self) -> float:
"""
Ratio of non-sparse points to total (dense) data points.
"""
return np.mean([column.array.density for _, column in self._parent.items()])
@staticmethod
def _prep_index(data, index, columns):
from pandas.core.indexes.api import ensure_index
import pandas.core.indexes.base as ibase
N, K = data.shape
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
if len(columns) != K:
raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
if len(index) != N:
raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
return index, columns
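# Usage sketch (illustrative, not part of the original module):
#
#     >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 2])})
#     >>> df.sparse.density
#     0.5
#     >>> coo = df.sparse.to_coo()   # 4x1 scipy.sparse.coo_matrix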
|
jreback/pandas
|
pandas/core/arrays/sparse/accessor.py
|
Python
|
bsd-3-clause
| 11,453
|
"""
This class is used by test_pageobjects
"""
from cumulusci.robotframework.pageobjects import BasePage
from cumulusci.robotframework.pageobjects import pageobject
@pageobject(page_type="Test", object_name="Bar__c")
class BarTestPage(BasePage):
def bar_keyword_1(self, message):
self.builtin.log(message)
return f"bar keyword 1: {message}"
def bar_keyword_2(self, message):
self.builtin.log(message)
return f"bar keyword 2: {message}"
|
SalesforceFoundation/CumulusCI
|
cumulusci/core/tests/BarTestPage.py
|
Python
|
bsd-3-clause
| 479
|
""" test to_datetime """
import calendar
from collections import deque
from datetime import (
datetime,
timedelta,
)
from decimal import Decimal
import locale
from dateutil.parser import parse
from dateutil.tz.tz import tzoffset
import numpy as np
import pytest
import pytz
from pandas._libs import tslib
from pandas._libs.tslibs import (
iNaT,
parsing,
)
from pandas.errors import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
)
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_datetime64_ns_dtype
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
NaT,
Series,
Timestamp,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
from pandas.core.tools import datetimes as tools
from pandas.core.tools.datetimes import start_caching_at
class TestTimeConversionFormats:
@pytest.mark.parametrize("readonly", [True, False])
def test_to_datetime_readonly(self, readonly):
# GH#34857
arr = np.array([], dtype=object)
if readonly:
arr.setflags(write=False)
result = to_datetime(arr)
expected = to_datetime([])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format(self, cache):
values = ["1/1/2000", "1/2/2000", "1/3/2000"]
results1 = [Timestamp("20000101"), Timestamp("20000201"), Timestamp("20000301")]
results2 = [Timestamp("20000101"), Timestamp("20000102"), Timestamp("20000103")]
for vals, expecteds in [
(values, (Index(results1), Index(results2))),
(Series(values), (Series(results1), Series(results2))),
(values[0], (results1[0], results2[0])),
(values[1], (results1[1], results2[1])),
(values[2], (results1[2], results2[2])),
]:
for i, fmt in enumerate(["%d/%m/%Y", "%m/%d/%Y"]):
result = to_datetime(vals, format=fmt, cache=cache)
expected = expecteds[i]
if isinstance(expected, Series):
tm.assert_series_equal(result, Series(expected))
elif isinstance(expected, Timestamp):
assert result == expected
else:
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_YYYYMMDD(self, cache):
s = Series([19801222, 19801222] + [19810105] * 5)
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
result = to_datetime(s.apply(str), format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# with NaT
expected = Series(
[Timestamp("19801222"), Timestamp("19801222")] + [Timestamp("19810105")] * 5
)
expected[2] = np.nan
s[2] = np.nan
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# string with NaT
s = s.apply(str)
s[2] = "nat"
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# coercion
# GH 7930
s = Series([20121231, 20141231, 99991231])
result = to_datetime(s, format="%Y%m%d", errors="ignore", cache=cache)
expected = Series(
[datetime(2012, 12, 31), datetime(2014, 12, 31), datetime(9999, 12, 31)],
dtype=object,
)
tm.assert_series_equal(result, expected)
result = to_datetime(s, format="%Y%m%d", errors="coerce", cache=cache)
expected = Series(["20121231", "20141231", "NaT"], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_s",
[
# Null values with Strings
["19801222", "20010112", None],
["19801222", "20010112", np.nan],
["19801222", "20010112", NaT],
["19801222", "20010112", "NaT"],
# Null values with Integers
[19801222, 20010112, None],
[19801222, 20010112, np.nan],
[19801222, 20010112, NaT],
[19801222, 20010112, "NaT"],
],
)
def test_to_datetime_format_YYYYMMDD_with_none(self, input_s):
# GH 30011
# format='%Y%m%d'
# with None
expected = Series([Timestamp("19801222"), Timestamp("20010112"), NaT])
result = Series(to_datetime(input_s, format="%Y%m%d"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_s, expected",
[
# NaN before strings with invalid date values
[
Series(["19801222", np.nan, "20010012", "10019999"]),
Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
],
# NaN after strings with invalid date values
[
Series(["19801222", "20010012", "10019999", np.nan]),
Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
],
# NaN before integers with invalid date values
[
Series([20190813, np.nan, 20010012, 20019999]),
Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
],
# NaN after integers with invalid date values
[
Series([20190813, 20010012, np.nan, 20019999]),
Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
],
],
)
def test_to_datetime_format_YYYYMMDD_overflow(self, input_s, expected):
# GH 25512
# format='%Y%m%d', errors='coerce'
result = to_datetime(input_s, format="%Y%m%d", errors="coerce")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data, format, expected",
[
([pd.NA], "%Y%m%d%H%M%S", DatetimeIndex(["NaT"])),
([pd.NA], None, DatetimeIndex(["NaT"])),
(
[pd.NA, "20210202202020"],
"%Y%m%d%H%M%S",
DatetimeIndex(["NaT", "2021-02-02 20:20:20"]),
),
(["201010", pd.NA], "%y%m%d", DatetimeIndex(["2020-10-10", "NaT"])),
(["201010", pd.NA], "%d%m%y", DatetimeIndex(["2010-10-20", "NaT"])),
(["201010", pd.NA], None, DatetimeIndex(["2010-10-20", "NaT"])),
([None, np.nan, pd.NA], None, DatetimeIndex(["NaT", "NaT", "NaT"])),
([None, np.nan, pd.NA], "%Y%m%d", DatetimeIndex(["NaT", "NaT", "NaT"])),
],
)
def test_to_datetime_with_NA(self, data, format, expected):
# GH#42957
result = to_datetime(data, format=format)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_integer(self, cache):
# GH 10178
s = Series([2000, 2001, 2002])
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format="%Y", cache=cache)
tm.assert_series_equal(result, expected)
s = Series([200001, 200105, 200206])
expected = Series([Timestamp(x[:4] + "-" + x[4:]) for x in s.apply(str)])
result = to_datetime(s, format="%Y%m", cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"int_date, expected",
[
# valid date, length == 8
[20121030, datetime(2012, 10, 30)],
# short valid date, length == 6
[199934, datetime(1999, 3, 4)],
# long integer date partially parsed to datetime(2012,1,1), length > 8
[2012010101, 2012010101],
# invalid date partially parsed to datetime(2012,9,9), length == 8
[20129930, 20129930],
# short integer date partially parsed to datetime(2012,9,9), length < 8
[2012993, 2012993],
# short invalid date, length == 4
[2121, 2121],
],
)
def test_int_to_datetime_format_YYYYMMDD_typeerror(self, int_date, expected):
# GH 26583
result = to_datetime(int_date, format="%Y%m%d", errors="ignore")
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_microsecond(self, cache):
# these are locale dependent
lang, _ = locale.getlocale()
month_abbr = calendar.month_abbr[4]
val = f"01-{month_abbr}-2011 00:00:01.978"
format = "%d-%b-%Y %H:%M:%S.%f"
result = to_datetime(val, format=format, cache=cache)
exp = datetime.strptime(val, format)
assert result == exp
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_time(self, cache):
data = [
["01/10/2010 15:20", "%m/%d/%Y %H:%M", Timestamp("2010-01-10 15:20")],
["01/10/2010 05:43", "%m/%d/%Y %I:%M", Timestamp("2010-01-10 05:43")],
[
"01/10/2010 13:56:01",
"%m/%d/%Y %H:%M:%S",
Timestamp("2010-01-10 13:56:01"),
] # ,
# ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 20:14')],
# ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 07:40')],
# ['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p',
# Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@td.skip_if_has_locale
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_non_exact(self, cache):
# GH 10834
# 8904
# exact kw
s = Series(
["19MAY11", "foobar19MAY11", "19MAY11:00:00:00", "19MAY11 00:00:00Z"]
)
result = to_datetime(s, format="%d%b%y", exact=False, cache=cache)
expected = to_datetime(
s.str.extract(r"(\d+\w+\d+)", expand=False), format="%d%b%y", cache=cache
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_parse_nanoseconds_with_formula(self, cache):
# GH8989
# truncating the nanoseconds when a format was provided
for v in [
"2012-01-01 09:00:00.000000001",
"2012-01-01 09:00:00.000001",
"2012-01-01 09:00:00.001",
"2012-01-01 09:00:00.001000",
"2012-01-01 09:00:00.001000000",
]:
expected = to_datetime(v, cache=cache)
result = to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f", cache=cache)
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_weeks(self, cache):
data = [
["2009324", "%Y%W%w", Timestamp("2009-08-13")],
["2013020", "%Y%U%w", Timestamp("2013-01-13")],
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@pytest.mark.parametrize(
"fmt,dates,expected_dates",
[
[
"%Y-%m-%d %H:%M:%S %Z",
["2010-01-01 12:00:00 UTC"] * 2,
[Timestamp("2010-01-01 12:00:00", tz="UTC")] * 2,
],
[
"%Y-%m-%d %H:%M:%S %Z",
[
"2010-01-01 12:00:00 UTC",
"2010-01-01 12:00:00 GMT",
"2010-01-01 12:00:00 US/Pacific",
],
[
Timestamp("2010-01-01 12:00:00", tz="UTC"),
Timestamp("2010-01-01 12:00:00", tz="GMT"),
Timestamp("2010-01-01 12:00:00", tz="US/Pacific"),
],
],
[
"%Y-%m-%d %H:%M:%S%z",
["2010-01-01 12:00:00+0100"] * 2,
[Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2,
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 +0100"] * 2,
[Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2,
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 +0100", "2010-01-01 12:00:00 -0100"],
[
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60)),
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(-60)),
],
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 Z", "2010-01-01 12:00:00 Z"],
[
Timestamp(
"2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)
), # pytz coerces to UTC
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)),
],
],
],
)
def test_to_datetime_parse_tzname_or_tzoffset(self, fmt, dates, expected_dates):
# GH 13486
result = to_datetime(dates, format=fmt)
expected = Index(expected_dates)
tm.assert_equal(result, expected)
def test_to_datetime_parse_tzname_or_tzoffset_different_tz_to_utc(self):
# GH 32792
dates = [
"2010-01-01 12:00:00 +0100",
"2010-01-01 12:00:00 -0100",
"2010-01-01 12:00:00 +0300",
"2010-01-01 12:00:00 +0400",
]
expected_dates = [
"2010-01-01 11:00:00+00:00",
"2010-01-01 13:00:00+00:00",
"2010-01-01 09:00:00+00:00",
"2010-01-01 08:00:00+00:00",
]
fmt = "%Y-%m-%d %H:%M:%S %z"
result = to_datetime(dates, format=fmt, utc=True)
expected = DatetimeIndex(expected_dates)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"offset", ["+0", "-1foo", "UTCbar", ":10", "+01:000:01", ""]
)
def test_to_datetime_parse_timezone_malformed(self, offset):
fmt = "%Y-%m-%d %H:%M:%S %z"
date = "2010-01-01 12:00:00 " + offset
msg = "does not match format|unconverted data remains"
with pytest.raises(ValueError, match=msg):
to_datetime([date], format=fmt)
def test_to_datetime_parse_timezone_keeps_name(self):
# GH 21697
fmt = "%Y-%m-%d %H:%M:%S %z"
arg = Index(["2010-01-01 12:00:00 Z"], name="foo")
result = to_datetime(arg, format=fmt)
expected = DatetimeIndex(["2010-01-01 12:00:00"], tz="UTC", name="foo")
tm.assert_index_equal(result, expected)
class TestToDatetime:
@pytest.mark.parametrize(
"s, _format, dt",
[
["2015-1-1", "%G-%V-%u", datetime(2014, 12, 29, 0, 0)],
["2015-1-4", "%G-%V-%u", datetime(2015, 1, 1, 0, 0)],
["2015-1-7", "%G-%V-%u", datetime(2015, 1, 4, 0, 0)],
],
)
def test_to_datetime_iso_week_year_format(self, s, _format, dt):
# See GH#16607
assert to_datetime(s, format=_format) == dt
@pytest.mark.parametrize(
"msg, s, _format",
[
[
"ISO week directive '%V' must be used with the ISO year directive "
"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 50",
"%Y %V",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 51",
"%G %V",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 Monday",
"%G %A",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 Mon",
"%G %a",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 6",
"%G %w",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 6",
"%G %u",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"2051",
"%G",
],
[
"Day of the year directive '%j' is not compatible with ISO year "
"directive '%G'. Use '%Y' instead.",
"1999 51 6 256",
"%G %V %u %j",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 Sunday",
"%Y %V %A",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 Sun",
"%Y %V %a",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 1",
"%Y %V %w",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 1",
"%Y %V %u",
],
[
"ISO week directive '%V' must be used with the ISO year directive "
"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.",
"20",
"%V",
],
],
)
def test_error_iso_week_year(self, msg, s, _format):
# See GH#16607
# This test checks for errors thrown when giving the wrong format
# However, as discussed on PR#25541, overriding the locale
# causes a different error to be thrown due to the format being
# locale specific, but the test data is in english.
# Therefore, the tests only run when locale is not overwritten,
# as a sort of solution to this problem.
if locale.getlocale() != ("zh_CN", "UTF-8") and locale.getlocale() != (
"it_IT",
"UTF-8",
):
with pytest.raises(ValueError, match=msg):
to_datetime(s, format=_format)
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_to_datetime_dtarr(self, tz):
# DatetimeArray
dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz)
arr = DatetimeArray(dti)
result = to_datetime(arr)
assert result is arr
result = to_datetime(arr)
assert result is arr
def test_to_datetime_pydatetime(self):
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
def test_to_datetime_YYYYMMDD(self):
actual = to_datetime("20080115")
assert actual == datetime(2008, 1, 15)
def test_to_datetime_unparseable_ignore(self):
# unparsable
s = "Month 1, 1999"
assert to_datetime(s, errors="ignore") == s
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_now(self):
# See GH#18666
with tm.set_timezone("US/Eastern"):
npnow = np.datetime64("now").astype("datetime64[ns]")
pdnow = to_datetime("now")
pdnow2 = to_datetime(["now"])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdnow.value - npnow.astype(np.int64)) < 1e10
assert abs(pdnow2.value - npnow.astype(np.int64)) < 1e10
assert pdnow.tzinfo is None
assert pdnow2.tzinfo is None
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_today(self):
# See GH#18666
# Test with one timezone far ahead of UTC and another far behind, so
# one of these will _almost_ always be in a different day from UTC.
        # Unfortunately, between 12 and 1 AM Samoa time both of these
        # timezones _and_ UTC will all be in the same day,
# so this test will not detect the regression introduced in #18666.
with tm.set_timezone("Pacific/Auckland"): # 12-13 hours ahead of UTC
nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64)
pdtoday = to_datetime("today")
pdtoday2 = to_datetime(["today"])[0]
tstoday = Timestamp("today")
tstoday2 = Timestamp.today()
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert abs(pdtoday.value - tstoday.value) < 1e10
assert abs(pdtoday.value - tstoday2.value) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
with tm.set_timezone("US/Samoa"): # 11 hours behind UTC
nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64)
pdtoday = to_datetime("today")
pdtoday2 = to_datetime(["today"])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
def test_to_datetime_today_now_unicode_bytes(self):
to_datetime(["now"])
to_datetime(["today"])
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_dt64s(self, cache):
in_bound_dts = [np.datetime64("2000-01-01"), np.datetime64("2000-01-02")]
for dt in in_bound_dts:
assert to_datetime(dt, cache=cache) == Timestamp(dt)
@pytest.mark.parametrize(
"dt", [np.datetime64("1000-01-01"), np.datetime64("5000-01-02")]
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_dt64s_out_of_bounds(self, cache, dt):
msg = f"Out of bounds nanosecond timestamp: {dt}"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(dt, errors="raise")
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp(dt)
assert to_datetime(dt, errors="coerce", cache=cache) is NaT
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize("unit", ["s", "D"])
def test_to_datetime_array_of_dt64s(self, cache, unit):
# https://github.com/pandas-dev/pandas/issues/31491
# Need at least 50 to ensure cache is used.
dts = [
np.datetime64("2000-01-01", unit),
np.datetime64("2000-01-02", unit),
] * 30
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
tm.assert_index_equal(
to_datetime(dts, cache=cache),
DatetimeIndex([Timestamp(x).asm8 for x in dts]),
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64("9999-01-01")]
msg = "Out of bounds nanosecond timestamp: 9999-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(dts_with_oob, errors="raise")
tm.assert_index_equal(
to_datetime(dts_with_oob, errors="coerce", cache=cache),
DatetimeIndex(
[Timestamp(dts_with_oob[0]).asm8, Timestamp(dts_with_oob[1]).asm8] * 30
+ [NaT],
),
)
# With errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
tm.assert_index_equal(
to_datetime(dts_with_oob, errors="ignore", cache=cache),
Index([dt.item() for dt in dts_with_oob]),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_tz(self, cache):
# xref 8260
# uniform returns a DatetimeIndex
arr = [
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
result = to_datetime(arr, cache=cache)
expected = DatetimeIndex(
["2013-01-01 13:00:00", "2013-01-02 14:00:00"], tz="US/Pacific"
)
tm.assert_index_equal(result, expected)
# mixed tzs will raise
arr = [
Timestamp("2013-01-01 13:00:00", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00", tz="US/Eastern"),
]
msg = (
"Tz-aware datetime.datetime cannot be "
"converted to datetime64 unless utc=True"
)
with pytest.raises(ValueError, match=msg):
to_datetime(arr, cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_different_offsets(self, cache):
# inspired by asv timeseries.ToDatetimeNONISO8601 benchmark
# see GH-26097 for more
ts_string_1 = "March 1, 2018 12:00:00+0400"
ts_string_2 = "March 1, 2018 12:00:00+0500"
arr = [ts_string_1] * 5 + [ts_string_2] * 5
expected = Index([parse(x) for x in arr])
result = to_datetime(arr, cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_tz_pytz(self, cache):
# see gh-8260
us_eastern = pytz.timezone("US/Eastern")
arr = np.array(
[
us_eastern.localize(
datetime(year=2000, month=1, day=1, hour=3, minute=0)
),
us_eastern.localize(
datetime(year=2000, month=6, day=1, hour=3, minute=0)
),
],
dtype=object,
)
result = to_datetime(arr, utc=True, cache=cache)
expected = DatetimeIndex(
["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"],
dtype="datetime64[ns, UTC]",
freq=None,
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"init_constructor, end_constructor, test_method",
[
(Index, DatetimeIndex, tm.assert_index_equal),
(list, DatetimeIndex, tm.assert_index_equal),
(np.array, DatetimeIndex, tm.assert_index_equal),
(Series, Series, tm.assert_series_equal),
],
)
def test_to_datetime_utc_true(
self, cache, init_constructor, end_constructor, test_method
):
# See gh-11934 & gh-6415
data = ["20100102 121314", "20100102 121315"]
expected_data = [
Timestamp("2010-01-02 12:13:14", tz="utc"),
Timestamp("2010-01-02 12:13:15", tz="utc"),
]
result = to_datetime(
init_constructor(data), format="%Y%m%d %H%M%S", utc=True, cache=cache
)
expected = end_constructor(expected_data)
test_method(result, expected)
# Test scalar case as well
for scalar, expected in zip(data, expected_data):
result = to_datetime(scalar, format="%Y%m%d %H%M%S", utc=True, cache=cache)
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_utc_true_with_series_single_value(self, cache):
# GH 15760 UTC=True with Series
ts = 1.5e18
result = to_datetime(Series([ts]), utc=True, cache=cache)
expected = Series([Timestamp(ts, tz="utc")])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_utc_true_with_series_tzaware_string(self, cache):
ts = "2013-01-01 00:00:00-01:00"
expected_ts = "2013-01-01 01:00:00"
data = Series([ts] * 3)
result = to_datetime(data, utc=True, cache=cache)
expected = Series([Timestamp(expected_ts, tz="utc")] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"date, dtype",
[
("2013-01-01 01:00:00", "datetime64[ns]"),
("2013-01-01 01:00:00", "datetime64[ns, UTC]"),
],
)
def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype):
expected = Series([Timestamp("2013-01-01 01:00:00", tz="UTC")])
result = to_datetime(Series([date], dtype=dtype), utc=True, cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@td.skip_if_no("psycopg2")
def test_to_datetime_tz_psycopg2(self, cache):
# xref 8260
import psycopg2
# misc cases
tz1 = psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)
tz2 = psycopg2.tz.FixedOffsetTimezone(offset=-240, name=None)
arr = np.array(
[
datetime(2000, 1, 1, 3, 0, tzinfo=tz1),
datetime(2000, 6, 1, 3, 0, tzinfo=tz2),
],
dtype=object,
)
result = to_datetime(arr, errors="coerce", utc=True, cache=cache)
expected = DatetimeIndex(
["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"],
dtype="datetime64[ns, UTC]",
freq=None,
)
tm.assert_index_equal(result, expected)
# dtype coercion
i = DatetimeIndex(
["2000-01-01 08:00:00"],
tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None),
)
assert is_datetime64_ns_dtype(i)
# tz coercion
result = to_datetime(i, errors="coerce", cache=cache)
tm.assert_index_equal(result, i)
result = to_datetime(i, errors="coerce", utc=True, cache=cache)
expected = DatetimeIndex(["2000-01-01 13:00:00"], dtype="datetime64[ns, UTC]")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_datetime_bool(self, cache):
# GH13176
msg = r"dtype bool cannot be converted to datetime64\[ns\]"
with pytest.raises(TypeError, match=msg):
to_datetime(False)
assert to_datetime(False, errors="coerce", cache=cache) is NaT
assert to_datetime(False, errors="ignore", cache=cache) is False
with pytest.raises(TypeError, match=msg):
to_datetime(True)
assert to_datetime(True, errors="coerce", cache=cache) is NaT
assert to_datetime(True, errors="ignore", cache=cache) is True
msg = f"{type(cache)} is not convertible to datetime"
with pytest.raises(TypeError, match=msg):
to_datetime([False, datetime.today()], cache=cache)
with pytest.raises(TypeError, match=msg):
to_datetime(["20130101", True], cache=cache)
tm.assert_index_equal(
to_datetime([0, False, NaT, 0.0], errors="coerce", cache=cache),
DatetimeIndex(
[to_datetime(0, cache=cache), NaT, NaT, to_datetime(0, cache=cache)]
),
)
def test_datetime_invalid_datatype(self):
# GH13176
msg = "is not convertible to datetime"
with pytest.raises(TypeError, match=msg):
to_datetime(bool)
with pytest.raises(TypeError, match=msg):
to_datetime(to_datetime)
@pytest.mark.parametrize("value", ["a", "00:01:99"])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_invalid_scalar(self, value, format, infer):
# GH24763
res = to_datetime(
value, errors="ignore", format=format, infer_datetime_format=infer
)
assert res == value
res = to_datetime(
value, errors="coerce", format=format, infer_datetime_format=infer
)
assert res is NaT
msg = (
"is a bad directive in format|"
"second must be in 0..59|"
"Given date string not likely a datetime"
)
with pytest.raises(ValueError, match=msg):
to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("value", ["3000/12/11 00:00:00"])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_outofbounds_scalar(self, value, format, infer):
# GH24763
res = to_datetime(
value, errors="ignore", format=format, infer_datetime_format=infer
)
assert res == value
res = to_datetime(
value, errors="coerce", format=format, infer_datetime_format=infer
)
assert res is NaT
if format is not None:
msg = "is a bad directive in format|Out of bounds nanosecond timestamp"
with pytest.raises(ValueError, match=msg):
to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
else:
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("values", [["a"], ["00:01:99"], ["a", "b", "99:00:00"]])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_invalid_index(self, values, format, infer):
# GH24763
res = to_datetime(
values, errors="ignore", format=format, infer_datetime_format=infer
)
tm.assert_index_equal(res, Index(values))
res = to_datetime(
values, errors="coerce", format=format, infer_datetime_format=infer
)
tm.assert_index_equal(res, DatetimeIndex([NaT] * len(values)))
msg = (
"is a bad directive in format|"
"Given date string not likely a datetime|"
"second must be in 0..59"
)
with pytest.raises(ValueError, match=msg):
to_datetime(
values, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
@pytest.mark.parametrize("constructor", [list, tuple, np.array, Index, deque])
def test_to_datetime_cache(self, utc, format, constructor):
date = "20130101 00:00:00"
test_dates = [date] * 10 ** 5
data = constructor(test_dates)
result = to_datetime(data, utc=utc, format=format, cache=True)
expected = to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"listlike",
[
(deque([Timestamp("2010-06-02 09:30:00")] * 51)),
([Timestamp("2010-06-02 09:30:00")] * 51),
(tuple([Timestamp("2010-06-02 09:30:00")] * 51)),
],
)
def test_no_slicing_errors_in_should_cache(self, listlike):
# GH 29403
assert tools.should_cache(listlike) is True
def test_to_datetime_from_deque(self):
# GH 29403
result = to_datetime(deque([Timestamp("2010-06-02 09:30:00")] * 51))
expected = to_datetime([Timestamp("2010-06-02 09:30:00")] * 51)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
def test_to_datetime_cache_series(self, utc, format):
date = "20130101 00:00:00"
test_dates = [date] * 10 ** 5
data = Series(test_dates)
result = to_datetime(data, utc=utc, format=format, cache=True)
expected = to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_series_equal(result, expected)
def test_to_datetime_cache_scalar(self):
date = "20130101 00:00:00"
result = to_datetime(date, cache=True)
expected = Timestamp("20130101 00:00:00")
assert result == expected
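    # Caching sketch (illustrative, not part of the original test module):
    # to_datetime builds its string -> Timestamp cache only for inputs of at
    # least `start_caching_at` (50) elements, so repeated values above that
    # size are parsed once:
    #
    #     arr = ["2013-01-01 00:00:00"] * 100
    #     assert tools.should_cache(arr)           # True: large and repetitive
    #     assert not tools.should_cache(arr[:10])  # False: below threshold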
@pytest.mark.parametrize(
"datetimelikes,expected_values",
(
(
(None, np.nan) + (NaT,) * start_caching_at,
(NaT,) * (start_caching_at + 2),
),
(
(None, Timestamp("2012-07-26")) + (NaT,) * start_caching_at,
(NaT, Timestamp("2012-07-26")) + (NaT,) * start_caching_at,
),
(
(None,)
+ (NaT,) * start_caching_at
+ ("2012 July 26", Timestamp("2012-07-26")),
(NaT,) * (start_caching_at + 1)
+ (Timestamp("2012-07-26"), Timestamp("2012-07-26")),
),
),
)
def test_convert_object_to_datetime_with_cache(
self, datetimelikes, expected_values
):
# GH#39882
ser = Series(
datetimelikes,
dtype="object",
)
result_series = to_datetime(ser, errors="coerce")
expected_series = Series(
expected_values,
dtype="datetime64[ns]",
)
tm.assert_series_equal(result_series, expected_series)
@pytest.mark.parametrize(
"date, format",
[
("2017-20", "%Y-%W"),
("20 Sunday", "%W %A"),
("20 Sun", "%W %a"),
("2017-21", "%Y-%U"),
("20 Sunday", "%U %A"),
("20 Sun", "%U %a"),
],
)
def test_week_without_day_and_calendar_year(self, date, format):
# GH16774
msg = "Cannot use '%W' or '%U' without day and year"
with pytest.raises(ValueError, match=msg):
to_datetime(date, format=format)
def test_to_datetime_coerce(self):
# GH 26122
ts_strings = [
"March 1, 2018 12:00:00+0400",
"March 1, 2018 12:00:00+0500",
"20100240",
]
result = to_datetime(ts_strings, errors="coerce")
expected = Index(
[
datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 14400)),
datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 18000)),
NaT,
]
)
tm.assert_index_equal(result, expected)
def test_to_datetime_coerce_malformed(self):
# GH 28299
ts_strings = ["200622-12-31", "111111-24-11"]
result = to_datetime(ts_strings, errors="coerce")
expected = Index([NaT, NaT])
tm.assert_index_equal(result, expected)
def test_iso_8601_strings_with_same_offset(self):
# GH 17697, 11736
ts_str = "2015-11-18 15:30:00+05:30"
result = to_datetime(ts_str)
expected = Timestamp(ts_str)
assert result == expected
expected = DatetimeIndex([Timestamp(ts_str)] * 2)
result = to_datetime([ts_str] * 2)
tm.assert_index_equal(result, expected)
result = DatetimeIndex([ts_str] * 2)
tm.assert_index_equal(result, expected)
def test_iso_8601_strings_with_different_offsets(self):
# GH 17697, 11736
ts_strings = ["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30", NaT]
result = to_datetime(ts_strings)
expected = np.array(
[
datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)),
datetime(2015, 11, 18, 16, 30, tzinfo=tzoffset(None, 23400)),
NaT,
],
dtype=object,
)
# GH 21864
expected = Index(expected)
tm.assert_index_equal(result, expected)
result = to_datetime(ts_strings, utc=True)
expected = DatetimeIndex(
[Timestamp(2015, 11, 18, 10), Timestamp(2015, 11, 18, 10), NaT], tz="UTC"
)
tm.assert_index_equal(result, expected)
def test_iso8601_strings_mixed_offsets_with_naive(self):
# GH 24992
result = to_datetime(
[
"2018-11-28T00:00:00",
"2018-11-28T00:00:00+12:00",
"2018-11-28T00:00:00",
"2018-11-28T00:00:00+06:00",
"2018-11-28T00:00:00",
],
utc=True,
)
expected = to_datetime(
[
"2018-11-28T00:00:00",
"2018-11-27T12:00:00",
"2018-11-28T00:00:00",
"2018-11-27T18:00:00",
"2018-11-28T00:00:00",
],
utc=True,
)
tm.assert_index_equal(result, expected)
items = ["2018-11-28T00:00:00+12:00", "2018-11-28T00:00:00"]
result = to_datetime(items, utc=True)
expected = to_datetime(list(reversed(items)), utc=True)[::-1]
tm.assert_index_equal(result, expected)
def test_mixed_offsets_with_native_datetime_raises(self):
# GH 25978
vals = [
"nan",
Timestamp("1990-01-01"),
"2015-03-14T16:15:14.123-08:00",
"2019-03-04T21:56:32.620-07:00",
None,
]
ser = Series(vals)
assert all(ser[i] is vals[i] for i in range(len(vals))) # GH#40111
mixed = to_datetime(ser)
expected = Series(
[
"NaT",
Timestamp("1990-01-01"),
Timestamp("2015-03-14T16:15:14.123-08:00").to_pydatetime(),
Timestamp("2019-03-04T21:56:32.620-07:00").to_pydatetime(),
None,
],
dtype=object,
)
tm.assert_series_equal(mixed, expected)
with pytest.raises(ValueError, match="Tz-aware datetime.datetime"):
to_datetime(mixed)
def test_non_iso_strings_with_tz_offset(self):
result = to_datetime(["March 1, 2018 12:00:00+0400"] * 2)
expected = DatetimeIndex(
[datetime(2018, 3, 1, 12, tzinfo=pytz.FixedOffset(240))] * 2
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"ts, expected",
[
(Timestamp("2018-01-01"), Timestamp("2018-01-01", tz="UTC")),
(
Timestamp("2018-01-01", tz="US/Pacific"),
Timestamp("2018-01-01 08:00", tz="UTC"),
),
],
)
def test_timestamp_utc_true(self, ts, expected):
# GH 24415
result = to_datetime(ts, utc=True)
assert result == expected
@pytest.mark.parametrize("dt_str", ["00010101", "13000101", "30000101", "99990101"])
def test_to_datetime_with_format_out_of_bounds(self, dt_str):
# GH 9107
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(dt_str, format="%Y%m%d")
def test_to_datetime_utc(self):
arr = np.array([parse("2012-06-13T01:39:00Z")], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_to_datetime_fixed_offset(self):
from pandas.tests.indexes.datetimes.test_timezones import fixed_off
dates = [
datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off),
]
result = to_datetime(dates)
assert result.tz == fixed_off
class TestToDatetimeUnit:
@pytest.mark.parametrize("cache", [True, False])
def test_unit(self, cache):
# GH 11758
# test proper behavior with errors
msg = "cannot specify both format and unit"
with pytest.raises(ValueError, match=msg):
to_datetime([1], unit="D", format="%Y%m%d", cache=cache)
values = [11111111, 1, 1.0, iNaT, NaT, np.nan, "NaT", ""]
result = to_datetime(values, unit="D", errors="ignore", cache=cache)
expected = Index(
[
11111111,
Timestamp("1970-01-02"),
Timestamp("1970-01-02"),
NaT,
NaT,
NaT,
NaT,
NaT,
],
dtype=object,
)
tm.assert_index_equal(result, expected)
result = to_datetime(values, unit="D", errors="coerce", cache=cache)
expected = DatetimeIndex(
["NaT", "1970-01-02", "1970-01-02", "NaT", "NaT", "NaT", "NaT", "NaT"]
)
tm.assert_index_equal(result, expected)
msg = "cannot convert input 11111111 with the unit 'D'"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(values, unit="D", errors="raise", cache=cache)
values = [1420043460000, iNaT, NaT, np.nan, "NaT"]
result = to_datetime(values, errors="ignore", unit="s", cache=cache)
expected = Index([1420043460000, NaT, NaT, NaT, NaT], dtype=object)
tm.assert_index_equal(result, expected)
result = to_datetime(values, errors="coerce", unit="s", cache=cache)
expected = DatetimeIndex(["NaT", "NaT", "NaT", "NaT", "NaT"])
tm.assert_index_equal(result, expected)
msg = "cannot convert input 1420043460000 with the unit 's'"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(values, errors="raise", unit="s", cache=cache)
# if we have a string, then we raise a ValueError
# and NOT an OutOfBoundsDatetime
for val in ["foo", Timestamp("20130101")]:
try:
to_datetime(val, errors="raise", unit="s", cache=cache)
except OutOfBoundsDatetime as err:
raise AssertionError("incorrect exception raised") from err
except ValueError:
pass
@pytest.mark.parametrize("cache", [True, False])
def test_unit_consistency(self, cache):
# consistency of conversions
expected = Timestamp("1970-05-09 14:25:11")
result = to_datetime(11111111, unit="s", errors="raise", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = to_datetime(11111111, unit="s", errors="coerce", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = to_datetime(11111111, unit="s", errors="ignore", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_with_numeric(self, cache):
# GH 13180
# coercions from floats/ints are ok
expected = DatetimeIndex(["2015-06-19 05:33:20", "2015-05-27 22:33:20"])
arr1 = [1.434692e18, 1.432766e18]
arr2 = np.array(arr1).astype("int64")
for errors in ["ignore", "raise", "coerce"]:
result = to_datetime(arr1, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
result = to_datetime(arr2, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
# but we want to make sure that we are coercing
# if we have ints/strings
expected = DatetimeIndex(["NaT", "2015-06-19 05:33:20", "2015-05-27 22:33:20"])
arr = ["foo", 1.434692e18, 1.432766e18]
result = to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
expected = DatetimeIndex(
["2015-06-19 05:33:20", "2015-05-27 22:33:20", "NaT", "NaT"]
)
arr = [1.434692e18, 1.432766e18, "foo", "NaT"]
result = to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_mixed(self, cache):
# mixed integers/datetimes
expected = DatetimeIndex(["2013-01-01", "NaT", "NaT"])
arr = [Timestamp("20130101"), 1.434692e18, 1.432766e18]
result = to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
msg = "mixed datetimes and integers in passed array"
with pytest.raises(ValueError, match=msg):
to_datetime(arr, errors="raise", cache=cache)
expected = DatetimeIndex(["NaT", "NaT", "2013-01-01"])
arr = [1.434692e18, 1.432766e18, Timestamp("20130101")]
result = to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
to_datetime(arr, errors="raise", cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_rounding(self, cache):
# GH 14156 & GH 20445: argument will incur floating point errors
# but no premature rounding
result = to_datetime(1434743731.8770001, unit="s", cache=cache)
expected = Timestamp("2015-06-19 19:55:31.877000192")
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_unit_ignore_keeps_name(self, cache):
# GH 21697
expected = Index([15e9] * 2, name="name")
result = to_datetime(expected, errors="ignore", unit="s", cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_dataframe(self, cache):
df = DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"hour": [6, 7],
"minute": [58, 59],
"second": [10, 11],
"ms": [1, 1],
"us": [2, 2],
"ns": [3, 3],
}
)
result = to_datetime(
{"year": df["year"], "month": df["month"], "day": df["day"]}, cache=cache
)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:0:00")]
)
tm.assert_series_equal(result, expected)
# dict-like
result = to_datetime(df[["year", "month", "day"]].to_dict(), cache=cache)
tm.assert_series_equal(result, expected)
# dict but with constructable
df2 = df[["year", "month", "day"]].to_dict()
df2["month"] = 2
result = to_datetime(df2, cache=cache)
expected2 = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160205 00:0:00")]
)
tm.assert_series_equal(result, expected2)
# unit mappings
units = [
{
"year": "years",
"month": "months",
"day": "days",
"hour": "hours",
"minute": "minutes",
"second": "seconds",
},
{
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
},
]
for d in units:
result = to_datetime(df[list(d.keys())].rename(columns=d), cache=cache)
expected = Series(
[Timestamp("20150204 06:58:10"), Timestamp("20160305 07:59:11")]
)
tm.assert_series_equal(result, expected)
d = {
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
"ms": "ms",
"us": "us",
"ns": "ns",
}
result = to_datetime(df.rename(columns=d), cache=cache)
expected = Series(
[
Timestamp("20150204 06:58:10.001002003"),
Timestamp("20160305 07:59:11.001002003"),
]
)
tm.assert_series_equal(result, expected)
# coerce back to int
result = to_datetime(df.astype(str), cache=cache)
tm.assert_series_equal(result, expected)
# passing coerce
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
msg = (
"cannot assemble the datetimes: time data .+ does not "
r"match format '%Y%m%d' \(match\)"
)
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
result = to_datetime(df2, errors="coerce", cache=cache)
expected = Series([Timestamp("20150204 00:00:00"), NaT])
tm.assert_series_equal(result, expected)
# extra columns
msg = r"extra keys have been passed to the datetime assemblage: \[foo\]"
with pytest.raises(ValueError, match=msg):
df2 = df.copy()
df2["foo"] = 1
to_datetime(df2, cache=cache)
# not enough
msg = (
r"to assemble mappings requires at least that \[year, month, "
r"day\] be specified: \[.+\] is missing"
)
for c in [
["year"],
["year", "month"],
["year", "month", "second"],
["month", "day"],
["year", "day", "second"],
]:
with pytest.raises(ValueError, match=msg):
to_datetime(df[c], cache=cache)
# duplicates
msg = "cannot assemble with duplicate keys"
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
df2.columns = ["year", "year", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
df2 = DataFrame(
{"year": [2015, 2016], "month": [2, 20], "day": [4, 5], "hour": [4, 5]}
)
df2.columns = ["year", "month", "day", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_dataframe_dtypes(self, cache):
# #13451
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
# int16
result = to_datetime(df.astype("int16"), cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
# mixed dtypes
df["month"] = df["month"].astype("int8")
df["day"] = df["day"].astype("int8")
result = to_datetime(df, cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
# float
df = DataFrame({"year": [2000, 2001], "month": [1.5, 1], "day": [1, 1]})
msg = "cannot assemble the datetimes: unconverted data remains: 1"
with pytest.raises(ValueError, match=msg):
to_datetime(df, cache=cache)
def test_dataframe_utc_true(self):
# GH 23760
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
result = to_datetime(df, utc=True)
expected = Series(
np.array(["2015-02-04", "2016-03-05"], dtype="datetime64[ns]")
).dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_to_datetime_errors_ignore_utc_true(self):
# GH 23758
result = to_datetime([1], unit="s", utc=True, errors="ignore")
expected = DatetimeIndex(["1970-01-01 00:00:01"], tz="UTC")
tm.assert_index_equal(result, expected)
# TODO: this is moved from tests.series.test_timeseries, may be redundant
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([epoch + t for t in range(20)])
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT])
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
# GH13834
s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t)
for t in np.arange(0, 2, 0.25)
]
+ [NaT]
)
# GH20455 argument will incur floating point errors but no premature rounding
result = result.round("ms")
tm.assert_series_equal(result, expected)
s = pd.concat(
[Series([epoch + t for t in range(20)]).astype(float), Series([np.nan])],
ignore_index=True,
)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
result = to_datetime([1, 2, "NaT", NaT, np.nan], unit="D")
expected = DatetimeIndex(
[Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3
)
tm.assert_index_equal(result, expected)
msg = "non convertible value foo with the unit 'D'"
with pytest.raises(ValueError, match=msg):
to_datetime([1, 2, "foo"], unit="D")
msg = "cannot convert input 111111111 with the unit 'D'"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime([1, 2, 111111111], unit="D")
# coerce we can process
expected = DatetimeIndex(
[Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1
)
result = to_datetime([1, 2, "foo"], unit="D", errors="coerce")
tm.assert_index_equal(result, expected)
result = to_datetime([1, 2, 111111111], unit="D", errors="coerce")
tm.assert_index_equal(result, expected)
class TestToDatetimeMisc:
def test_to_datetime_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(arr)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601(self, cache):
result = to_datetime(["2012-01-01 00:00:00"], cache=cache)
exp = Timestamp("2012-01-01 00:00:00")
assert result[0] == exp
result = to_datetime(["20121001"], cache=cache) # bad iso 8601
exp = Timestamp("2012-10-01")
assert result[0] == exp
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_default(self, cache):
rs = to_datetime("2001", cache=cache)
xp = datetime(2001, 1, 1)
assert rs == xp
# dayfirst is essentially broken
# to_datetime('01-13-2012', dayfirst=True)
# pytest.raises(ValueError, to_datetime('01-13-2012',
# dayfirst=True))
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_on_datetime64_series(self, cache):
# #2699
s = Series(date_range("1/1/2000", periods=10))
result = to_datetime(s, cache=cache)
assert result[0] == s[0]
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_space_in_series(self, cache):
# GH 6428
s = Series(["10/18/2006", "10/18/2008", " "])
msg = r"(\(')?String does not contain a date(:', ' '\))?"
with pytest.raises(ValueError, match=msg):
to_datetime(s, errors="raise", cache=cache)
result_coerce = to_datetime(s, errors="coerce", cache=cache)
expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), NaT])
tm.assert_series_equal(result_coerce, expected_coerce)
result_ignore = to_datetime(s, errors="ignore", cache=cache)
tm.assert_series_equal(result_ignore, s)
@td.skip_if_has_locale
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_apply(self, cache):
# this is only locale tested with US/None locales
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(["May 04", "Jun 02", "Dec 11"], index=[1, 2, 3])
expected = to_datetime(td, format="%b %y", cache=cache)
result = td.apply(to_datetime, format="%b %y", cache=cache)
tm.assert_series_equal(result, expected)
td = Series(["May 04", "Jun 02", ""], index=[1, 2, 3])
msg = r"time data '' does not match format '%b %y' \(match\)"
with pytest.raises(ValueError, match=msg):
to_datetime(td, format="%b %y", errors="raise", cache=cache)
with pytest.raises(ValueError, match=msg):
td.apply(to_datetime, format="%b %y", errors="raise", cache=cache)
expected = to_datetime(td, format="%b %y", errors="coerce", cache=cache)
result = td.apply(
lambda x: to_datetime(x, format="%b %y", errors="coerce", cache=cache)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_types(self, cache):
# empty string
result = to_datetime("", cache=cache)
assert result is NaT
result = to_datetime(["", ""], cache=cache)
assert isna(result).all()
# ints
result = Timestamp(0)
expected = to_datetime(0, cache=cache)
assert result == expected
# GH 3888 (strings)
expected = to_datetime(["2012"], cache=cache)[0]
result = to_datetime("2012", cache=cache)
assert result == expected
# array = ['2012','20120101','20120101 12:01:01']
array = ["20120101", "20120101 12:01:01"]
expected = list(to_datetime(array, cache=cache))
result = [Timestamp(date_str) for date_str in array]
tm.assert_almost_equal(result, expected)
# currently fails ###
# result = Timestamp('2012')
# expected = to_datetime('2012')
# assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_unprocessable_input(self, cache):
# GH 4928
# GH 21864
result = to_datetime([1, "1"], errors="ignore", cache=cache)
expected = Index(np.array([1, "1"], dtype="O"))
tm.assert_equal(result, expected)
msg = "invalid string coercion to datetime"
with pytest.raises(TypeError, match=msg):
to_datetime([1, "1"], errors="raise", cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_unhashable_input(self, cache):
series = Series([["a"]] * 100)
result = to_datetime(series, errors="ignore", cache=cache)
tm.assert_series_equal(series, result)
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view("M8[us]")
as_obj = scalar.astype("O")
index = DatetimeIndex([scalar])
assert index[0] == scalar.astype("O")
value = Timestamp(scalar)
assert value == as_obj
def test_to_datetime_list_of_integers(self):
rng = date_range("1/1/2000", periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
tm.assert_index_equal(rng, result)
def test_to_datetime_overflow(self):
# gh-17637
# we are overflowing Timedelta range here
msg = "|".join(
[
"Python int too large to convert to C long",
"long too big to convert",
"int too big to convert",
]
)
with pytest.raises(OutOfBoundsTimedelta, match=msg):
date_range(start="1/1/1700", freq="B", periods=100000)
@pytest.mark.parametrize("cache", [True, False])
def test_string_na_nat_conversion(self, cache):
# GH #999, #858
strings = np.array(
["1/1/2000", "1/2/2000", np.nan, "1/4/2000, 12:34:56"], dtype=object
)
expected = np.empty(4, dtype="M8[ns]")
for i, val in enumerate(strings):
if isna(val):
expected[i] = iNaT
else:
expected[i] = parse(val)
result = tslib.array_to_datetime(strings)[0]
tm.assert_almost_equal(result, expected)
result2 = to_datetime(strings, cache=cache)
assert isinstance(result2, DatetimeIndex)
tm.assert_numpy_array_equal(result, result2.values)
malformed = np.array(["1/100/2000", np.nan], dtype=object)
# GH 10636, default is now 'raise'
msg = r"Unknown string format:|day is out of range for month"
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
result = to_datetime(malformed, errors="ignore", cache=cache)
# GH 21864
expected = Index(malformed)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
idx = ["a", "b", "c", "d", "e"]
series = Series(
["1/1/2000", np.nan, "1/3/2000", np.nan, "1/5/2000"], index=idx, name="foo"
)
dseries = Series(
[
to_datetime("1/1/2000", cache=cache),
np.nan,
to_datetime("1/3/2000", cache=cache),
np.nan,
to_datetime("1/5/2000", cache=cache),
],
index=idx,
name="foo",
)
result = to_datetime(series, cache=cache)
dresult = to_datetime(dseries, cache=cache)
expected = Series(np.empty(5, dtype="M8[ns]"), index=idx)
for i in range(5):
x = series[i]
if isna(x):
expected[i] = NaT
else:
expected[i] = to_datetime(x, cache=cache)
tm.assert_series_equal(result, expected, check_names=False)
assert result.name == "foo"
tm.assert_series_equal(dresult, expected, check_names=False)
assert dresult.name == "foo"
@pytest.mark.parametrize(
"dtype",
[
"datetime64[h]",
"datetime64[m]",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
@pytest.mark.parametrize("cache", [True, False])
def test_dti_constructor_numpy_timeunits(self, cache, dtype):
# GH 9114
base = to_datetime(["2000-01-01T00:00", "2000-01-02T00:00", "NaT"], cache=cache)
values = base.values.astype(dtype)
tm.assert_index_equal(DatetimeIndex(values), base)
tm.assert_index_equal(to_datetime(values, cache=cache), base)
@pytest.mark.parametrize("cache", [True, False])
def test_dayfirst(self, cache):
# GH 5917
arr = ["10/02/2014", "11/02/2014", "12/02/2014"]
expected = DatetimeIndex(
[datetime(2014, 2, 10), datetime(2014, 2, 11), datetime(2014, 2, 12)]
)
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True, cache=cache)
idx4 = to_datetime(np.array(arr), dayfirst=True, cache=cache)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
tm.assert_index_equal(expected, idx1)
tm.assert_index_equal(expected, idx2)
tm.assert_index_equal(expected, idx3)
tm.assert_index_equal(expected, idx4)
tm.assert_index_equal(expected, idx5)
tm.assert_index_equal(expected, idx6)
def test_dayfirst_warnings(self):
# GH 12585
warning_msg_day_first = (
"Parsing '31/12/2014' in DD/MM/YYYY format. Provide "
"format or specify infer_datetime_format=True for consistent parsing."
)
warning_msg_month_first = (
"Parsing '03/30/2011' in MM/DD/YYYY format. Provide "
"format or specify infer_datetime_format=True for consistent parsing."
)
# CASE 1: valid input
arr = ["31/12/2014", "10/03/2011"]
expected_consistent = DatetimeIndex(
["2014-12-31", "2011-03-10"], dtype="datetime64[ns]", freq=None
)
expected_inconsistent = DatetimeIndex(
["2014-12-31", "2011-10-03"], dtype="datetime64[ns]", freq=None
)
# A. dayfirst arg correct, no warning
res1 = to_datetime(arr, dayfirst=True)
tm.assert_index_equal(expected_consistent, res1)
# B. dayfirst arg incorrect, warning + incorrect output
with tm.assert_produces_warning(UserWarning, match=warning_msg_day_first):
res2 = to_datetime(arr, dayfirst=False)
tm.assert_index_equal(expected_inconsistent, res2)
# C. dayfirst default arg, same as B
with tm.assert_produces_warning(UserWarning, match=warning_msg_day_first):
res3 = to_datetime(arr, dayfirst=False)
tm.assert_index_equal(expected_inconsistent, res3)
# D. infer_datetime_format=True overrides dayfirst default
# no warning + correct result
res4 = to_datetime(arr, infer_datetime_format=True)
tm.assert_index_equal(expected_consistent, res4)
# CASE 2: invalid input
# cannot consistently process with single format
# warnings *always* raised
arr = ["31/12/2014", "03/30/2011"]
# first in DD/MM/YYYY, second in MM/DD/YYYY
expected = DatetimeIndex(
["2014-12-31", "2011-03-30"], dtype="datetime64[ns]", freq=None
)
# A. use dayfirst=True
with tm.assert_produces_warning(UserWarning, match=warning_msg_month_first):
res5 = to_datetime(arr, dayfirst=True)
tm.assert_index_equal(expected, res5)
# B. use dayfirst=False
with tm.assert_produces_warning(UserWarning, match=warning_msg_day_first):
res6 = to_datetime(arr, dayfirst=False)
tm.assert_index_equal(expected, res6)
# C. use dayfirst default arg, same as B
with tm.assert_produces_warning(UserWarning, match=warning_msg_day_first):
res7 = to_datetime(arr, dayfirst=False)
tm.assert_index_equal(expected, res7)
# D. use infer_datetime_format=True
with tm.assert_produces_warning(UserWarning, match=warning_msg_day_first):
res8 = to_datetime(arr, infer_datetime_format=True)
tm.assert_index_equal(expected, res8)
@pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray])
def test_to_datetime_dta_tz(self, klass):
# GH#27733
dti = date_range("2015-04-05", periods=3).rename("foo")
expected = dti.tz_localize("UTC")
obj = klass(dti)
expected = klass(expected)
result = to_datetime(obj, utc=True)
tm.assert_equal(result, expected)
class TestGuessDatetimeFormat:
@td.skip_if_not_us_locale
def test_guess_datetime_format_for_array(self):
expected_format = "%Y-%m-%d %H:%M:%S.%f"
dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)
test_arrays = [
np.array([dt_string, dt_string, dt_string], dtype="O"),
np.array([np.nan, np.nan, dt_string], dtype="O"),
np.array([dt_string, "random_string"], dtype="O"),
]
for test_array in test_arrays:
assert tools._guess_datetime_format_for_array(test_array) == expected_format
format_for_string_of_nans = tools._guess_datetime_format_for_array(
np.array([np.nan, np.nan, np.nan], dtype="O")
)
assert format_for_string_of_nans is None
class TestToDatetimeInferFormat:
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_consistent_format(self, cache):
s = Series(date_range("20000101", periods=50, freq="H"))
test_formats = ["%m-%d-%Y", "%m/%d/%Y %H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S.%f"]
for test_format in test_formats:
s_as_dt_strings = s.apply(lambda x: x.strftime(test_format))
with_format = to_datetime(s_as_dt_strings, format=test_format, cache=cache)
no_infer = to_datetime(
s_as_dt_strings, infer_datetime_format=False, cache=cache
)
yes_infer = to_datetime(
s_as_dt_strings, infer_datetime_format=True, cache=cache
)
            # Whether the format is explicitly passed, inferred, or not
            # inferred at all, the results should all be the same
tm.assert_series_equal(with_format, no_infer)
tm.assert_series_equal(no_infer, yes_infer)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache):
s = Series(
np.array(
["01/01/2011 00:00:00", "01-02-2011 00:00:00", "2011-01-03T00:00:00"]
)
)
# When the format is inconsistent, infer_datetime_format should just
# fallback to the default parsing
tm.assert_series_equal(
to_datetime(s, infer_datetime_format=False, cache=cache),
to_datetime(s, infer_datetime_format=True, cache=cache),
)
s = Series(np.array(["Jan/01/2011", "Feb/01/2011", "Mar/01/2011"]))
tm.assert_series_equal(
to_datetime(s, infer_datetime_format=False, cache=cache),
to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_with_nans(self, cache):
s = Series(
np.array(
["01/01/2011 00:00:00", np.nan, "01/03/2011 00:00:00", np.nan],
dtype=object,
)
)
tm.assert_series_equal(
to_datetime(s, infer_datetime_format=False, cache=cache),
to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):
s = Series(
np.array(
[
np.nan,
np.nan,
"01/01/2011 00:00:00",
"01/02/2011 00:00:00",
"01/03/2011 00:00:00",
],
dtype=object,
)
)
tm.assert_series_equal(
to_datetime(s, infer_datetime_format=False, cache=cache),
to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize(
"tz_name, offset", [("UTC", 0), ("UTC-3", 180), ("UTC+3", -180)]
)
def test_infer_datetime_format_tz_name(self, tz_name, offset):
# GH 33133
s = Series([f"2019-02-02 08:07:13 {tz_name}"])
result = to_datetime(s, infer_datetime_format=True)
expected = Series(
[Timestamp("2019-02-02 08:07:13").tz_localize(pytz.FixedOffset(offset))]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ts,zero_tz,is_utc",
[
("2019-02-02 08:07:13", "Z", True),
("2019-02-02 08:07:13", "", False),
("2019-02-02 08:07:13.012345", "Z", True),
("2019-02-02 08:07:13.012345", "", False),
],
)
def test_infer_datetime_format_zero_tz(self, ts, zero_tz, is_utc):
# GH 41047
s = Series([ts + zero_tz])
result = to_datetime(s, infer_datetime_format=True)
tz = pytz.utc if is_utc else None
expected = Series([Timestamp(ts, tz=tz)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601_noleading_0s(self, cache):
# GH 11871
s = Series(["2014-1-1", "2014-2-2", "2015-3-3"])
expected = Series(
[
Timestamp("2014-01-01"),
Timestamp("2014-02-02"),
Timestamp("2015-03-03"),
]
)
tm.assert_series_equal(to_datetime(s, cache=cache), expected)
tm.assert_series_equal(to_datetime(s, format="%Y-%m-%d", cache=cache), expected)
class TestDaysInMonth:
# tests for issue #10154
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_coerce(self, cache):
assert isna(to_datetime("2015-02-29", errors="coerce", cache=cache))
assert isna(
to_datetime("2015-02-29", format="%Y-%m-%d", errors="coerce", cache=cache)
)
assert isna(
to_datetime("2015-02-32", format="%Y-%m-%d", errors="coerce", cache=cache)
)
assert isna(
to_datetime("2015-04-31", format="%Y-%m-%d", errors="coerce", cache=cache)
)
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_raise(self, cache):
msg = "day is out of range for month"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-29", errors="raise", cache=cache)
msg = "time data 2015-02-29 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-29", errors="raise", format="%Y-%m-%d", cache=cache)
msg = "time data 2015-02-32 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-32", errors="raise", format="%Y-%m-%d", cache=cache)
msg = "time data 2015-04-31 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-04-31", errors="raise", format="%Y-%m-%d", cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_ignore(self, cache):
assert to_datetime("2015-02-29", errors="ignore", cache=cache) == "2015-02-29"
assert (
to_datetime("2015-02-29", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-02-29"
)
assert (
to_datetime("2015-02-32", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-02-32"
)
assert (
to_datetime("2015-04-31", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-04-31"
)
class TestDatetimeParsingWrappers:
@pytest.mark.parametrize(
"date_str,expected",
list(
{
"2011-01-01": datetime(2011, 1, 1),
"2Q2005": datetime(2005, 4, 1),
"2Q05": datetime(2005, 4, 1),
"2005Q1": datetime(2005, 1, 1),
"05Q1": datetime(2005, 1, 1),
"2011Q3": datetime(2011, 7, 1),
"11Q3": datetime(2011, 7, 1),
"3Q2011": datetime(2011, 7, 1),
"3Q11": datetime(2011, 7, 1),
# quarterly without space
"2000Q4": datetime(2000, 10, 1),
"00Q4": datetime(2000, 10, 1),
"4Q2000": datetime(2000, 10, 1),
"4Q00": datetime(2000, 10, 1),
"2000q4": datetime(2000, 10, 1),
"2000-Q4": datetime(2000, 10, 1),
"00-Q4": datetime(2000, 10, 1),
"4Q-2000": datetime(2000, 10, 1),
"4Q-00": datetime(2000, 10, 1),
"00q4": datetime(2000, 10, 1),
"2005": datetime(2005, 1, 1),
"2005-11": datetime(2005, 11, 1),
"2005 11": datetime(2005, 11, 1),
"11-2005": datetime(2005, 11, 1),
"11 2005": datetime(2005, 11, 1),
"200511": datetime(2020, 5, 11),
"20051109": datetime(2005, 11, 9),
"20051109 10:15": datetime(2005, 11, 9, 10, 15),
"20051109 08H": datetime(2005, 11, 9, 8, 0),
"2005-11-09 10:15": datetime(2005, 11, 9, 10, 15),
"2005-11-09 08H": datetime(2005, 11, 9, 8, 0),
"2005/11/09 10:15": datetime(2005, 11, 9, 10, 15),
"2005/11/09 08H": datetime(2005, 11, 9, 8, 0),
"Thu Sep 25 10:36:28 2003": datetime(2003, 9, 25, 10, 36, 28),
"Thu Sep 25 2003": datetime(2003, 9, 25),
"Sep 25 2003": datetime(2003, 9, 25),
"January 1 2014": datetime(2014, 1, 1),
                # GH 10537
"2014-06": datetime(2014, 6, 1),
"06-2014": datetime(2014, 6, 1),
"2014-6": datetime(2014, 6, 1),
"6-2014": datetime(2014, 6, 1),
"20010101 12": datetime(2001, 1, 1, 12),
"20010101 1234": datetime(2001, 1, 1, 12, 34),
"20010101 123456": datetime(2001, 1, 1, 12, 34, 56),
}.items()
),
)
@pytest.mark.parametrize("cache", [True, False])
def test_parsers(self, date_str, expected, cache):
# dateutil >= 2.5.0 defaults to yearfirst=True
# https://github.com/dateutil/dateutil/issues/217
yearfirst = True
result1, _ = parsing.parse_time_string(date_str, yearfirst=yearfirst)
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
result4 = to_datetime(
np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache
)
result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
# result7 is used below
result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
for res in [result1, result2]:
assert res == expected
for res in [result3, result4, result6, result8, result9]:
exp = DatetimeIndex([Timestamp(expected)])
tm.assert_index_equal(res, exp)
        # these cases really need yearfirst support, which we don't provide:
if not yearfirst:
result5 = Timestamp(date_str)
assert result5 == expected
result7 = date_range(date_str, freq="S", periods=1, yearfirst=yearfirst)
assert result7 == expected
@pytest.mark.parametrize("cache", [True, False])
def test_na_values_with_cache(
self, cache, unique_nulls_fixture, unique_nulls_fixture2
):
# GH22305
expected = Index([NaT, NaT], dtype="datetime64[ns]")
result = to_datetime([unique_nulls_fixture, unique_nulls_fixture2], cache=cache)
tm.assert_index_equal(result, expected)
def test_parsers_nat(self):
# Test that each of several string-accepting methods return pd.NaT
result1, _ = parsing.parse_time_string("NaT")
result2 = to_datetime("NaT")
result3 = Timestamp("NaT")
result4 = DatetimeIndex(["NaT"])[0]
assert result1 is NaT
assert result2 is NaT
assert result3 is NaT
assert result4 is NaT
@pytest.mark.parametrize("cache", [True, False])
def test_parsers_dayfirst_yearfirst(self, cache):
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# bug fix in 2.5.2
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# revert of bug in 2.5.2
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# str : dayfirst, yearfirst, expected
cases = {
"10-11-12": [
(False, False, datetime(2012, 10, 11)),
(True, False, datetime(2012, 11, 10)),
(False, True, datetime(2010, 11, 12)),
(True, True, datetime(2010, 12, 11)),
],
"20/12/21": [
(False, False, datetime(2021, 12, 20)),
(True, False, datetime(2021, 12, 20)),
(False, True, datetime(2020, 12, 21)),
(True, True, datetime(2020, 12, 21)),
],
}
for date_str, values in cases.items():
for dayfirst, yearfirst, expected in values:
# compare with dateutil result
dateutil_result = parse(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
assert dateutil_result == expected
result1, _ = parsing.parse_time_string(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
# we don't support dayfirst/yearfirst here:
if not dayfirst and not yearfirst:
result2 = Timestamp(date_str)
assert result2 == expected
result3 = to_datetime(
date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache
)
result4 = DatetimeIndex(
[date_str], dayfirst=dayfirst, yearfirst=yearfirst
)[0]
assert result1 == expected
assert result3 == expected
assert result4 == expected
@pytest.mark.parametrize("cache", [True, False])
def test_parsers_timestring(self, cache):
# must be the same as dateutil result
cases = {
"10:15": (parse("10:15"), datetime(1, 1, 1, 10, 15)),
"9:05": (parse("9:05"), datetime(1, 1, 1, 9, 5)),
}
for date_str, (exp_now, exp_def) in cases.items():
result1, _ = parsing.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
            # parse_time_string() resolves the time against the default date
            # (year 1); the others resolve against the current date, and that
            # can't be changed because time series plotting relies on it
assert result1 == exp_def
assert result2 == exp_now
assert result3 == exp_now
assert result4 == exp_now
assert result5 == exp_now
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"dt_string, tz, dt_string_repr",
[
(
"2013-01-01 05:45+0545",
pytz.FixedOffset(345),
"Timestamp('2013-01-01 05:45:00+0545', tz='pytz.FixedOffset(345)')",
),
(
"2013-01-01 05:30+0530",
pytz.FixedOffset(330),
"Timestamp('2013-01-01 05:30:00+0530', tz='pytz.FixedOffset(330)')",
),
],
)
def test_parsers_timezone_minute_offsets_roundtrip(
self, cache, dt_string, tz, dt_string_repr
):
# GH11708
base = to_datetime("2013-01-01 00:00:00", cache=cache)
base = base.tz_localize("UTC").tz_convert(tz)
dt_time = to_datetime(dt_string, cache=cache)
assert base == dt_time
assert dt_string_repr == repr(dt_time)
@pytest.fixture(params=["D", "s", "ms", "us", "ns"])
def units(request):
"""Day and some time units.
* D
* s
* ms
* us
* ns
"""
return request.param
@pytest.fixture
def epoch_1960():
"""Timestamp at 1960-01-01."""
return Timestamp("1960-01-01")
@pytest.fixture
def units_from_epochs():
return list(range(5))
@pytest.fixture(params=["timestamp", "pydatetime", "datetime64", "str_1960"])
def epochs(epoch_1960, request):
"""Timestamp at 1960-01-01 in various forms.
* Timestamp
* datetime.datetime
* numpy.datetime64
* str
"""
assert request.param in {"timestamp", "pydatetime", "datetime64", "str_1960"}
if request.param == "timestamp":
return epoch_1960
elif request.param == "pydatetime":
return epoch_1960.to_pydatetime()
elif request.param == "datetime64":
return epoch_1960.to_datetime64()
else:
return str(epoch_1960)
@pytest.fixture
def julian_dates():
return date_range("2014-1-1", periods=10).to_julian_date().values
class TestOrigin:
def test_to_basic(self, julian_dates):
# gh-11276, gh-11745
# for origin as julian
result = Series(to_datetime(julian_dates, unit="D", origin="julian"))
expected = Series(
to_datetime(julian_dates - Timestamp(0).to_julian_date(), unit="D")
)
tm.assert_series_equal(result, expected)
result = Series(to_datetime([0, 1, 2], unit="D", origin="unix"))
expected = Series(
[Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")]
)
tm.assert_series_equal(result, expected)
# default
result = Series(to_datetime([0, 1, 2], unit="D"))
expected = Series(
[Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")]
)
tm.assert_series_equal(result, expected)
def test_julian_round_trip(self):
result = to_datetime(2456658, origin="julian", unit="D")
assert result.to_julian_date() == 2456658
# out-of-bounds
msg = "1 is Out of Bounds for origin='julian'"
with pytest.raises(ValueError, match=msg):
to_datetime(1, origin="julian", unit="D")
def test_invalid_unit(self, units, julian_dates):
# checking for invalid combination of origin='julian' and unit != D
if units != "D":
msg = "unit must be 'D' for origin='julian'"
with pytest.raises(ValueError, match=msg):
to_datetime(julian_dates, unit=units, origin="julian")
def test_invalid_origin(self):
# need to have a numeric specified
msg = "it must be numeric with a unit specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2005-01-01", origin="1960-01-01")
with pytest.raises(ValueError, match=msg):
to_datetime("2005-01-01", origin="1960-01-01", unit="D")
def test_epoch(self, units, epochs, epoch_1960, units_from_epochs):
expected = Series(
[pd.Timedelta(x, unit=units) + epoch_1960 for x in units_from_epochs]
)
result = Series(to_datetime(units_from_epochs, unit=units, origin=epochs))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"origin, exc",
[
("random_string", ValueError),
("epoch", ValueError),
("13-24-1990", ValueError),
(datetime(1, 1, 1), OutOfBoundsDatetime),
],
)
def test_invalid_origins(self, origin, exc, units, units_from_epochs):
msg = f"origin {origin} (is Out of Bounds|cannot be converted to a Timestamp)"
with pytest.raises(exc, match=msg):
to_datetime(units_from_epochs, unit=units, origin=origin)
def test_invalid_origins_tzinfo(self):
# GH16842
with pytest.raises(ValueError, match="must be tz-naive"):
to_datetime(1, unit="D", origin=datetime(2000, 1, 1, tzinfo=pytz.utc))
@pytest.mark.parametrize("format", [None, "%Y-%m-%d %H:%M:%S"])
def test_to_datetime_out_of_bounds_with_format_arg(self, format):
# see gh-23830
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime("2417-10-27 00:00:00", format=format)
def test_processing_order(self):
# make sure we handle out-of-bounds *before*
# constructing the dates
result = to_datetime(200 * 365, unit="D")
expected = Timestamp("2169-11-13 00:00:00")
assert result == expected
result = to_datetime(200 * 365, unit="D", origin="1870-01-01")
expected = Timestamp("2069-11-13 00:00:00")
assert result == expected
result = to_datetime(300 * 365, unit="D", origin="1870-01-01")
expected = Timestamp("2169-10-20 00:00:00")
assert result == expected
@pytest.mark.parametrize(
"offset,utc,exp",
[
["Z", True, "2019-01-01T00:00:00.000Z"],
["Z", None, "2019-01-01T00:00:00.000Z"],
["-01:00", True, "2019-01-01T01:00:00.000Z"],
["-01:00", None, "2019-01-01T00:00:00.000-01:00"],
],
)
def test_arg_tz_ns_unit(self, offset, utc, exp):
# GH 25546
arg = "2019-01-01T00:00:00.000" + offset
result = to_datetime([arg], unit="ns", utc=utc)
expected = to_datetime([exp])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"listlike,do_caching",
[([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True)],
)
def test_should_cache(listlike, do_caching):
assert (
tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7)
== do_caching
)
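# Note: should_cache inspects check_count elements and enables the conversion
# cache only while the unique-value count stays at or below
# check_count * unique_share; above, 10/10 unique values disables caching,
# while 7/10 sits exactly on the 0.7 threshold and keeps it enabled.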
@pytest.mark.parametrize(
"unique_share,check_count, err_message",
[
(0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"),
(10, 2, r"unique_share must be in next bounds: \(0; 1\)"),
],
)
def test_should_cache_errors(unique_share, check_count, err_message):
arg = [5] * 10
with pytest.raises(AssertionError, match=err_message):
tools.should_cache(arg, unique_share, check_count)
def test_nullable_integer_to_datetime():
# Test for #30050
ser = Series([1, 2, None, 2 ** 61, None])
ser = ser.astype("Int64")
ser_copy = ser.copy()
res = to_datetime(ser, unit="ns")
expected = Series(
[
np.datetime64("1970-01-01 00:00:00.000000001"),
np.datetime64("1970-01-01 00:00:00.000000002"),
np.datetime64("NaT"),
np.datetime64("2043-01-25 23:56:49.213693952"),
np.datetime64("NaT"),
]
)
tm.assert_series_equal(res, expected)
# Check that ser isn't mutated
tm.assert_series_equal(ser, ser_copy)
@pytest.mark.parametrize("klass", [np.array, list])
def test_na_to_datetime(nulls_fixture, klass):
if isinstance(nulls_fixture, Decimal):
with pytest.raises(TypeError, match="not convertible to datetime"):
to_datetime(klass([nulls_fixture]))
else:
result = to_datetime(klass([nulls_fixture]))
assert result[0] is NaT
def test_empty_string_datetime_coerce__format():
# GH13044
td = Series(["03/24/2016", "03/25/2016", ""])
format = "%m/%d/%Y"
# coerce empty string to pd.NaT
result = to_datetime(td, format=format, errors="coerce")
expected = Series(["2016-03-24", "2016-03-25", NaT], dtype="datetime64[ns]")
tm.assert_series_equal(expected, result)
# raise an exception in case a format is given
with pytest.raises(ValueError, match="does not match format"):
result = to_datetime(td, format=format, errors="raise")
# don't raise an exception in case no format is given
result = to_datetime(td, errors="raise")
tm.assert_series_equal(result, expected)
def test_empty_string_datetime_coerce__unit():
# GH13044
# coerce empty string to pd.NaT
result = to_datetime([1, ""], unit="s", errors="coerce")
expected = DatetimeIndex(["1970-01-01 00:00:01", "NaT"], dtype="datetime64[ns]")
tm.assert_index_equal(expected, result)
# verify that no exception is raised even when errors='raise' is set
result = to_datetime([1, ""], unit="s", errors="raise")
tm.assert_index_equal(expected, result)
@td.skip_if_no("xarray")
def test_xarray_coerce_unit():
# GH44053
import xarray as xr
arr = xr.DataArray([1, 2, 3])
result = to_datetime(arr, unit="ns")
expected = DatetimeIndex(
[
"1970-01-01 00:00:00.000000001",
"1970-01-01 00:00:00.000000002",
"1970-01-01 00:00:00.000000003",
],
dtype="datetime64[ns]",
freq=None,
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_monotonic_increasing_index(cache):
# GH28238
cstart = start_caching_at
times = date_range(Timestamp("1980"), periods=cstart, freq="YS")
times = times.to_frame(index=False, name="DT").sample(n=cstart, random_state=1)
times.index = times.index.to_series().astype(float) / 1000
result = to_datetime(times.iloc[:, 0], cache=cache)
expected = times.iloc[:, 0]
tm.assert_series_equal(result, expected)
|
jorisvandenbossche/pandas
|
pandas/tests/tools/test_to_datetime.py
|
Python
|
bsd-3-clause
| 100,614
|
from contextlib import suppress
import logging
import warnings
import weakref
from tornado.httpserver import HTTPServer
import tlz
import dask
from .comm import get_tcp_server_address
from .comm import get_address_host
from .core import Server
from .http.routing import RoutingApplication
from .versions import get_versions
from .utils import DequeHandler, clean_dashboard_address
class ServerNode(Server):
"""
Base class for server nodes in a distributed cluster.
"""
# TODO factor out security, listening, services, etc. here
    # XXX avoid inheriting from Server? there is large potential for
    # confusion between base and derived attribute namespaces...
def versions(self, comm=None, packages=None):
return get_versions(packages=packages)
def start_services(self, default_listen_ip):
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
for k, v in self.service_specs.items():
listen_ip = None
if isinstance(k, tuple):
k, port = k
else:
port = 0
if isinstance(port, str):
port = port.split(":")
if isinstance(port, (tuple, list)):
if len(port) == 2:
listen_ip, port = (port[0], int(port[1]))
elif len(port) == 1:
[listen_ip], port = port, 0
else:
raise ValueError(port)
if isinstance(v, tuple):
v, kwargs = v
else:
kwargs = {}
try:
service = v(self, io_loop=self.loop, **kwargs)
service.listen(
(listen_ip if listen_ip is not None else default_listen_ip, port)
)
self.services[k] = service
except Exception as e:
warnings.warn(
"\nCould not launch service '%s' on port %s. " % (k, port)
+ "Got the following message:\n\n"
+ str(e),
stacklevel=3,
)
def stop_services(self):
for service in self.services.values():
service.stop()
@property
def service_ports(self):
return {k: v.port for k, v in self.services.items()}
def _setup_logging(self, logger):
self._deque_handler = DequeHandler(
n=dask.config.get("distributed.admin.log-length")
)
self._deque_handler.setFormatter(
logging.Formatter(dask.config.get("distributed.admin.log-format"))
)
logger.addHandler(self._deque_handler)
weakref.finalize(self, logger.removeHandler, self._deque_handler)
def get_logs(self, comm=None, n=None):
deque_handler = self._deque_handler
if n is None:
L = list(deque_handler.deque)
else:
            L = deque_handler.deque
            # take the n most recent records; -(i + 1) avoids the off-by-one
            # where L[-0] == L[0] would return the oldest entry first
            L = [L[-(i + 1)] for i in range(min(n, len(L)))]
return [(msg.levelname, deque_handler.format(msg)) for msg in L]
def start_http_server(
self, routes, dashboard_address, default_port=0, ssl_options=None
):
""" This creates an HTTP Server running on this node """
self.http_application = RoutingApplication(routes)
# TLS configuration
tls_key = dask.config.get("distributed.scheduler.dashboard.tls.key")
tls_cert = dask.config.get("distributed.scheduler.dashboard.tls.cert")
tls_ca_file = dask.config.get("distributed.scheduler.dashboard.tls.ca-file")
if tls_cert:
import ssl
ssl_options = ssl.create_default_context(
cafile=tls_ca_file, purpose=ssl.Purpose.SERVER_AUTH
)
ssl_options.load_cert_chain(tls_cert, keyfile=tls_key)
# We don't care about auth here, just encryption
ssl_options.check_hostname = False
ssl_options.verify_mode = ssl.CERT_NONE
self.http_server = HTTPServer(self.http_application, ssl_options=ssl_options)
http_address = clean_dashboard_address(dashboard_address or default_port)
if not http_address["address"]:
address = self._start_address
if isinstance(address, (list, tuple)):
address = address[0]
if address:
with suppress(ValueError):
http_address["address"] = get_address_host(address)
change_port = False
retries_left = 3
while True:
try:
if not change_port:
self.http_server.listen(**http_address)
else:
self.http_server.listen(**tlz.merge(http_address, {"port": 0}))
break
except Exception:
change_port = True
retries_left = retries_left - 1
if retries_left < 1:
raise
self.http_server.port = get_tcp_server_address(self.http_server)[1]
self.services["dashboard"] = self.http_server
if change_port and dashboard_address:
warnings.warn(
"Port {} is already in use.\n"
"Perhaps you already have a cluster running?\n"
"Hosting the HTTP server on port {} instead".format(
http_address["port"], self.http_server.port
)
)
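# Illustrative sketch (not part of distributed): start_http_server above
# retries with an OS-assigned port (port 0) when the requested dashboard
# port is busy. The same pattern, reduced to the standard library; the
# helper name _listen_with_fallback is hypothetical.
import socket


def _listen_with_fallback(port, retries=3):
    """Bind the requested port if possible, else fall back to any free port."""
    for attempt in range(retries):
        sock = socket.socket()
        try:
            # The first attempt uses the requested port; retries bind port 0,
            # which asks the OS for an arbitrary free port.
            sock.bind(("", port if attempt == 0 else 0))
            sock.listen(1)
            return sock
        except OSError:
            sock.close()
    raise OSError("could not bind a port after %d attempts" % retries)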
|
blaze/distributed
|
distributed/node.py
|
Python
|
bsd-3-clause
| 5,452
|
"""
Metrics API module: http://metrics-api.wikimedia.org/
Defines the API which exposes metrics on Wikipedia users. The metrics
are defined at https://meta.wikimedia.org/wiki/Research:Metrics.
"""
from user_metrics.utils import nested_import
from user_metrics.config import settings
from user_metrics.api.broker import FileBroker
from user_metrics.config import settings as conf
BROKER_HOME = conf.__data_file_dir__
REQUEST_BROKER_TARGET = BROKER_HOME + 'request_broker.txt'
RESPONSE_BROKER_TARGET = BROKER_HOME + 'response_broker.txt'
PROCESS_BROKER_TARGET = BROKER_HOME + 'process_broker.txt'
umapi_broker_context = FileBroker()
query_mod = nested_import(settings.__query_module__)
# Error codes for web requests
# ############################
error_codes = {
-1: 'Metrics API HTTP request error.',
0: 'Job already running.',
    1: 'Badly formatted timestamp.',
2: 'Could not locate stored request.',
3: 'Could not find User ID.',
4: 'Bad metric name.',
5: 'Failed to retrieve users.',
6: 'Job is currently queued.',
}
class MetricsAPIError(Exception):
""" Basic exception class for UserMetric types """
def __init__(self, message="Error processing API request.",
error_code=-1):
self.error_code_index = error_code
Exception.__init__(self, message)
@property
def error_code(self):
return self.error_code_index
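# Illustrative sketch (hypothetical helper, not part of this module's API):
# MetricsAPIError carries an index into the error_codes table above, so a
# caller can raise with a code and map it back to its message.
def _bad_metric_example():
    try:
        raise MetricsAPIError(error_codes[4], error_code=4)
    except MetricsAPIError as e:
        # error_code indexes back into the module-level table
        return error_codes[e.error_code]  # -> 'Bad metric name.'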
|
wikimedia/analytics-user-metrics
|
user_metrics/api/__init__.py
|
Python
|
bsd-3-clause
| 1,422
|
"""
resourceview.py
Contains administrative views for working with resources.
"""
from datetime import date
from admin_helpers import *
from sqlalchemy import or_, not_, func
from flask import current_app, redirect, flash, request, url_for
from flask.ext.admin import BaseView, expose
from flask.ext.admin.actions import action
from flask.ext.admin.contrib.sqla import ModelView
from wtforms import DecimalField, validators
import geopy
from geopy.exc import *
from remedy.rad.models import Resource, Category
from remedy.rad.geocoder import Geocoder
class ResourceView(AdminAuthMixin, ModelView):
"""
An administrative view for working with resources.
"""
column_list = ('name', 'organization',
'address', 'url',
'source', 'last_updated')
column_default_sort = 'name'
column_searchable_list = ('name','description','organization','notes',)
column_filters = ('visible','source','npi','date_verified',)
form_excluded_columns = ('date_created', 'last_updated',
'category_text', 'reviews')
create_template = 'admin/resource_create.html'
edit_template = 'admin/resource_edit.html'
column_labels = dict(npi='NPI', url='URL')
column_descriptions = dict(npi='The National Provider Identifier (NPI) of the resource.',
hours='The hours of operation for the resource.',
source='The source of the resource\'s information.',
notes='Administrative notes for the resource, not visible to end users.',
date_verified='The date the resource was last verified by an administrator.')
def scaffold_form(self):
"""
Scaffolds the creation/editing form so that the latitude
and longitude fields are optional, but can still be set
by the Google Places API integration.
"""
form_class = super(ResourceView, self).scaffold_form()
# Override the latitude/longitude fields to be optional
form_class.latitude = DecimalField(validators=[validators.Optional()])
form_class.longitude = DecimalField(validators=[validators.Optional()])
return form_class
@action('togglevisible',
'Toggle Visibility',
'Are you sure you wish to toggle visibility for the selected resources?')
def action_togglevisible(self, ids):
"""
Attempts to toggle visibility for each of the specified resources.
Args:
ids: The list of resource IDs, indicating which resources
should have their visibility toggled.
"""
# Load all resources by the set of IDs
target_resources = self.get_query().filter(self.model.id.in_(ids)).all()
# Build a list of all the results
results = []
if len(target_resources) > 0:
for resource in target_resources:
# Build a helpful message string to use for messages.
resource_str = 'resource #' + str(resource.id) + ' (' + resource.name + ')'
visible_status = ''
try:
if not resource.visible:
resource.visible = True
visible_status = ' as visible'
else:
resource.visible = False
visible_status = ' as not visible'
except Exception as ex:
results.append('Error changing ' + resource_str + ': ' + str(ex))
else:
results.append('Marked ' + resource_str + visible_status + '.')
# Save our changes.
self.session.commit()
else:
results.append('No resources were selected.')
# Flash the results of everything
flash("\n".join(msg for msg in results))
@action('markverified',
'Mark Verified',
'Are you sure you wish to mark the selected resources as verified?')
def action_markverified(self, ids):
"""
Attempts to mark each of the specified resources as verified
on the current date.
Args:
ids: The list of resource IDs, indicating which resources
should be marked as verified.
"""
# Load all resources by the set of IDs
target_resources = self.get_query().filter(self.model.id.in_(ids)).all()
# Build a list of all the results
results = []
if len(target_resources) > 0:
for resource in target_resources:
# Build a helpful message string to use for messages.
resource_str = 'resource #' + str(resource.id) + ' (' + resource.name + ')'
try:
resource.date_verified = date.today()
except Exception as ex:
results.append('Error changing ' + resource_str + ': ' + str(ex))
else:
results.append('Marked ' + resource_str + ' as verified.')
# Save our changes.
self.session.commit()
else:
results.append('No resources were selected.')
# Flash the results of everything
flash("\n".join(msg for msg in results))
@action('assigncategories', 'Assign Categories')
def action_assigncategories(self, ids):
"""
Sets up a redirection action for mass-assigning categories
to the specified resources.
Args:
ids: The list of resource IDs that should be updated.
"""
return redirect(url_for('resourcecategoryassignview.index', ids=ids))
def __init__(self, session, **kwargs):
super(ResourceView, self).__init__(Resource, session, **kwargs)
class ResourceRequiringGeocodingView(ResourceView):
"""
An administrative view for working with resources that need geocoding.
"""
column_list = ('name', 'organization', 'address', 'source')
# Disable model creation/deletion
can_create = False
can_delete = False
def get_query(self):
"""
Returns the query for the model type.
Returns:
The query for the model type.
"""
query = self.session.query(self.model)
return self.prepare_geocode_query(query)
def get_count_query(self):
"""
Returns the count query for the model type.
Returns:
The count query for the model type.
"""
query = self.session.query(func.count('*')).select_from(self.model)
return self.prepare_geocode_query(query)
def prepare_geocode_query(self, query):
"""
Prepares the provided query by ensuring that
all relevant geocoding-related filters have been applied.
Args:
query: The query to update.
Returns:
The updated query.
"""
# Ensure an address is defined
query = query.filter(self.model.address != None)
query = query.filter(self.model.address != '')
# Ensure at least one geocoding field is missing
query = query.filter(or_(self.model.latitude == None,
self.model.longitude == None))
return query
@action('geocode',
'Geocode')
def action_geocode(self, ids):
"""
Attempts to geocode each of the specified resources.
Args:
ids: The list of resource IDs, indicating which resources
should be geocoded.
"""
# Load all resources by the set of IDs
target_resources = self.get_query().filter(self.model.id.in_(ids)).all()
# Build a list of all the results
results = []
if len(target_resources) > 0:
# Set up the geocoder, and then try to geocode each resource
geocoder = Geocoder(api_key=current_app.config.get('MAPS_SERVER_KEY'))
for resource in target_resources:
# Build a helpful message string to use for errors.
resource_str = 'resource #' + str(resource.id) + ' (' + resource.name + ')'
try:
geocoder.geocode(resource)
except geopy.exc.GeopyError as gpex:
# Handle Geopy errors separately
exc_type = ''
# Attempt to infer some extra information based on the exception type
if isinstance(gpex, geopy.exc.GeocoderQuotaExceeded):
exc_type = 'quota exceeded'
elif isinstance(gpex, geopy.exc.GeocoderAuthenticationFailure):
exc_type = 'authentication failure'
elif isinstance(gpex, geopy.exc.GeocoderInsufficientPrivileges):
exc_type = 'insufficient privileges'
elif isinstance(gpex, geopy.exc.GeocoderUnavailable):
exc_type = 'server unavailable'
elif isinstance(gpex, geopy.exc.GeocoderTimedOut):
exc_type = 'timed out'
elif isinstance(gpex, geopy.exc.GeocoderQueryError):
exc_type = 'query error'
if len(exc_type) > 0:
exc_type = '(' + exc_type + ') '
results.append('Error geocoding ' + resource_str + ': ' + exc_type + str(gpex))
except Exception as ex:
results.append('Error geocoding ' + resource_str + ': ' + str(ex))
else:
results.append('Geocoded ' + resource_str + '.')
# Save our changes.
self.session.commit()
else:
results.append('No resources were selected.')
# Flash the results of everything
flash("\n".join(msg for msg in results))
@action('removeaddress',
'Remove Address',
'Are you sure you wish to remove address information from the selected resources?')
def action_remove_address(self, ids):
"""
Attempts to remove address information from each of the specified resources.
Args:
ids: The list of resource IDs, indicating which resources
should have address information stripped.
"""
# Load all resources by the set of IDs
target_resources = self.get_query().filter(self.model.id.in_(ids)).all()
# Build a list of all the results
results = []
if len(target_resources) > 0:
for resource in target_resources:
# Build a helpful message string to use for errors.
resource_str = 'resource #' + str(resource.id) + ' (' + resource.name + ')'
try:
resource.address = None
resource.latitude = None
resource.longitude = None
resource.location = None
except Exception as ex:
results.append('Error updating ' + resource_str + ': ' + str(ex))
else:
results.append('Removed address information from ' + resource_str + '.')
# Save our changes.
self.session.commit()
else:
results.append('No resources were selected.')
# Flash the results of everything
flash("\n".join(msg for msg in results))
def __init__(self, session, **kwargs):
# Because we're invoking the ResourceView constructor,
# we don't need to pass in the ResourceModel.
super(ResourceRequiringGeocodingView, self).__init__(session, **kwargs)
class ResourceRequiringCategoriesView(ResourceView):
"""
An administrative view for working with resources that need categories.
"""
column_list = ('name', 'organization', 'address', 'source')
# Disable model creation/deletion
can_create = False
can_delete = False
def get_query(self):
"""
Returns the query for the model type.
Returns:
The query for the model type.
"""
query = self.session.query(self.model)
return self.prepare_category_query(query)
def get_count_query(self):
"""
Returns the count query for the model type.
Returns:
The count query for the model type.
"""
query = self.session.query(func.count('*')).select_from(self.model)
return self.prepare_category_query(query)
def prepare_category_query(self, query):
"""
Prepares the provided query by ensuring that
filtering out resources with categories has been applied.
Args:
query: The query to update.
Returns:
The updated query.
"""
# Ensure there are no categories defined
query = query.filter(not_(self.model.categories.any()))
return query
def __init__(self, session, **kwargs):
# Because we're invoking the ResourceView constructor,
# we don't need to pass in the ResourceModel.
super(ResourceRequiringCategoriesView, self).__init__(session, **kwargs)
class ResourceCategoryAssignView(AdminAuthMixin, BaseView):
"""
The view for mass-assigning resources to categories.
"""
# Not visible in the menu.
def is_visible(self):
return False
@expose('/', methods=['GET', 'POST'])
def index(self):
"""
A view for mass-assigning resources to categories.
"""
# Load all resources by the set of IDs
target_resources = Resource.query.filter(Resource.id.in_(request.args.getlist('ids')))
target_resources = target_resources.order_by(Resource.name.asc()).all()
# Make sure we have some, and go back to the resources
# view (for assigning categories) if we don't.
if len(target_resources) == 0:
flash('At least one resource must be selected.')
return redirect(url_for('category-resourceview.index_view'))
if request.method == 'GET':
# Get all categories
available_categories = Category.query.order_by(Category.name.asc())
available_categories = available_categories.all()
# Return the view for assigning categories
return self.render('admin/resource_assign_categories.html',
ids = request.args.getlist('ids'),
resources = target_resources,
categories = available_categories)
else:
# Get the selected categories - use request.form,
# not request.args
target_categories = Category.query.filter(Category.id.in_(request.form.getlist('categories'))).all()
if len(target_categories) > 0:
# Build a list of all the results
results = []
for resource in target_resources:
# Build a helpful message string to use for resources.
resource_str = 'resource #' + str(resource.id) + ' (' + resource.name + ')'
try:
# Assign all categories
for category in target_categories:
# Make sure we're not double-adding
                            if category not in resource.categories:
resource.categories.append(category)
except Exception as ex:
results.append('Error updating ' + resource_str + ': ' + str(ex))
else:
results.append('Updated ' + resource_str + '.')
# Save our changes.
self.session.commit()
# Flash the results of everything
flash("\n".join(msg for msg in results))
else:
flash('At least one category must be selected.')
return redirect(url_for('category-resourceview.index_view'))
def __init__(self, session, **kwargs):
self.session = session
super(ResourceCategoryAssignView, self).__init__(**kwargs)
class ResourceRequiringNpiView(ResourceView):
"""
An administrative view for working with resources that need NPI values.
"""
# Disable model creation/deletion
can_create = False
can_delete = False
def get_query(self):
"""
Returns the query for the model type.
Returns:
The query for the model type.
"""
query = self.session.query(self.model)
return self.prepare_npi_query(query)
def get_count_query(self):
"""
Returns the count query for the model type.
Returns:
The count query for the model type.
"""
query = self.session.query(func.count('*')).select_from(self.model)
return self.prepare_npi_query(query)
def prepare_npi_query(self, query):
"""
Prepares the provided query by ensuring that
filtering out resources with NPIs has been applied.
Args:
query: The query to update.
Returns:
The updated query.
"""
# Ensure that an NPI is missing
query = query.filter(or_(self.model.npi == None,
self.model.npi == ''))
return query
def __init__(self, session, **kwargs):
# Because we're invoking the ResourceView constructor,
# we don't need to pass in the ResourceModel.
super(ResourceRequiringNpiView, self).__init__(session, **kwargs)
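
# Hedged usage sketch (not part of the original module): one way these views
# could be wired into a Flask-Admin instance. The `admin` and `session`
# arguments and the menu names are illustrative assumptions; only the
# 'category-resourceview' endpoint is taken from the redirects above.
def _register_resource_views(admin, session):
    """Illustrative helper: attach the custom resource views to `admin`."""
    admin.add_view(ResourceRequiringCategoriesView(
        session, name='Resources Needing Categories',
        endpoint='category-resourceview'))
    admin.add_view(ResourceRequiringNpiView(
        session, name='Resources Needing NPI'))
    admin.add_view(ResourceCategoryAssignView(
        session, name='Assign Categories'))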
|
AllieDeford/radremedy
|
remedy/admin_views/resourceview.py
|
Python
|
bsd-3-clause
| 17,605
|
# -*- coding: utf-8 -*-
"""
See PEP 386 (http://www.python.org/dev/peps/pep-0386/)
Release logic:
1. Remove "dev" from current.
2. git commit
3. git tag <version>
4. push to pypi + push to github
5. bump the version, append '.dev0'
6. git commit
7. push to github (to avoid confusion)
"""
__version__ = '0.0.1dev0'
|
adsworth/django-onetomany
|
onetomany/__init__.py
|
Python
|
bsd-3-clause
| 315
|
"""
Classes and utilities for extracting haiku from arbitrary text and evaluating them based on some programmatically
defined criteria
"""
import nltk
import string
from nltk.corpus import cmudict
from nltk_util import syllables_en
from haikus.evaluators import DEFAULT_HAIKU_EVALUATORS
global WORD_DICT
try:
WORD_DICT = cmudict.dict()
except LookupError:
nltk.download('cmudict')
WORD_DICT = cmudict.dict()
class NonwordError(Exception):
pass
class HaikuText(object):
"""
A wrapper around some sequence of text
"""
def __init__(self, text=None):
self._text = text
def get_text(self):
return self._text
def set_text(self, text):
self._text = text
def filtered_text(self):
"""
Strip punctuation from this text
"""
exclude = set(string.punctuation).difference(set("'"))
s = ''.join(ch for ch in self.get_text() if ch not in exclude)
return s
def filtered_word(self, word):
"""
        Strip punctuation from the given token so we can look it up in
our word dictionary
"""
exclude = set(string.punctuation).difference(set("'"))
filtered = ''.join(ch for ch in word if ch not in exclude)
return filtered
def word_syllables(self, word, override_word=None):
"""
Get the syllable count for the given word, according to WORD_DICT
"""
word = word.encode('ascii', 'ignore').strip().lower()
try:
matches = WORD_DICT[word]
for tree in matches:
return (len([phoneme for phoneme in tree if phoneme[-1].isdigit()]), word)
except KeyError:
return self.unknown_word_handler(word)
def syllable_map(self):
"""
Map words in this text to their syllable count
"""
s = self.filtered_text()
try:
return map(self.word_syllables, s.split())
except NonwordError:
return []
def syllable_count(self):
"""
Sum the syllable counts for all words in this text
"""
return sum([t[0] for t in self.syllable_map()])
def get_haiku(self):
"""
find a haiku at the beginning of the text
"""
syllable_map = self.syllable_map()
return self.find_haiku(syllable_map)
def get_haikus(self):
"""
find all haikus in the text
"""
haikus = []
syllable_map = self.syllable_map()
for i in range(len(syllable_map)):
portion = syllable_map[i:]
if (sum(word[0] for word in portion) >= 17):
haiku = self.find_haiku(portion)
if haiku:
haikus.append(haiku)
else:
break
return haikus
def find_haiku(self, syllable_map):
"""
Find a haiku in this text
"""
haiku = [5, 12, 17]
cumulative = [0]
for w in syllable_map:
cumulative.append(cumulative[-1] + w[0])
cumulative = cumulative[1:]
is_haiku = set(cumulative).intersection(haiku) == set(haiku)
if is_haiku:
lookup = dict((v,k) for k, v in enumerate(cumulative))
enum_lookup = list(enumerate(lookup))
start = 0
lines = []
for line in haiku:
section = syllable_map[start:lookup[line]+1]
words = [s[1] for s in section]
lines.append(' '.join(words))
try:
start = enum_lookup[lookup[line] + 1][0]
except IndexError:
pass
haiku = Haiku()
haiku.set_lines(lines)
return haiku
else:
return False
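    # Worked example (illustrative, not in the original source): for a
    # syllable map with counts [2, 3, 3, 4, 5], the cumulative sums are
    # [2, 5, 8, 12, 17]; {5, 12, 17} is a subset of those sums, so the words
    # split cleanly into 5/7/5-syllable lines and a Haiku object is built.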
def has_haiku(self):
"""
Return True if this text contains a haiku
"""
return self.get_haiku() is not False
def unknown_word_handler(self, word):
"""
handle words outside of cmudict by attempting to count their syllables
"""
syllable_count = syllables_en.count(self.filtered_word(word))
if syllable_count > 0:
return (syllable_count, word)
else:
raise NonwordError("%s has no syllables" % word)
class Haiku(object):
"""
A simple wrapper for a haiku's three lines
"""
def get_lines(self):
return self._lines
def set_lines(self, lines):
self._lines = lines
    def calculate_quality(self, evaluators=None):
        """
        Calculate this haiku's quality
        """
        if evaluators is None:
            # Fall back to the module-level defaults so that calling with no
            # arguments does not iterate over None.
            evaluators = DEFAULT_HAIKU_EVALUATORS
        score = 0
for evaluator_class, weight in evaluators:
evaluator = evaluator_class(weight=weight)
score += evaluator(self)
try:
score /= sum([weight for evaluator, weight in evaluators])
except ZeroDivisionError:
pass
return score
def line_end_bigrams(self):
"""
Find the bigrams that occur across any two lines in this text's
haiku
"""
bigrams = ()
lines = [line.split(" ") for line in self.get_lines()]
try:
bigrams = ((lines[0][-1],lines[1][0]),
(lines[1][-1],lines[2][0]))
except IndexError:
            # Keep the return type consistent: a pair of 2-tuples.
            return (('', ''), ('', ''))
return bigrams
def flattened_lines(self):
return ' '.join(self.get_lines())
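
# Hedged usage sketch (not part of the original module): extract haiku from a
# block of text and score the first hit with the default evaluators imported
# above. Results depend entirely on the cmudict syllable lookups.
def _demo_haiku_extraction(text):
    """Illustrative helper: return (haikus, score_of_first_or_None)."""
    wrapped = HaikuText(text)
    haikus = wrapped.get_haikus()
    score = None
    if haikus:
        score = haikus[0].calculate_quality(evaluators=DEFAULT_HAIKU_EVALUATORS)
    return haikus, score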
|
wieden-kennedy/haikus
|
haikus/haikutext.py
|
Python
|
bsd-3-clause
| 5,501
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..surface import ProbeVolumeWithModel
def test_ProbeVolumeWithModel_inputs():
input_map = dict(InputModel=dict(argstr='%s',
position=-2,
),
InputVolume=dict(argstr='%s',
position=-3,
),
OutputModel=dict(argstr='%s',
hash_files=False,
position=-1,
),
args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = ProbeVolumeWithModel.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ProbeVolumeWithModel_outputs():
output_map = dict(OutputModel=dict(position=-1,
),
)
outputs = ProbeVolumeWithModel.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
mick-d/nipype
|
nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py
|
Python
|
bsd-3-clause
| 1,175
|
'''
Copyright (C) 2012-2015 Diego Torres Milano
Created on Dec 1, 2012
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Diego Torres Milano
'''
__version__ = '10.0.1'
import sys
import warnings
# import string
# import datetime
if sys.executable:
if 'monkeyrunner' in sys.executable:
warnings.warn(
'''
You should use a 'python' interpreter, not 'monkeyrunner' for this module
''', RuntimeWarning)
import socket
import time
import re
import signal
import os
# import types
import platform
# from com.dtmilano.android.window import Window
# from com.dtmilano.android.common import _nd, _nh, _ns, obtainPxPy, obtainVxVy,\
# obtainVwVh
# from com.dtmilano.android.adb.androidkeymap import KEY_MAP
from .androidkeymap import KEY_MAP
DEBUG = False
DEBUG_TOUCH = DEBUG and False
DEBUG_LOG = DEBUG and False
DEBUG_WINDOWS = DEBUG and False
DEBUG_COORDS = DEBUG and False
try:
HOSTNAME = os.environ['ANDROID_ADB_SERVER_HOST']
except KeyError:
HOSTNAME = 'localhost'
try:
PORT = int(os.environ['ANDROID_ADB_SERVER_PORT'])
except KeyError:
PORT = 5037
OKAY = 'OKAY'
FAIL = 'FAIL'
UP = 0
DOWN = 1
DOWN_AND_UP = 2
TIMEOUT = 15
WIFI_SERVICE = 'wifi'
# some device properties
VERSION_SDK_PROPERTY = 'ro.build.version.sdk'
VERSION_RELEASE_PROPERTY = 'ro.build.version.release'
class Device:
@staticmethod
def factory(_str):
if DEBUG:
print >> sys.stderr, "Device.factory(", _str, ")"
values = _str.split(None, 2)
if DEBUG:
print >> sys.stderr, "values=", values
return Device(*values)
def __init__(self, serialno, status, qualifiers=None):
self.serialno = serialno
self.status = status
self.qualifiers = qualifiers
def __str__(self):
return "<<<" + self.serialno + ", " + self.status + ", %s>>>" % self.qualifiers
class WifiManager:
'''
Simulates Android WifiManager.
@see: http://developer.android.com/reference/android/net/wifi/WifiManager.html
'''
WIFI_STATE_DISABLING = 0
WIFI_STATE_DISABLED = 1
WIFI_STATE_ENABLING = 2
WIFI_STATE_ENABLED = 3
WIFI_STATE_UNKNOWN = 4
WIFI_IS_ENABLED_RE = re.compile('Wi-Fi is enabled')
WIFI_IS_DISABLED_RE = re.compile('Wi-Fi is disabled')
def __init__(self, device):
self.device = device
def getWifiState(self):
'''
Gets the Wi-Fi enabled state.
@return: One of WIFI_STATE_DISABLED, WIFI_STATE_DISABLING, WIFI_STATE_ENABLED, WIFI_STATE_ENABLING, WIFI_STATE_UNKNOWN
'''
result = self.device.shell('dumpsys wifi')
if result:
state = result.splitlines()[0]
if self.WIFI_IS_ENABLED_RE.match(state):
return self.WIFI_STATE_ENABLED
elif self.WIFI_IS_DISABLED_RE.match(state):
return self.WIFI_STATE_DISABLED
print >> sys.stderr, "UNKNOWN WIFI STATE:", state
return self.WIFI_STATE_UNKNOWN
class AdbClient:
def __init__(self, serialno=None, hostname=HOSTNAME, port=PORT, settransport=True, reconnect=True, ignoreversioncheck=False):
self.Log = AdbClient.__Log(self)
self.serialno = serialno
self.hostname = hostname
self.port = port
self.reconnect = reconnect
self.__connect()
self.checkVersion(ignoreversioncheck)
self.build = {}
''' Build properties '''
self.__displayInfo = None
''' Cached display info. Reset it to C{None} to force refetching display info '''
self.display = {}
''' The map containing the device's physical display properties: width, height and density '''
self.isTransportSet = False
        if settransport and serialno is not None:
self.__setTransport()
self.build[VERSION_SDK_PROPERTY] = int(self.__getProp(VERSION_SDK_PROPERTY))
self.initDisplayProperties()
@staticmethod
def setAlarm(timeout):
osName = platform.system()
if osName.startswith('Windows'): # alarm is not implemented in Windows
return
if DEBUG:
print >> sys.stderr, "setAlarm(%d)" % timeout
signal.alarm(timeout)
def setSerialno(self, serialno):
if self.isTransportSet:
raise ValueError("Transport is already set, serialno cannot be set once this is done.")
self.serialno = serialno
self.__setTransport()
self.build[VERSION_SDK_PROPERTY] = int(self.__getProp(VERSION_SDK_PROPERTY))
def setReconnect(self, val):
self.reconnect = val
def __connect(self):
if DEBUG:
print >> sys.stderr, "__connect()"
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(TIMEOUT)
try:
self.socket.connect((self.hostname, self.port))
except socket.error, ex:
            raise RuntimeError("ERROR: Connecting to %s:%d: %s.\nIs adb running on your computer?" % (self.hostname, self.port, ex))
def close(self):
if DEBUG:
print >> sys.stderr, "Closing socket...", self.socket
if self.socket:
self.socket.close()
def __del__(self):
try:
self.close()
except:
pass
def __send(self, msg, checkok=True, reconnect=False):
if DEBUG:
print >> sys.stderr, "__send(%s, checkok=%s, reconnect=%s)" % (msg, checkok, reconnect)
if not re.search('^host:', msg):
if not self.isTransportSet:
self.__setTransport()
else:
self.checkConnected()
b = bytearray(msg, 'utf-8')
self.socket.send('%04X%s' % (len(b), b))
if checkok:
self.__checkOk()
if reconnect:
if DEBUG:
print >> sys.stderr, " __send: reconnecting"
self.__connect()
self.__setTransport()
def __receive(self, nob=None):
if DEBUG:
print >> sys.stderr, "__receive()"
self.checkConnected()
if nob is None:
nob = int(self.socket.recv(4), 16)
if DEBUG:
print >> sys.stderr, " __receive: receiving", nob, "bytes"
recv = bytearray()
nr = 0
while nr < nob:
chunk = self.socket.recv(min((nob - nr), 4096))
recv.extend(chunk)
nr += len(chunk)
if DEBUG:
print >> sys.stderr, " __receive: returning len=", len(recv)
return str(recv)
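    # Illustrative note (not in the original source): the adb server speaks a
    # simple framed protocol. Each request is the payload length as four hex
    # digits followed by the payload, so 'host:version' goes on the wire as
    # '000Chost:version'; replies start with OKAY or FAIL, and most bodies are
    # themselves hex-length-prefixed, which is what __receive() parses above.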
def __checkOk(self):
if DEBUG:
print >> sys.stderr, "__checkOk()"
self.checkConnected()
self.setAlarm(TIMEOUT)
recv = self.socket.recv(4)
if DEBUG:
print >> sys.stderr, " __checkOk: recv=", repr(recv)
try:
if recv != OKAY:
error = self.socket.recv(1024)
if error.startswith('0049'):
raise RuntimeError("ERROR: This computer is unauthorized. Please check the confirmation dialog on your device.")
else:
raise RuntimeError("ERROR: %s %s" % (repr(recv), error))
finally:
self.setAlarm(0)
if DEBUG:
print >> sys.stderr, " __checkOk: returning True"
return True
def checkConnected(self):
if DEBUG:
print >> sys.stderr, "checkConnected()"
if not self.socket:
raise RuntimeError("ERROR: Not connected")
if DEBUG:
print >> sys.stderr, " checkConnected: returning True"
return True
def checkVersion(self, ignoreversioncheck=False, reconnect=True):
if DEBUG:
print >> sys.stderr, "checkVersion(reconnect=%s) ignoreversioncheck=%s" % (reconnect, ignoreversioncheck)
self.__send('host:version', reconnect=False)
# HACK: MSG_WAITALL not available on windows
#version = self.socket.recv(8, socket.MSG_WAITALL)
version = self.__readExactly(self.socket, 8)
VALID_ADB_VERSIONS = ["00040020", "0004001f"]
        if version not in VALID_ADB_VERSIONS and not ignoreversioncheck:
raise RuntimeError("ERROR: Incorrect ADB server version %s (expecting one of %s)" % (version, VALID_ADB_VERSIONS))
if reconnect:
self.__connect()
def __setTransport(self):
if DEBUG:
print >> sys.stderr, "__setTransport()"
if not self.serialno:
raise ValueError("serialno not set, empty or None")
self.checkConnected()
serialnoRE = re.compile(self.serialno)
found = False
devices = self.getDevices()
if len(devices) == 0:
raise RuntimeError("ERROR: There are no connected devices")
for device in devices:
if serialnoRE.match(device.serialno):
found = True
break
if not found:
raise RuntimeError("ERROR: couldn't find device that matches '%s' in %s" % (self.serialno, devices))
self.serialno = device.serialno
msg = 'host:transport:%s' % self.serialno
if DEBUG:
print >> sys.stderr, " __setTransport: msg=", msg
self.__send(msg, reconnect=False)
self.isTransportSet = True
def __checkTransport(self):
if not self.isTransportSet:
raise RuntimeError("ERROR: Transport is not set")
def __readExactly(self, sock, size):
if DEBUG:
            print >> sys.stderr, "__readExactly(sock=%s, size=%d)" % (sock, size)
_buffer = ''
while len(_buffer) < size:
data = sock.recv(size-len(_buffer))
if not data:
break
_buffer+=data
return _buffer
def getDevices(self):
if DEBUG:
print >> sys.stderr, "getDevices()"
self.__send('host:devices-l', checkok=False)
try:
self.__checkOk()
except RuntimeError, ex:
print >> sys.stderr, "**ERROR:", ex
return None
devices = []
for line in self.__receive().splitlines():
devices.append(Device.factory(line))
self.__connect()
return devices
def shell(self, cmd=None):
if DEBUG:
print >> sys.stderr, "shell(cmd=%s)" % cmd
self.__checkTransport()
if cmd:
self.__send('shell:%s' % cmd, checkok=True, reconnect=False)
out = ''
while True:
_str = None
try:
_str = self.socket.recv(4096)
except Exception, ex:
print >> sys.stderr, "ERROR:", ex
if not _str:
break
out += _str
if self.reconnect:
if DEBUG:
print >> sys.stderr, "Reconnecting..."
self.close()
self.__connect()
self.__setTransport()
return out
else:
self.__send('shell:')
# sin = self.socket.makefile("rw")
# sout = self.socket.makefile("r")
# return (sin, sin)
            sout = self.socket.makefile("r")
return sout
def __getRestrictedScreen(self):
''' Gets C{mRestrictedScreen} values from dumpsys. This is a method to obtain display dimensions '''
rsRE = re.compile('\s*mRestrictedScreen=\((?P<x>\d+),(?P<y>\d+)\) (?P<w>\d+)x(?P<h>\d+)')
for line in self.shell('dumpsys window').splitlines():
m = rsRE.match(line)
if m:
return m.groups()
raise RuntimeError("Couldn't find mRestrictedScreen in 'dumpsys window'")
def getDisplayInfo(self):
self.__checkTransport()
displayInfo = self.getLogicalDisplayInfo()
if displayInfo:
return displayInfo
displayInfo = self.getPhysicalDisplayInfo()
if displayInfo:
return displayInfo
raise RuntimeError("Couldn't find display info in 'wm size', 'dumpsys display' or 'dumpsys window'")
def getLogicalDisplayInfo(self):
'''
Gets C{mDefaultViewport} and then C{deviceWidth} and C{deviceHeight} values from dumpsys.
This is a method to obtain display logical dimensions and density
'''
self.__checkTransport()
logicalDisplayRE = re.compile('.*DisplayViewport{valid=true, .*orientation=(?P<orientation>\d+), .*deviceWidth=(?P<width>\d+), deviceHeight=(?P<height>\d+).*')
for line in self.shell('dumpsys display').splitlines():
m = logicalDisplayRE.search(line, 0)
if m:
self.__displayInfo = {}
for prop in [ 'width', 'height', 'orientation' ]:
self.__displayInfo[prop] = int(m.group(prop))
for prop in [ 'density' ]:
d = self.__getDisplayDensity(None, strip=True, invokeGetPhysicalDisplayIfNotFound=True)
if d:
self.__displayInfo[prop] = d
else:
# No available density information
self.__displayInfo[prop] = -1.0
return self.__displayInfo
return None
def getPhysicalDisplayInfo(self):
''' Gets C{mPhysicalDisplayInfo} values from dumpsys. This is a method to obtain display dimensions and density'''
self.__checkTransport()
        # 'wm size' / 'wm density' output looks like "Physical size: 1080x1920"
        # and "Physical density: 320"; capture the digits and let '.*' cross
        # the newline between the two lines (re.DOTALL), since the original
        # empty groups could never parse as integers.
        phyDispRE = re.compile('Physical size: (?P<width>\d+)x(?P<height>\d+).*Physical density: (?P<density>\d+)', re.DOTALL)
m = phyDispRE.search(self.shell('wm size; wm density'))
if m:
displayInfo = {}
for prop in [ 'width', 'height' ]:
displayInfo[prop] = int(m.group(prop))
for prop in [ 'density' ]:
displayInfo[prop] = float(m.group(prop))
return displayInfo
phyDispRE = re.compile('.*PhysicalDisplayInfo{(?P<width>\d+) x (?P<height>\d+), .*, density (?P<density>[\d.]+).*')
for line in self.shell('dumpsys display').splitlines():
m = phyDispRE.search(line, 0)
if m:
displayInfo = {}
for prop in [ 'width', 'height' ]:
displayInfo[prop] = int(m.group(prop))
for prop in [ 'density' ]:
# In mPhysicalDisplayInfo density is already a factor, no need to calculate
displayInfo[prop] = float(m.group(prop))
return displayInfo
# This could also be mSystem or mOverscanScreen
phyDispRE = re.compile('\s*mUnrestrictedScreen=\((?P<x>\d+),(?P<y>\d+)\) (?P<width>\d+)x(?P<height>\d+)')
        # This is known to work on older versions (e.g. API 10) where mRestrictedScreen is not available
dispWHRE = re.compile('\s*DisplayWidth=(?P<width>\d+) *DisplayHeight=(?P<height>\d+)')
for line in self.shell('dumpsys window').splitlines():
m = phyDispRE.search(line, 0)
if not m:
m = dispWHRE.search(line, 0)
if m:
displayInfo = {}
for prop in [ 'width', 'height' ]:
displayInfo[prop] = int(m.group(prop))
for prop in [ 'density' ]:
d = self.__getDisplayDensity(None, strip=True, invokeGetPhysicalDisplayIfNotFound=False)
if d:
displayInfo[prop] = d
else:
# No available density information
displayInfo[prop] = -1.0
return displayInfo
def __getProp(self, key, strip=True):
if DEBUG:
print >> sys.stderr, "__getProp(%s, %s)" % (key, strip)
prop = self.shell('getprop %s' % key)
if strip:
prop = prop.rstrip('\r\n')
if DEBUG:
print >> sys.stderr, " __getProp: returning '%s'" % prop
return prop
def __getDisplayWidth(self, key, strip=True):
if self.__displayInfo and 'width' in self.__displayInfo:
return self.__displayInfo['width']
return self.getDisplayInfo()['width']
def __getDisplayHeight(self, key, strip=True):
if self.__displayInfo and 'height' in self.__displayInfo:
return self.__displayInfo['height']
return self.getDisplayInfo()['height']
def __getDisplayOrientation(self, key, strip=True):
if self.__displayInfo and 'orientation' in self.__displayInfo:
return self.__displayInfo['orientation']
displayInfo = self.getDisplayInfo()
if 'orientation' in displayInfo:
return displayInfo['orientation']
# Fallback method to obtain the orientation
# See https://github.com/dtmilano/AndroidViewClient/issues/128
surfaceOrientationRE = re.compile('SurfaceOrientation:\s+(\d+)')
output = self.shell('dumpsys input')
m = surfaceOrientationRE.search(output)
if m:
return int(m.group(1))
# We couldn't obtain the orientation
return -1
def __getDisplayDensity(self, key, strip=True, invokeGetPhysicalDisplayIfNotFound=True):
if self.__displayInfo and 'density' in self.__displayInfo: # and self.__displayInfo['density'] != -1: # FIXME: need more testing
return self.__displayInfo['density']
BASE_DPI = 160.0
d = self.getProperty('ro.sf.lcd_density', strip)
if d:
return float(d)/BASE_DPI
d = self.getProperty('qemu.sf.lcd_density', strip)
if d:
return float(d)/BASE_DPI
if invokeGetPhysicalDisplayIfNotFound:
return self.getPhysicalDisplayInfo()['density']
return -1.0
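    # Worked example (illustrative): a device reporting ro.sf.lcd_density=320
    # yields a density factor of 320 / 160.0 = 2.0, so a 100dp coordinate maps
    # to 200 physical pixels in touchDip()/dragDip() below.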
def getSystemProperty(self, key, strip=True):
self.__checkTransport()
return self.getProperty(key, strip)
def getProperty(self, key, strip=True):
''' Gets the property value for key '''
self.__checkTransport()
import collections
MAP_PROPS = collections.OrderedDict([
(re.compile('display.width'), self.__getDisplayWidth),
(re.compile('display.height'), self.__getDisplayHeight),
(re.compile('display.density'), self.__getDisplayDensity),
(re.compile('display.orientation'), self.__getDisplayOrientation),
(re.compile('.*'), self.__getProp),
])
'''Maps properties key values (as regexps) to instance methods to obtain its values.'''
for kre in MAP_PROPS.keys():
if kre.match(key):
return MAP_PROPS[kre](key=key, strip=strip)
        raise ValueError("key='%s' does not match any map entry" % key)
def getSdkVersion(self):
'''
Gets the SDK version.
'''
self.__checkTransport()
return self.build[VERSION_SDK_PROPERTY]
def press(self, name, eventType=DOWN_AND_UP):
self.__checkTransport()
        if isinstance(name, unicode):
            # encode (not decode) so the shell command receives a plain byte str
            name = name.encode('ascii', 'replace')
cmd = 'input keyevent %s' % name
if DEBUG:
print >> sys.stderr, "press(%s)" % cmd
self.shell(cmd)
def longPress(self, name, duration=0.5, dev='/dev/input/event0'):
self.__checkTransport()
# WORKAROUND:
# Using 'input keyevent --longpress POWER' does not work correctly in
# KitKat (API 19), it sends a short instead of a long press.
# This uses the events instead, but it may vary from device to device.
# The events sent are device dependent and may not work on other devices.
# If this does not work on your device please do:
# $ adb shell getevent -l
# and post the output to https://github.com/dtmilano/AndroidViewClient/issues
# specifying the device and API level.
if name[0:4] == 'KEY_':
name = name[4:]
if name in KEY_MAP:
self.shell('sendevent %s 1 %d 1' % (dev, KEY_MAP[name]))
self.shell('sendevent %s 0 0 0' % dev)
time.sleep(duration)
self.shell('sendevent %s 1 %d 0' % (dev, KEY_MAP[name]))
self.shell('sendevent %s 0 0 0' % dev)
return
version = self.getSdkVersion()
if version >= 19:
cmd = 'input keyevent --longpress %s' % name
if DEBUG:
print >> sys.stderr, "longPress(%s)" % cmd
self.shell(cmd)
else:
raise RuntimeError("longpress: not supported for API < 19 (version=%d)" % version)
def startActivity(self, component=None, flags=None, uri=None):
self.__checkTransport()
cmd = 'am start'
if component:
cmd += ' -n %s' % component
if flags:
cmd += ' -f %s' % flags
if uri:
cmd += ' %s' % uri
if DEBUG:
print >> sys.stderr, "Starting activity: %s" % cmd
out = self.shell(cmd)
if re.search(r"(Error type)|(Error: )|(Cannot find 'App')", out, re.IGNORECASE | re.MULTILINE):
raise RuntimeError(out)
def takeSnapshot(self, reconnect=False):
'''
Takes a snapshot of the device and return it as a PIL Image.
'''
self.__checkTransport()
try:
from PIL import Image
        except ImportError:
raise Exception("You have to install PIL to use takeSnapshot()")
self.__send('framebuffer:', checkok=True, reconnect=False)
import struct
# case 1: // version
# return 12; // bpp, size, width, height, 4*(length, offset)
received = self.__receive(1 * 4 + 12 * 4)
(version, bpp, size, width, height, roffset, rlen, boffset, blen, goffset, glen, aoffset, alen) = struct.unpack('<' + 'L' * 13, received)
if DEBUG:
print >> sys.stderr, " takeSnapshot:", (version, bpp, size, width, height, roffset, rlen, boffset, blen, goffset, glen, aoffset, alen)
offsets = {roffset:'R', goffset:'G', boffset:'B'}
if bpp == 32:
if alen != 0:
offsets[aoffset] = 'A'
else:
warnings.warn('''framebuffer is specified as 32bpp but alpha length is 0''')
argMode = ''.join([offsets[o] for o in sorted(offsets)])
if DEBUG:
            print >> sys.stderr, "  takeSnapshot:", (version, bpp, size, width, height, roffset, rlen, boffset, blen, goffset, glen, aoffset, alen, argMode)
if argMode == 'BGRA':
argMode = 'RGBA'
if bpp == 16:
mode = 'RGB'
argMode += ';16'
else:
mode = argMode
self.__send('\0', checkok=False, reconnect=False)
if DEBUG:
print >> sys.stderr, " takeSnapshot: reading %d bytes" % (size)
received = self.__receive(size)
if reconnect:
self.__connect()
self.__setTransport()
if DEBUG:
print >> sys.stderr, " takeSnapshot: Image.frombuffer(%s, %s, %s, %s, %s, %s, %s)" % (mode, (width, height), 'data', 'raw', argMode, 0, 1)
image = Image.frombuffer(mode, (width, height), received, 'raw', argMode, 0, 1)
# Just in case let's get the real image size
if 'orientation' in self.display:
r = (0, 90, 180, -90)[self.display['orientation']]
image = image.rotate(r)
return image
def __transformPointByOrientation(self, (x, y), orientationOrig, orientationDest):
if orientationOrig != orientationDest:
if orientationDest == 1:
_x = x
x = self.display['width'] - y
y = _x
elif orientationDest == 3:
_x = x
x = y
y = self.display['height'] - _x
return (x, y)
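    # Worked example (illustrative): with display['width'] == 1080, the
    # portrait point (100, 200) transformed to orientation 1 becomes
    # (1080 - 200, 100) == (880, 100), per the first branch above.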
def touch(self, x, y, orientation=-1, eventType=DOWN_AND_UP):
if DEBUG_TOUCH:
print >> sys.stderr, "touch(x=", x, ", y=", y, ", orientation=", orientation, ", eventType=", eventType, ")"
self.__checkTransport()
if orientation == -1:
orientation = self.display['orientation']
self.shell('input tap %d %d' % self.__transformPointByOrientation((x, y), orientation, self.display['orientation']))
def touchDip(self, x, y, orientation=-1, eventType=DOWN_AND_UP):
if DEBUG_TOUCH:
print >> sys.stderr, "touchDip(x=", x, ", y=", y, ", orientation=", orientation, ", eventType=", eventType, ")"
self.__checkTransport()
if orientation == -1:
orientation = self.display['orientation']
x = x * self.display['density']
y = y * self.display['density']
self.touch(x, y, orientation, eventType)
def longTouch(self, x, y, duration=2000):
'''
Long touches at (x, y)
@param duration: duration in ms
This workaround was suggested by U{HaMi<http://stackoverflow.com/users/2571957/hami>}
'''
self.__checkTransport()
self.drag((x, y), (x, y), duration, 1)
def drag(self, (x0, y0), (x1, y1), duration, steps=1, orientation=-1):
'''
        Sends drag event in PX (actually it uses the C{input swipe} command).
        @param (x0, y0): starting point in PX
        @param (x1, y1): ending point in PX
        @param duration: duration of the event in ms
        @param steps: number of steps (currently ignored by C{input swipe})
'''
self.__checkTransport()
if orientation == -1:
orientation = self.display['orientation']
(x0, y0) = self.__transformPointByOrientation((x0, y0), orientation, self.display['orientation'])
(x1, y1) = self.__transformPointByOrientation((x1, y1), orientation, self.display['orientation'])
version = self.getSdkVersion()
if version <= 15:
raise RuntimeError('drag: API <= 15 not supported (version=%d)' % version)
elif version <= 17:
self.shell('input swipe %d %d %d %d' % (x0, y0, x1, y1))
else:
self.shell('input touchscreen swipe %d %d %d %d %d' % (x0, y0, x1, y1, duration))
def dragDip(self, (x0, y0), (x1, y1), duration, steps=1, orientation=-1):
'''
        Sends drag event in DIP (actually it uses the C{input swipe} command).
        @param (x0, y0): starting point in DIP
        @param (x1, y1): ending point in DIP
        @param duration: duration of the event in ms
        @param steps: number of steps (currently ignored by C{input swipe})
'''
self.__checkTransport()
if orientation == -1:
orientation = self.display['orientation']
density = self.display['density'] if self.display['density'] > 0 else 1
x0 = x0 * density
y0 = y0 * density
x1 = x1 * density
y1 = y1 * density
self.drag((x0, y0), (x1, y1), duration, steps, orientation)
def type(self, text):
self.__checkTransport()
self.shell(u'input text "%s"' % text)
def wake(self):
self.__checkTransport()
if not self.isScreenOn():
self.shell('input keyevent POWER')
def isLocked(self):
'''
Checks if the device screen is locked.
@return True if the device screen is locked
'''
self.__checkTransport()
lockScreenRE = re.compile('mShowingLockscreen=(true|false)')
m = lockScreenRE.search(self.shell('dumpsys window policy'))
if m:
return (m.group(1) == 'true')
raise RuntimeError("Couldn't determine screen lock state")
def isScreenOn(self):
'''
Checks if the screen is ON.
@return True if the device screen is ON
'''
self.__checkTransport()
screenOnRE = re.compile('mScreenOnFully=(true|false)')
m = screenOnRE.search(self.shell('dumpsys window policy'))
if m:
return (m.group(1) == 'true')
raise RuntimeError("Couldn't determine screen ON state")
def unlock(self):
'''
Unlocks the screen of the device.
'''
self.__checkTransport()
self.shell('input keyevent MENU')
self.shell('input keyevent BACK')
@staticmethod
def percentSame(image1, image2):
'''
Returns the percent of pixels that are equal
@author: catshoes
'''
# If the images differ in size, return 0% same.
size_x1, size_y1 = image1.size
size_x2, size_y2 = image2.size
if (size_x1 != size_x2 or
size_y1 != size_y2):
return 0
# Images are the same size
# Return the percent of pixels that are equal.
numPixelsSame = 0
numPixelsTotal = size_x1 * size_y1
image1Pixels = image1.load()
image2Pixels = image2.load()
# Loop over all pixels, comparing pixel in image1 to image2
for x in range(size_x1):
for y in range(size_y1):
if (image1Pixels[x, y] == image2Pixels[x, y]):
numPixelsSame += 1
return numPixelsSame / float(numPixelsTotal)
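    # Worked example (illustrative): two same-size 2x2 images differing in
    # exactly one pixel give 3 / 4.0 == 0.75, so sameAs(img1, img2, 0.75)
    # returns True while the default percent=1.0 returns False.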
@staticmethod
def sameAs(image1, image2, percent=1.0):
'''
Compares 2 images
@author: catshoes
'''
return (AdbClient.percentSame(image1, image2) >= percent)
def isKeyboardShown(self):
'''
Whether the keyboard is displayed.
'''
self.__checkTransport()
dim = self.shell('dumpsys input_method')
if dim:
# FIXME: API >= 15 ?
return "mInputShown=true" in dim
return False
def initDisplayProperties(self):
self.__checkTransport()
self.__displayInfo = None
self.display['width'] = self.getProperty('display.width')
self.display['height'] = self.getProperty('display.height')
self.display['density'] = self.getProperty('display.density')
self.display['orientation'] = self.getProperty('display.orientation')
def log(self, tag, message, priority='D', verbose=False):
if DEBUG_LOG:
print >> sys.stderr, "log(tag=%s, message=%s, priority=%s, verbose=%s)" % (tag, message, priority, verbose)
self.__checkTransport()
message = self.substituteDeviceTemplate(message)
if verbose or priority == 'V':
print >> sys.stderr, tag+':', message
self.shell('log -p %c -t "%s" %s' % (priority, tag, message))
class __Log():
'''
Log class to simulate C{android.util.Log}
'''
def __init__(self, adbClient):
self.adbClient = adbClient
def __getattr__(self, attr):
'''
Returns the corresponding log method or @C{AttributeError}.
'''
if attr in ['v', 'd', 'i', 'w', 'e']:
return lambda tag, message, verbose: self.adbClient.log(tag, message, priority=attr.upper(), verbose=verbose)
raise AttributeError(self.__class__.__name__ + ' has no attribute "%s"' % attr)
def getSystemService(self, name):
if name == WIFI_SERVICE:
return WifiManager(self)
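    # Hedged usage sketch (illustrative, not in the original source), assuming
    # one connected device whose serial number matches the regex:
    #   adb = AdbClient(serialno='.*')
    #   wifi = adb.getSystemService(WIFI_SERVICE)
    #   if wifi.getWifiState() == WifiManager.WIFI_STATE_ENABLED:
    #       adb.Log.d('demo', 'Wi-Fi is up', False)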
# def getWindows(self):
# self.__checkTransport()
# windows = {}
# dww = self.shell('dumpsys window windows')
# if DEBUG_WINDOWS: print >> sys.stderr, dww
# lines = dww.splitlines()
# widRE = re.compile('^ *Window #%s Window{%s (u\d+ )?%s?.*}:' %
# (_nd('num'), _nh('winId'), _ns('activity', greedy=True)))
# currentFocusRE = re.compile('^ mCurrentFocus=Window{%s .*' % _nh('winId'))
# viewVisibilityRE = re.compile(' mViewVisibility=0x%s ' % _nh('visibility'))
# # This is for 4.0.4 API-15
# containingFrameRE = re.compile('^ *mContainingFrame=\[%s,%s\]\[%s,%s\] mParentFrame=\[%s,%s\]\[%s,%s\]' %
# (_nd('cx'), _nd('cy'), _nd('cw'), _nd('ch'), _nd('px'), _nd('py'), _nd('pw'), _nd('ph')))
# contentFrameRE = re.compile('^ *mContentFrame=\[%s,%s\]\[%s,%s\] mVisibleFrame=\[%s,%s\]\[%s,%s\]' %
# (_nd('x'), _nd('y'), _nd('w'), _nd('h'), _nd('vx'), _nd('vy'), _nd('vx1'), _nd('vy1')))
# # This is for 4.1 API-16
# framesRE = re.compile('^ *Frames: containing=\[%s,%s\]\[%s,%s\] parent=\[%s,%s\]\[%s,%s\]' %
# (_nd('cx'), _nd('cy'), _nd('cw'), _nd('ch'), _nd('px'), _nd('py'), _nd('pw'), _nd('ph')))
# contentRE = re.compile('^ *content=\[%s,%s\]\[%s,%s\] visible=\[%s,%s\]\[%s,%s\]' %
# (_nd('x'), _nd('y'), _nd('w'), _nd('h'), _nd('vx'), _nd('vy'), _nd('vx1'), _nd('vy1')))
# policyVisibilityRE = re.compile('mPolicyVisibility=%s ' % _ns('policyVisibility', greedy=True))
# for l in range(len(lines)):
# m = widRE.search(lines[l])
# if m:
# num = int(m.group('num'))
# winId = m.group('winId')
# activity = m.group('activity')
# wvx = 0
# wvy = 0
# wvw = 0
# wvh = 0
# px = 0
# py = 0
# visibility = -1
# policyVisibility = 0x0
# for l2 in range(l+1, len(lines)):
# m = widRE.search(lines[l2])
# if m:
# l += (l2-1)
# break
# m = viewVisibilityRE.search(lines[l2])
# if m:
# visibility = int(m.group('visibility'))
# if DEBUG_COORDS: print >> sys.stderr, "getWindows: visibility=", visibility
# if self.build[VERSION_SDK_PROPERTY] >= 17:
# wvx, wvy = (0, 0)
# wvw, wvh = (0, 0)
# if self.build[VERSION_SDK_PROPERTY] >= 16:
# m = framesRE.search(lines[l2])
# if m:
# px, py = obtainPxPy(m)
# m = contentRE.search(lines[l2+1])
# if m:
# # FIXME: the information provided by 'dumpsys window windows' in 4.2.1 (API 16)
# # when there's a system dialog may not be correct and causes the View coordinates
# # be offset by this amount, see
# # https://github.com/dtmilano/AndroidViewClient/issues/29
# wvx, wvy = obtainVxVy(m)
# wvw, wvh = obtainVwVh(m)
# elif self.build[VERSION_SDK_PROPERTY] == 15:
# m = containingFrameRE.search(lines[l2])
# if m:
# px, py = obtainPxPy(m)
# m = contentFrameRE.search(lines[l2+1])
# if m:
# wvx, wvy = obtainVxVy(m)
# wvw, wvh = obtainVwVh(m)
# elif self.build[VERSION_SDK_PROPERTY] == 10:
# m = containingFrameRE.search(lines[l2])
# if m:
# px, py = obtainPxPy(m)
# m = contentFrameRE.search(lines[l2+1])
# if m:
# wvx, wvy = obtainVxVy(m)
# wvw, wvh = obtainVwVh(m)
# else:
# warnings.warn("Unsupported Android version %d" % self.build[VERSION_SDK_PROPERTY])
# #print >> sys.stderr, "Searching policyVisibility in", lines[l2]
# m = policyVisibilityRE.search(lines[l2])
# if m:
# policyVisibility = 0x0 if m.group('policyVisibility') == 'true' else 0x8
# windows[winId] = Window(num, winId, activity, wvx, wvy, wvw, wvh, px, py, visibility + policyVisibility)
# else:
# m = currentFocusRE.search(lines[l])
# if m:
# currentFocus = m.group('winId')
# if currentFocus in windows and windows[currentFocus].visibility == 0:
# if DEBUG_COORDS:
# print >> sys.stderr, "getWindows: focus=", currentFocus
# print >> sys.stderr, "getWindows:", windows[currentFocus]
# windows[currentFocus].focused = True
# return windows
# def getFocusedWindow(self):
# '''
# Gets the focused window.
# @return: The focused L{Window}.
# '''
# for window in self.getWindows().values():
# if window.focused:
# return window
# return None
# def getFocusedWindowName(self):
# '''
# Gets the focused window name.
# This is much like monkeyRunner's C{HierarchyView.getWindowName()}
# @return: The focused window name
# '''
# window = self.getFocusedWindow()
# if window:
# return window.activity
# return None
# def substituteDeviceTemplate(self, template):
# serialno = self.serialno.replace('.', '_').replace(':', '-')
# focusedWindowName = self.getFocusedWindowName().replace('/', '-').replace('.', '_')
# timestamp = datetime.datetime.now().isoformat()
# _map = {
# 'serialno': serialno,
# 'focusedwindowname': focusedWindowName,
# 'timestamp': timestamp
# }
# return string.Template(template).substitute(_map)
if __name__ == '__main__':
adbClient = AdbClient(os.environ['ANDROID_SERIAL'])
INTERACTIVE = False
if INTERACTIVE:
sout = adbClient.shell()
prompt = re.compile(".+@android:(.*) [$#] \r\r\n")
while True:
try:
cmd = raw_input('adb $ ')
except EOFError:
break
if cmd == 'exit':
break
            adbClient.socket.send(cmd + "\r\n")
sout.readline(4096) # eat first line, which is the command
while True:
line = sout.readline(4096)
if prompt.match(line):
break
print line,
if not line:
break
print "\nBye"
else:
print 'date:', adbClient.shell('date')
|
NetEase/airtest
|
airtest/device/adb/adbclient.py
|
Python
|
bsd-3-clause
| 39,385
|
from datetime import datetime
import numpy as np
import pytest
from pytz import UTC
from pandas._libs.tslibs import (
OutOfBoundsTimedelta,
conversion,
iNaT,
timezones,
tzconversion,
)
from pandas import Timestamp, date_range
import pandas._testing as tm
def _compare_utc_to_local(tz_didx):
def f(x):
return tzconversion.tz_convert_from_utc_single(x, tz_didx.tz)
result = tzconversion.tz_convert_from_utc(tz_didx.asi8, tz_didx.tz)
expected = np.vectorize(f)(tz_didx.asi8)
tm.assert_numpy_array_equal(result, expected)
def _compare_local_to_utc(tz_didx, naive_didx):
# Check that tz_localize behaves the same vectorized and pointwise.
err1 = err2 = None
try:
        result = tzconversion.tz_localize_to_utc(naive_didx.asi8, tz_didx.tz)
except Exception as err:
err1 = err
try:
expected = naive_didx.map(lambda x: x.tz_localize(tz_didx.tz)).asi8
except Exception as err:
err2 = err
if err1 is not None:
assert type(err1) == type(err2)
else:
assert err2 is None
tm.assert_numpy_array_equal(result, expected)
def test_tz_convert_single_matches_tz_convert_hourly(tz_aware_fixture):
tz = tz_aware_fixture
tz_didx = date_range("2014-03-01", "2015-01-10", freq="H", tz=tz)
naive_didx = date_range("2014-03-01", "2015-01-10", freq="H")
_compare_utc_to_local(tz_didx)
_compare_local_to_utc(tz_didx, naive_didx)
@pytest.mark.parametrize("freq", ["D", "A"])
def test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq):
tz = tz_aware_fixture
tz_didx = date_range("2000-01-01", "2020-01-01", freq=freq, tz=tz)
naive_didx = date_range("2000-01-01", "2020-01-01", freq=freq)
_compare_utc_to_local(tz_didx)
_compare_local_to_utc(tz_didx, naive_didx)
@pytest.mark.parametrize(
"arr",
[
pytest.param(np.array([], dtype=np.int64), id="empty"),
pytest.param(np.array([iNaT], dtype=np.int64), id="all_nat"),
],
)
def test_tz_convert_corner(arr):
result = tzconversion.tz_convert_from_utc(arr, timezones.maybe_get_tz("Asia/Tokyo"))
tm.assert_numpy_array_equal(result, arr)
def test_tz_convert_readonly():
# GH#35530
arr = np.array([0], dtype=np.int64)
arr.setflags(write=False)
result = tzconversion.tz_convert_from_utc(arr, UTC)
tm.assert_numpy_array_equal(result, arr)
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("dtype", ["M8[ns]", "M8[s]"])
def test_length_zero_copy(dtype, copy):
arr = np.array([], dtype=dtype)
result = conversion.ensure_datetime64ns(arr, copy=copy)
assert result.base is (None if copy else arr)
def test_ensure_datetime64ns_bigendian():
# GH#29684
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = conversion.ensure_datetime64ns(arr)
expected = np.array([np.datetime64(1, "ms")], dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
def test_ensure_timedelta64ns_overflows():
arr = np.arange(10).astype("m8[Y]") * 100
msg = r"Out of bounds for nanosecond timedelta64\[Y\] 900"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
conversion.ensure_timedelta64ns(arr)
class SubDatetime(datetime):
pass
@pytest.mark.parametrize(
"dt, expected",
[
pytest.param(
Timestamp("2000-01-01"), Timestamp("2000-01-01", tz=UTC), id="timestamp"
),
pytest.param(
datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=UTC), id="datetime"
),
pytest.param(
SubDatetime(2000, 1, 1),
SubDatetime(2000, 1, 1, tzinfo=UTC),
id="subclassed_datetime",
),
],
)
def test_localize_pydatetime_dt_types(dt, expected):
# GH 25851
# ensure that subclassed datetime works with
# localize_pydatetime
result = conversion.localize_pydatetime(dt, UTC)
assert result == expected
|
jreback/pandas
|
pandas/tests/tslibs/test_conversion.py
|
Python
|
bsd-3-clause
| 3,973
|
"""
Here is a list of proxy objects that can be used when lazy=True at the neo.io level.
The idea is to postpone the real in-memory loading
for objects that contain big data (AnalogSignal, SpikeTrain, Event, Epoch).
The implementation relies on neo.rawio, so it is available only for neo.io
classes that inherit from neo.rawio.
"""
import numpy as np
import quantities as pq
import logging
from neo.core.baseneo import BaseNeo
from neo.core import (AnalogSignal,
Epoch, Event, SpikeTrain)
from neo.core.dataobject import ArrayDict
logger = logging.getLogger("Neo")
class BaseProxy(BaseNeo):
def __init__(self, array_annotations=None, **annotations):
        # this is for py27 str vs py3 str compatibility in neo attributes
annotations = check_annotations(annotations)
if 'file_origin' not in annotations:
            # the str() keeps compatibility with neo py27, where the
            # attribute used to be a plain str (raw bytes)
annotations['file_origin'] = str(self._rawio.source_name())
        # this mocks the array annotations to avoid inheriting from DataObject
self.array_annotations = ArrayDict(self.shape[-1])
if array_annotations is not None:
self.array_annotations.update(array_annotations)
BaseNeo.__init__(self, **annotations)
def load(self, time_slice=None, **kwargs):
# should be implemented by subclass
raise NotImplementedError
def time_slice(self, t_start, t_stop):
'''
Load the proxy object within the specified time range. Has the same
call signature as AnalogSignal.time_slice, Epoch.time_slice, etc.
'''
return self.load(time_slice=(t_start, t_stop))
class AnalogSignalProxy(BaseProxy):
'''
    This object mimics AnalogSignal except that it does not
    hold the signals array itself. All attributes and annotations are here.
    The goal is to postpone the loading of data into memory
    when reading a file with the new lazy load system based
    on neo.rawio.
    This object must not be constructed directly but is returned by
    neo.io when lazy=True instead of a true AnalogSignal.
    The AnalogSignalProxy is able to load:
        * only a slice of time
        * only a subset of channels
        * an internal raw magnitude identical to the file (int16) with
          a pq.CompoundUnit().
Usage:
>>> proxy_anasig = AnalogSignalProxy(rawio=self.reader,
global_channel_indexes=None,
block_index=0,
seg_index=0)
>>> anasig = proxy_anasig.load()
>>> slice_of_anasig = proxy_anasig.load(time_slice=(1.*pq.s, 2.*pq.s))
>>> some_channel_of_anasig = proxy_anasig.load(channel_indexes=[0,5,10])
'''
_single_parent_objects = ('Segment', 'ChannelIndex')
_necessary_attrs = (('sampling_rate', pq.Quantity, 0),
('t_start', pq.Quantity, 0))
_recommended_attrs = BaseNeo._recommended_attrs
proxy_for = AnalogSignal
def __init__(self, rawio=None, global_channel_indexes=None, block_index=0, seg_index=0):
self._rawio = rawio
self._block_index = block_index
self._seg_index = seg_index
if global_channel_indexes is None:
global_channel_indexes = slice(None)
total_nb_chan = self._rawio.header['signal_channels'].size
self._global_channel_indexes = np.arange(total_nb_chan)[global_channel_indexes]
self._nb_chan = self._global_channel_indexes.size
sig_chans = self._rawio.header['signal_channels'][self._global_channel_indexes]
        assert np.unique(sig_chans['units']).size == 1, 'Channels do not have the same units'
        assert np.unique(sig_chans['dtype']).size == 1, 'Channels do not have the same dtype'
        assert np.unique(sig_chans['sampling_rate']).size == 1, \
            'Channels do not have the same sampling_rate'
self.units = ensure_signal_units(sig_chans['units'][0])
self.dtype = sig_chans['dtype'][0]
self.sampling_rate = sig_chans['sampling_rate'][0] * pq.Hz
self.sampling_period = 1. / self.sampling_rate
sigs_size = self._rawio.get_signal_size(block_index=block_index, seg_index=seg_index,
channel_indexes=self._global_channel_indexes)
self.shape = (sigs_size, self._nb_chan)
self.t_start = self._rawio.get_signal_t_start(block_index, seg_index,
self._global_channel_indexes) * pq.s
# magnitude_mode='raw' is supported only if all offset=0
# and all gain are the same
support_raw_magnitude = np.all(sig_chans['gain'] == sig_chans['gain'][0]) and \
np.all(sig_chans['offset'] == 0.)
if support_raw_magnitude:
str_units = ensure_signal_units(sig_chans['units'][0]).units.dimensionality.string
self._raw_units = pq.CompoundUnit('{}*{}'.format(sig_chans['gain'][0], str_units))
else:
self._raw_units = None
# both necessary attr and annotations
annotations = {}
annotations['name'] = self._make_name(None)
if len(sig_chans) == 1:
            # when there is only one channel, raw_annotations are set as standard annotations
d = self._rawio.raw_annotations['blocks'][block_index]['segments'][seg_index][
'signals'][self._global_channel_indexes[0]]
annotations.update(d)
array_annotations = {
'channel_names': np.array(sig_chans['name'], copy=True),
'channel_ids': np.array(sig_chans['id'], copy=True),
}
# array annotations for signal can be at 2 places
# global at signal channel level
d = self._rawio.raw_annotations['signal_channels']
array_annotations.update(create_analogsignal_array_annotations(
d, self._global_channel_indexes))
# or specific to block/segment/signals
d = self._rawio.raw_annotations['blocks'][block_index]['segments'][seg_index]['signals']
array_annotations.update(create_analogsignal_array_annotations(
d, self._global_channel_indexes))
BaseProxy.__init__(self, array_annotations=array_annotations, **annotations)
def _make_name(self, channel_indexes):
sig_chans = self._rawio.header['signal_channels'][self._global_channel_indexes]
if channel_indexes is not None:
sig_chans = sig_chans[channel_indexes]
if len(sig_chans) == 1:
name = sig_chans['name'][0]
else:
name = 'Channel bundle ({}) '.format(','.join(sig_chans['name']))
return name
@property
def duration(self):
'''Signal duration'''
return self.shape[0] / self.sampling_rate
@property
def t_stop(self):
'''Time when signal ends'''
return self.t_start + self.duration
def load(self, time_slice=None, strict_slicing=True,
channel_indexes=None, magnitude_mode='rescaled'):
'''
*Args*:
:time_slice: None or tuple of the time slice expressed with quantities.
None is the entire signal.
            :channel_indexes: None or list. Channels to load. None is all channels.
                            Be careful: channel_indexes represents the local channel index inside
                            the AnalogSignal, not the global_channel_indexes as in rawio.
            :magnitude_mode: 'rescaled' or 'raw'.
                            For instance, if the internal dtype is int16:
                                * **rescaled** gives [1.,2.,3.]*pq.uV and the dtype is float32
                                * **raw** gives [10, 20, 30]*pq.CompoundUnit('0.1*uV')
                            The CompoundUnit with magnitude_mode='raw' is useful to
                            postpone the scaling while keeping an internal dtype=int16,
                            but it is less intuitive if you are not familiar with quantities.
            :strict_slicing: True by default.
                             Controls whether an error is raised when one of the time_slice members
                             (t_start or t_stop) is outside the real time range of the segment.
'''
if channel_indexes is None:
channel_indexes = slice(None)
sr = self.sampling_rate
if time_slice is None:
i_start, i_stop = None, None
sig_t_start = self.t_start
else:
t_start, t_stop = time_slice
if t_start is None:
i_start = None
sig_t_start = self.t_start
else:
t_start = ensure_second(t_start)
if strict_slicing:
assert self.t_start <= t_start <= self.t_stop, 't_start is outside'
else:
t_start = max(t_start, self.t_start)
                # i_start must be rounded up (ceil)
i_start = int(np.ceil((t_start - self.t_start).magnitude * sr.magnitude))
                # this is needed to get the real t_start of the first sample,
                # because it does not necessarily match what was requested
sig_t_start = self.t_start + i_start / sr
if t_stop is None:
i_stop = None
else:
t_stop = ensure_second(t_stop)
if strict_slicing:
assert self.t_start <= t_stop <= self.t_stop, 't_stop is outside'
else:
t_stop = min(t_stop, self.t_stop)
i_stop = int((t_stop - self.t_start).magnitude * sr.magnitude)
raw_signal = self._rawio.get_analogsignal_chunk(block_index=self._block_index,
seg_index=self._seg_index, i_start=i_start, i_stop=i_stop,
channel_indexes=self._global_channel_indexes[channel_indexes])
# if slice in channel : change name and array_annotations
if raw_signal.shape[1] != self._nb_chan:
name = self._make_name(channel_indexes)
array_annotations = {k: v[channel_indexes] for k, v in self.array_annotations.items()}
else:
name = self.name
array_annotations = self.array_annotations
if magnitude_mode == 'raw':
            assert self._raw_units is not None,\
                'raw magnitude is not supported: gains differ across channels or offset is not 0'
sig = raw_signal
units = self._raw_units
elif magnitude_mode == 'rescaled':
# dtype is float32 when internally it is float32 or int16
if self.dtype == 'float64':
dtype = 'float64'
else:
dtype = 'float32'
sig = self._rawio.rescale_signal_raw_to_float(raw_signal, dtype=dtype,
channel_indexes=self._global_channel_indexes[channel_indexes])
units = self.units
anasig = AnalogSignal(sig, units=units, copy=False, t_start=sig_t_start,
sampling_rate=self.sampling_rate, name=name,
file_origin=self.file_origin, description=self.description,
array_annotations=array_annotations, **self.annotations)
return anasig
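    # Hedged usage sketch (illustrative): loading one second of the first two
    # channels at raw (integer) magnitude, assuming `proxy` is an instance of
    # this class and the channels share a gain with zero offset:
    #   raw_sig = proxy.load(time_slice=(0. * pq.s, 1. * pq.s),
    #                        channel_indexes=[0, 1], magnitude_mode='raw')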
class SpikeTrainProxy(BaseProxy):
'''
    This object mimics SpikeTrain except that it does not
    hold the spike times nor the waveforms.
    All attributes and annotations are here.
    The goal is to postpone the loading of data into memory
    when reading a file with the new lazy load system based
    on neo.rawio.
    This object must not be constructed directly but is returned by
    neo.io when lazy=True instead of a true SpikeTrain.
    The SpikeTrainProxy is able to load:
        * only a slice of time
        * waveforms or not
        * an internal raw magnitude identical to the file (generally clock
          ticks in int64) or rescaled to seconds.
Usage:
>>> proxy_sptr = SpikeTrainProxy(rawio=self.reader, unit_channel=0,
block_index=0, seg_index=0,)
>>> sptr = proxy_sptr.load()
>>> slice_of_sptr = proxy_sptr.load(time_slice=(1.*pq.s, 2.*pq.s))
'''
_single_parent_objects = ('Segment', 'Unit')
_quantity_attr = 'times'
_necessary_attrs = (('t_start', pq.Quantity, 0),
('t_stop', pq.Quantity, 0))
_recommended_attrs = ()
proxy_for = SpikeTrain
def __init__(self, rawio=None, unit_index=None, block_index=0, seg_index=0):
self._rawio = rawio
self._block_index = block_index
self._seg_index = seg_index
self._unit_index = unit_index
nb_spike = self._rawio.spike_count(block_index=block_index, seg_index=seg_index,
unit_index=unit_index)
self.shape = (nb_spike, )
self.t_start = self._rawio.segment_t_start(block_index, seg_index) * pq.s
self.t_stop = self._rawio.segment_t_stop(block_index, seg_index) * pq.s
# both necessary attr and annotations
annotations = {}
for k in ('name', 'id'):
annotations[k] = self._rawio.header['unit_channels'][unit_index][k]
ann = self._rawio.raw_annotations['blocks'][block_index]['segments'][seg_index]['units'][unit_index]
annotations.update(ann)
h = self._rawio.header['unit_channels'][unit_index]
wf_sampling_rate = h['wf_sampling_rate']
if not np.isnan(wf_sampling_rate) and wf_sampling_rate > 0:
self.sampling_rate = wf_sampling_rate * pq.Hz
self.left_sweep = (h['wf_left_sweep'] / self.sampling_rate).rescale('s')
self._wf_units = ensure_signal_units(h['wf_units'])
else:
self.sampling_rate = None
self.left_sweep = None
BaseProxy.__init__(self, **annotations)
def load(self, time_slice=None, strict_slicing=True,
magnitude_mode='rescaled', load_waveforms=False):
'''
*Args*:
:time_slice: None or tuple of the time slice expressed with quantities.
None is the entire signal.
:strict_slicing: True by default.
                Controls whether an error is raised when one of the time_slice
                members (t_start or t_stop) is outside the real time range of the segment.
:magnitude_mode: 'rescaled' or 'raw'.
:load_waveforms: bool load waveforms or not.
'''
t_start, t_stop = consolidate_time_slice(time_slice, self.t_start,
self.t_stop, strict_slicing)
_t_start, _t_stop = prepare_time_slice(time_slice)
spike_timestamps = self._rawio.get_spike_timestamps(block_index=self._block_index,
seg_index=self._seg_index, unit_index=self._unit_index, t_start=_t_start,
t_stop=_t_stop)
if magnitude_mode == 'raw':
            # we would need to modify the neo.rawio interface a bit to also expose
            # the spike_timestamps' underlying clock, which is not always the same
            # as the signals' clock
            raise NotImplementedError
elif magnitude_mode == 'rescaled':
dtype = 'float64'
spike_times = self._rawio.rescale_spike_timestamp(spike_timestamps, dtype=dtype)
units = 's'
if load_waveforms:
assert self.sampling_rate is not None, 'Do not have waveforms'
raw_wfs = self._rawio.get_spike_raw_waveforms(block_index=self._block_index,
seg_index=self._seg_index, unit_index=self._unit_index,
t_start=_t_start, t_stop=_t_stop)
if magnitude_mode == 'rescaled':
float_wfs = self._rawio.rescale_waveforms_to_float(raw_wfs,
dtype='float32', unit_index=self._unit_index)
waveforms = pq.Quantity(float_wfs, units=self._wf_units,
dtype='float32', copy=False)
elif magnitude_mode == 'raw':
                # we could also use a CompoundUnit here, but that would be
                # overkill, so we use dimensionless units
waveforms = pq.Quantity(raw_wfs, units='',
dtype=raw_wfs.dtype, copy=False)
else:
waveforms = None
sptr = SpikeTrain(spike_times, t_stop, units=units, dtype=dtype,
t_start=t_start, copy=False, sampling_rate=self.sampling_rate,
waveforms=waveforms, left_sweep=self.left_sweep, name=self.name,
file_origin=self.file_origin, description=self.description, **self.annotations)
return sptr
class _EventOrEpoch(BaseProxy):
_single_parent_objects = ('Segment',)
_quantity_attr = 'times'
def __init__(self, rawio=None, event_channel_index=None, block_index=0, seg_index=0):
self._rawio = rawio
self._block_index = block_index
self._seg_index = seg_index
self._event_channel_index = event_channel_index
nb_event = self._rawio.event_count(block_index=block_index, seg_index=seg_index,
event_channel_index=event_channel_index)
self.shape = (nb_event, )
self.t_start = self._rawio.segment_t_start(block_index, seg_index) * pq.s
self.t_stop = self._rawio.segment_t_stop(block_index, seg_index) * pq.s
# both necessary attr and annotations
annotations = {}
for k in ('name', 'id'):
annotations[k] = self._rawio.header['event_channels'][event_channel_index][k]
ann = self._rawio.raw_annotations['blocks'][block_index]['segments'][seg_index]['events'][event_channel_index]
annotations.update(ann)
BaseProxy.__init__(self, **annotations)
def load(self, time_slice=None, strict_slicing=True):
'''
*Args*:
:time_slice: None or tuple of the time slice expressed with quantities.
None is the entire signal.
:strict_slicing: True by default.
                Controls whether an error is raised when one of the time_slice members (t_start or t_stop)
                is outside the real time range of the segment.
'''
t_start, t_stop = consolidate_time_slice(time_slice, self.t_start,
self.t_stop, strict_slicing)
_t_start, _t_stop = prepare_time_slice(time_slice)
timestamp, durations, labels = self._rawio.get_event_timestamps(block_index=self._block_index,
seg_index=self._seg_index, event_channel_index=self._event_channel_index,
t_start=_t_start, t_stop=_t_stop)
dtype = 'float64'
times = self._rawio.rescale_event_timestamp(timestamp, dtype=dtype)
units = 's'
if durations is not None:
durations = self._rawio.rescale_epoch_duration(durations, dtype=dtype) * pq.s
h = self._rawio.header['event_channels'][self._event_channel_index]
if h['type'] == b'event':
ret = Event(times=times, labels=labels, units='s',
name=self.name, file_origin=self.file_origin,
description=self.description, **self.annotations)
elif h['type'] == b'epoch':
ret = Epoch(times=times, durations=durations, labels=labels,
units='s',
name=self.name, file_origin=self.file_origin,
description=self.description, **self.annotations)
return ret
class EventProxy(_EventOrEpoch):
'''
    This object mimics Event except that it does not
    hold the times nor the labels.
    All other attributes and annotations are here.
    The goal is to postpone the loading of data into memory
    when reading a file with the new lazy load system based
    on neo.rawio.
    This object must not be constructed directly but is returned by
    neo.io when lazy=True instead of a true Event.
The EventProxy is able to load:
* only a slice of time
Usage:
>>> proxy_event = EventProxy(rawio=self.reader, event_channel_index=0,
block_index=0, seg_index=0,)
>>> event = proxy_event.load()
>>> slice_of_event = proxy_event.load(time_slice=(1.*pq.s, 2.*pq.s))
'''
_necessary_attrs = (('times', pq.Quantity, 1),
('labels', np.ndarray, 1, np.dtype('S')))
proxy_for = Event
class EpochProxy(_EventOrEpoch):
'''
This object mimics Epoch except that it does not
hold the times, labels or durations in memory.
All other attributes and annotations are present.
The goal is to postpone the loading of data into memory
when reading a file with the new lazy load system based
on neo.rawio.
This object must not be constructed directly; it is returned by
neo.io when lazy=True instead of a true Epoch.
The EpochProxy is able to load:
* only a slice of time
Usage:
>>> proxy_epoch = EpochProxy(rawio=self.reader, event_channel_index=0,
block_index=0, seg_index=0,)
>>> epoch = proxy_epoch.load()
>>> slice_of_epoch = proxy_epoch.load(time_slice=(1.*pq.s, 2.*pq.s))
'''
_necessary_attrs = (('times', pq.Quantity, 1),
('durations', pq.Quantity, 1),
('labels', np.ndarray, 1, np.dtype('S')))
proxy_for = Epoch
proxyobjectlist = [AnalogSignalProxy, SpikeTrainProxy, EventProxy,
EpochProxy]
unit_convert = {'Volts': 'V', 'volts': 'V', 'Volt': 'V',
'volt': 'V', ' Volt': 'V', 'microV': 'uV',
# note that "micro" and "mu" are two different characters in Unicode
# although they mostly look the same. Here we accept both.
'µV': 'uV', 'μV': 'uV'}
def ensure_signal_units(units):
# test units
units = units.replace(' ', '')
if units in unit_convert:
units = unit_convert[units]
try:
units = pq.Quantity(1, units)
except Exception:
logger.warning('Units "{}" cannot be converted to a quantity. Using dimensionless '
'instead'.format(units))
units = ''
units = pq.Quantity(1, units)
return units
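# Illustrative sketch, not part of the original module: ensure_signal_units
# normalizes vendor unit strings before handing them to `quantities`; both
# the micro sign and the Greek mu map to 'uV', and an unrecognized string
# falls back to dimensionless (with a warning). For example:
#
#   ensure_signal_units('µV')          # -> pq.Quantity(1, 'uV')
#   ensure_signal_units(' Volt ')      # -> pq.Quantity(1, 'V')  (spaces stripped)
#   ensure_signal_units('not-a-unit')  # warns, -> pq.Quantity(1, '') dimensionless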
def check_annotations(annotations):
# force type to str for some keys
# imposed for tests
for k in ('name', 'description', 'file_origin'):
if k in annotations:
annotations[k] = str(annotations[k])
if 'coordinates' in annotations:
# some rawio classes expose coordinates in annotations, but this is not
# standardized ((x, y, z) or polar); for the moment it is more reasonable to remove them
annotations.pop('coordinates')
return annotations
def ensure_second(v):
if isinstance(v, float):
return v * pq.s
elif isinstance(v, pq.Quantity):
return v.rescale('s')
elif isinstance(v, int):
return float(v) * pq.s
def prepare_time_slice(time_slice):
"""
Return a clean time slice (plain floats in seconds) but keep None
endpoints, for calling the rawio slicing API.
"""
if time_slice is None:
t_start, t_stop = None, None
else:
t_start, t_stop = time_slice
if t_start is not None:
t_start = ensure_second(t_start).rescale('s').magnitude
if t_stop is not None:
t_stop = ensure_second(t_stop).rescale('s').magnitude
return (t_start, t_stop)
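# Illustrative sketch, not part of the original module: prepare_time_slice
# turns quantity endpoints into plain floats in seconds (numpy scalars) while
# preserving None, which is what the rawio layer expects. For example:
#
#   prepare_time_slice(None)                  # -> (None, None)
#   prepare_time_slice((500. * pq.ms, None))  # -> (0.5, None)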
def consolidate_time_slice(time_slice, seg_t_start, seg_t_stop, strict_slicing):
"""
Return a clean time slice, as quantities, for the t_start/t_stop of an object.
None endpoints are replaced by the segment limits.
"""
if time_slice is None:
t_start, t_stop = None, None
else:
t_start, t_stop = time_slice
if t_start is None:
t_start = seg_t_start
else:
if strict_slicing:
assert seg_t_start <= t_start <= seg_t_stop, 't_start is outside'
else:
t_start = max(t_start, seg_t_start)
t_start = ensure_second(t_start)
if t_stop is None:
t_stop = seg_t_stop
else:
if strict_slicing:
assert seg_t_start <= t_stop <= seg_t_stop, 't_stop is outside'
else:
t_stop = min(t_stop, seg_t_stop)
t_stop = ensure_second(t_stop)
return (t_start, t_stop)
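# Illustrative sketch, not part of the original module: with strict_slicing
# the bounds must lie inside the segment, otherwise they are clipped to it.
# Assuming a segment spanning 0 s to 4 s:
#
#   consolidate_time_slice((-1. * pq.s, 5. * pq.s),
#                          0. * pq.s, 4. * pq.s, strict_slicing=False)
#   # -> (0 s, 4 s), clipped to the segment limits
#   consolidate_time_slice((-1. * pq.s, 5. * pq.s),
#                          0. * pq.s, 4. * pq.s, strict_slicing=True)
#   # -> AssertionError: 't_start is outside'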
def create_analogsignal_array_annotations(sig_annotations, global_channel_indexes):
"""
Create array_annotations from raw_annotations.
Since raw_annotations are not np.arrays but nested dicts, this function
tries to find keys in raw_annotations that are shared by all channels
and builds array_annotations from them.
"""
# intersection of keys across channels
common_keys = None
for ind in global_channel_indexes:
keys = [k for k, v in sig_annotations[ind].items() if not \
isinstance(v, (list, tuple, np.ndarray))]
if common_keys is None:
common_keys = keys
else:
common_keys = [k for k in common_keys if k in keys]
# these keys are redundant: they are already carried under another name
for k in ['name', 'channel_id']:
if k in common_keys:
common_keys.remove(k)
array_annotations = {}
for k in common_keys:
values = [sig_annotations[ind][k] for ind in global_channel_indexes]
array_annotations[k] = np.array(values)
return array_annotations
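# Illustrative sketch, not part of the original module (hypothetical values):
#
#   sig_annotations = {0: {'gain': 1.0, 'name': 'ch0'},
#                      1: {'gain': 2.0, 'name': 'ch1'}}
#   create_analogsignal_array_annotations(sig_annotations, [0, 1])
#   # -> {'gain': array([1., 2.])}; 'name' is dropped as redundant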
|
JuliaSprenger/python-neo
|
neo/io/proxyobjects.py
|
Python
|
bsd-3-clause
| 25,541
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Creates ERTs and convergence figures for multiple algorithms."""
from __future__ import absolute_import
import os
import matplotlib.pyplot as plt
import numpy
from pdb import set_trace
from .. import toolsdivers, toolsstats, bestalg, pproc, genericsettings, htmldesc, ppfigparam
from ..ppfig import saveFigure
from ..pptex import color_to_latex, marker_to_latex, marker_to_html, writeLabels
# styles = [{'color': 'k', 'marker': 'o', 'markeredgecolor': 'k'},
# {'color': 'b'},
# {'color': 'c', 'marker': 'v', 'markeredgecolor': 'c'},
# {'color': 'g'},
# {'color': 'y', 'marker': '^', 'markeredgecolor': 'y'},
# {'color': 'm'},
# {'color': 'r', 'marker': 's', 'markeredgecolor': 'r'}] # sort of rainbow style
show_significance = 0.01 # for zero nothing is shown
scaling_figure_caption_start_fixed = (r"""Expected running time (\ERT\ in number of $f$-evaluations
as $\log_{10}$ value), divided by dimension for target function value $BBOBPPFIGSFTARGET$
versus dimension. Slanted grid lines indicate quadratic scaling with the dimension. """
)
scaling_figure_caption_start_rlbased = (r"""Expected running time (\ERT\ in number of $f$-evaluations
as $\log_{10}$ value) divided by dimension versus dimension. The target function value
is chosen such that the REFERENCE_ALGORITHM artificial algorithm just failed to achieve
an \ERT\ of $BBOBPPFIGSFTARGET\times\DIM$. """
)
scaling_figure_caption_end = (
r"Different symbols " +
r"correspond to different algorithms given in the legend of #1. " +
r"Light symbols give the maximum number of function evaluations from the longest trial " +
r"divided by dimension. " +
(r"Black stars indicate a statistically better result compared to all other algorithms " +
r"with $p<0.01$ and Bonferroni correction number of dimensions (six). ")
if show_significance else ''
)
ecdfs_figure_caption_standard = (
r"Bootstrapped empirical cumulative distribution of the number " +
r"of objective function evaluations divided by dimension " +
r"(FEvals/DIM) for 50 targets in $10^{[-8..2]}$ for all "+
r"functions and subgroups in #1-D. The ``best 2009'' line "+
r"corresponds to the best \ERT\ observed during BBOB 2009 " +
r"for each single target."
)
ecdfs_figure_caption_rlbased = (
r"Bootstrapped empirical cumulative distribution of the number " +
r"of objective function evaluations divided by dimension " +
r"(FEvals/DIM) for all functions and subgroups in #1-D." +
r" The targets are chosen from $10^{[-8..2]}$ " +
r"such that the REFERENCE_ALGORITHM artificial algorithm just " +
r"not reached them within a given budget of $k$ $\times$ DIM, " +
r"with $k\in \{0.5, 1.2, 3, 10, 50\}$. " +
r"The ``best 2009'' line " +
r"corresponds to the best \ERT\ observed during BBOB 2009 " +
r"for each selected target."
)
styles = genericsettings.line_styles
def fix_styles(number, styles=styles):
"""a short hack to fix length of styles"""
m = len(styles)
while len(styles) < number:
styles.append(styles[len(styles) % m])
for i in xrange(len(styles)):
styles[i].update({'linewidth': 5 - min([2, i/3.0]), # thinner lines over thicker lines
'markeredgewidth': 6 - min([2, i / 2.0]),
'markerfacecolor': 'None'})
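# Illustrative note, not part of the original file: with, say, 3 base styles
# and number=5, the list is padded by cycling, so the 4th and 5th entries are
# the same dict objects as the 1st and 2nd (appended by reference, not copied)
# before the linewidth/marker tweaks in the loop are applied:
#
#   base = [{'color': 'k'}, {'color': 'b'}, {'color': 'c'}]
#   fix_styles(5, base)  # appends base[0] and base[1] again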
refcolor = 'wheat'
show_algorithms = []
fontsize = 10.0
legend = False
def scaling_figure_caption(target):
# need to be used in rungenericmany.py!?
assert len(target) == 1
if isinstance(target, pproc.RunlengthBasedTargetValues):
s = scaling_figure_caption_start_rlbased.replace('BBOBPPFIGSFTARGET',
toolsdivers.number_to_latex(target.label(0)))
s = s.replace('REFERENCE_ALGORITHM', target.reference_algorithm)
else:
s = scaling_figure_caption_start_fixed.replace('BBOBPPFIGSFTARGET',
toolsdivers.number_to_latex(target.label(0)))
s += scaling_figure_caption_end
return s
def ecdfs_figure_caption(target):
assert len(target) == 1
if isinstance(target, pproc.RunlengthBasedTargetValues):
s = ecdfs_figure_caption_rlbased.replace('REFERENCE_ALGORITHM',
target.reference_algorithm)
else:
s = ecdfs_figure_caption_standard
return s
def scaling_figure_caption_html(target):
# need to be used in rungenericmany.py!?
assert len(target) == 1
if isinstance(target, pproc.RunlengthBasedTargetValues):
s = htmldesc.getValue('##bbobppfigslegendrlbased##').replace('BBOBPPFIGSFTARGET',
toolsdivers.number_to_html(target.label(0)))
s = s.replace('REFERENCEALGORITHM', target.reference_algorithm)
else:
s = htmldesc.getValue('##bbobppfigslegendfixed##').replace('BBOBPPFIGSFTARGET',
toolsdivers.number_to_html(target.label(0)))
if show_significance:
s += htmldesc.getValue('##bbobppfigslegendend##')
return s
def ecdfs_figure_caption_html(target, dimension):
assert len(target) == 1
if isinstance(target, pproc.RunlengthBasedTargetValues):
s = htmldesc.getValue('##bbobECDFslegendrlbased%d##' % dimension).replace('REFERENCEALGORITHM',
target.reference_algorithm)
else:
s = htmldesc.getValue('##bbobECDFslegendstandard%d##' % dimension)
return s
def plotLegend(handles, maxval=None):
"""Display right-side legend.
Sorted from smaller to larger y-coordinate values.
"""
ys = {}
lh = 0 # Number of labels to display on the right
if not maxval:
maxval = []
for h in handles:
x2 = []
y2 = []
for i in h:
x2.append(plt.getp(i, "xdata"))
x2 = numpy.sort(numpy.hstack(x2))
maxval.append(max(x2))
maxval = max(maxval)
for h in handles:
x2 = []
y2 = []
for i in h:
x2.append(plt.getp(i, "xdata"))
y2.append(plt.getp(i, "ydata"))
x2 = numpy.array(numpy.hstack(x2))
y2 = numpy.array(numpy.hstack(y2))
tmp = numpy.argsort(x2)
x2 = x2[tmp]
y2 = y2[tmp]
h = h[-1]
# ybis is used to sort in case of ties
try:
tmp = x2 <= maxval
y = y2[tmp][-1]
ybis = y2[tmp][y2[tmp] < y]
if len(ybis) > 0:
ybis = ybis[-1]
else:
ybis = y2[tmp][-2]
ys.setdefault(y, {}).setdefault(ybis, []).append(h)
lh += 1
except IndexError:
pass
if len(show_algorithms) > 0:
lh = min(lh, len(show_algorithms))
if lh <= 1:
lh = 2
ymin, ymax = plt.ylim()
xmin, xmax = plt.xlim()
i = 0 # loop over the elements of ys
for j in sorted(ys.keys()):
for k in sorted(ys[j].keys()):
#enforce best 2009 comes first in case of equality
tmp = []
for h in ys[j][k]:
if plt.getp(h, 'label') == 'best 2009':
tmp.insert(0, h)
else:
tmp.append(h)
#tmp.reverse()
ys[j][k] = tmp
for h in ys[j][k]:
if (not plt.getp(h, 'label').startswith('_line') and
(len(show_algorithms) == 0 or
plt.getp(h, 'label') in show_algorithms)):
y = 0.02 + i * 0.96/(lh-1)
# transform y in the axis coordinates
#inv = plt.gca().transLimits.inverted()
#legx, ydat = inv.transform((.9, y))
#leglabx, ydat = inv.transform((.92, y))
#set_trace()
ydat = 10**(y * numpy.log10(ymax/ymin)) * ymin
legx = 10**(.85 * numpy.log10(xmax/xmin)) * xmin
leglabx = 10**(.87 * numpy.log10(xmax/xmin)) * xmin
tmp = {}
for attr in ('lw', 'ls', 'marker',
'markeredgewidth', 'markerfacecolor',
'markeredgecolor', 'markersize', 'zorder'):
tmp[attr] = plt.getp(h, attr)
plt.plot((maxval, legx), (j, ydat),
color=plt.getp(h, 'markeredgecolor'), **tmp)
plt.text(leglabx, ydat,
plt.getp(h, 'label'), horizontalalignment="left",
verticalalignment="center", size=fontsize)
i += 1
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
if maxval:
plt.axvline(maxval, color='k')
def beautify(legend=False, rightlegend=False):
"""Customize figure format.
Adds a legend, axis labels, etc.
:param bool legend: if True, display a box legend
:param bool rightlegend: if True, makes some space on the right for
legend
"""
# Get axis handle and set scale for each axis
axisHandle = plt.gca()
axisHandle.set_xscale("log")
try:
axisHandle.set_yscale("log")
except OverflowError:
set_trace()
# Grid options
axisHandle.yaxis.grid(True)
ymin, ymax = plt.ylim()
# quadratic slanted "grid"
if 1 < 3:
for i in xrange(-2, 7, 1 if ymax < 1e5 else 2):
plt.plot((0.2, 20000), (10**i, 10**(i + 5)), 'k:',
linewidth=0.5) # grid should be on top
else: # to be removed
plt.plot((2,200), (1, 1e2), 'k:', zorder=-1) # -1 -> plotted below?
# plt.plot((2,200), (1, 1e4), 'k:', zorder=-1)
plt.plot((2,200), (1e3, 1e5), 'k:', zorder=-1)
# plt.plot((2,200), (1e3, 1e7), 'k:', zorder=-1)
plt.plot((2,200), (1e6, 1e8), 'k:', zorder=-1)
# plt.plot((2,200), (1e6, 1e10), 'k:', zorder=-1)
plt.ylim(ymin=10**-0.2, ymax=ymax) # Set back the default maximum.
# ticks on axes
#axisHandle.invert_xaxis()
dimticklist = (2, 3, 5, 10, 20, 40) # TODO: should become input arg at some point?
dimannlist = (2, 3, 5, 10, 20, 40) # TODO: should become input arg at some point?
# TODO: All these should depend on (xlim, ylim)
axisHandle.set_xticks(dimticklist)
axisHandle.set_xticklabels([str(n) for n in dimannlist])
# axes limits
if rightlegend:
plt.xlim(1.8, 101) # 101 is 10 ** (numpy.log10(45/1.8)*1.25) * 1.8
else:
plt.xlim(1.8, 45) # Should depend on xmin and xmax
tmp = axisHandle.get_yticks()
tmp2 = []
for i in tmp:
tmp2.append('%d' % round(numpy.log10(i)))
axisHandle.set_yticklabels(tmp2)
if legend:
plt.legend(loc=0, numpoints=1)
def generateData(dataSet, target):
"""Returns an array of results to be plotted.
0th column is the ERT, 1st is the success rate, 2nd the number of
successes, 3rd the maximum number of function evaluations over all
trials (a mean is computed first but overwritten below), and
4th the median number of function evaluations of successful runs,
or numpy.nan.
"""
res = []
data = dataSet.detEvals([target])[0]
succ = (numpy.isnan(data) == False)
data[numpy.isnan(data)] = dataSet.maxevals[numpy.isnan(data)]
res.extend(toolsstats.sp(data, issuccessful=succ, allowinf=False))
res.append(numpy.mean(data))
if res[2] > 0:
res.append(toolsstats.prctile(data[succ], 50)[0])
else:
res.append(numpy.nan)
res[3] = numpy.max(dataSet.maxevals)
return res
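# Illustrative reading of the returned array, not part of the original file
# (numbers are hypothetical):
#
#   res = generateData(dataset, 1e-8)
#   res[0]  # ERT, e.g. 3.2e4 function evaluations
#   res[1]  # success rate in [0, 1]
#   res[2]  # number of successful trials
#   res[3]  # max of maxevals over all trials (see the overwrite above)
#   res[4]  # median #evals of successful runs, or numpy.nan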
def main(dictAlg, htmlFilePrefix, isBiobjective, target, sortedAlgs=None, outputdir='ppdata', verbose=True):
"""From a DataSetList, returns figures showing the scaling: ERT/dim vs dim.
One function and one target per figure.
``target`` can be a scalar, a list with one element or a
``pproc.TargetValues`` instance with one target.
``sortedAlgs`` is a list of string-identifies (folder names)
"""
# target becomes a TargetValues "list" with one element
target = pproc.TargetValues.cast([target] if numpy.isscalar(target) else target)
latex_commands_filename = os.path.join(outputdir, 'bbob_pproc_commands.tex')
assert isinstance(target, pproc.TargetValues)
if len(target) != 1:
raise ValueError('only a single target can be managed in ppfigs, ' + str(len(target)) + ' targets were given')
funInfos = ppfigparam.read_fun_infos(isBiobjective)
dictFunc = pproc.dictAlgByFun(dictAlg)
if sortedAlgs is None:
sortedAlgs = sorted(dictAlg.keys())
if not os.path.isdir(outputdir):
os.mkdir(outputdir)
for f in dictFunc:
filename = os.path.join(outputdir,'ppfigs_f%03d' % (f))
handles = []
fix_styles(len(sortedAlgs)) #
for i, alg in enumerate(sortedAlgs):
dictDim = dictFunc[f][alg].dictByDim() # this does not look like the most obvious solution
#Collect data
dimert = []
ert = []
dimnbsucc = []
ynbsucc = []
nbsucc = []
dimmaxevals = []
maxevals = []
dimmedian = []
medianfes = []
for dim in sorted(dictDim):
assert len(dictDim[dim]) == 1
entry = dictDim[dim][0]
data = generateData(entry, target((f, dim))[0]) # TODO: here we might want a different target for each function
if 1 < 3 or data[2] == 0: # No success
dimmaxevals.append(dim)
maxevals.append(float(data[3])/dim)
if data[2] > 0:
dimmedian.append(dim)
medianfes.append(data[4]/dim)
dimert.append(dim)
ert.append(float(data[0])/dim)
if data[1] < 1.:
dimnbsucc.append(dim)
ynbsucc.append(float(data[0])/dim)
nbsucc.append('%d' % data[2])
# Draw lines
if 1 < 3: # new version
# omit the line if a point in between is missing
for idim in range(len(dimert)):
# plot line only if next dim < 2.1*dim (a hack)
if idim < len(dimert) - 1 and dimert[idim + 1] < 2.1 * dimert[idim]:
tmp = plt.plot(dimert[idim:idim+2], ert[idim:idim+2], **styles[i]) #label=alg, )
else: # plot remaining single points (some twice)
tmp = plt.plot(dimert[idim], ert[idim], **styles[i]) #label=alg, )
plt.setp(tmp[0], markeredgecolor=plt.getp(tmp[0], 'color'))
else: # to be removed
tmp = plt.plot(dimert, ert, **styles[i]) #label=alg, )
plt.setp(tmp[0], markeredgecolor=plt.getp(tmp[0], 'color'))
# For legend
# tmp = plt.plot([], [], label=alg.replace('..' + os.sep, '').strip(os.sep), **styles[i])
algorithmName = toolsdivers.str_to_latex(toolsdivers.strip_pathname1(alg))
tmp = plt.plot([], [], label = algorithmName, **styles[i])
plt.setp(tmp[0], markersize=12.,
markeredgecolor=plt.getp(tmp[0], 'color'))
if dimmaxevals:
tmp = plt.plot(dimmaxevals, maxevals, **styles[i])
plt.setp(tmp[0], markersize=20, #label=alg,
markeredgecolor=plt.getp(tmp[0], 'color'),
markeredgewidth=1,
markerfacecolor='None', linestyle='None')
handles.append(tmp)
#tmp2 = plt.plot(dimmedian, medianfes, ls='', marker='+',
# markersize=30, markeredgewidth=5,
# markeredgecolor=plt.getp(tmp, 'color'))[0]
#for i, n in enumerate(nbsucc):
# plt.text(dimnbsucc[i], numpy.array(ynbsucc[i])*1.85, n,
# verticalalignment='bottom',
# horizontalalignment='center')
bestalgentries = bestalg.loadBestAlgorithm(isBiobjective)
if bestalgentries:
bestalgdata = []
dimbestalg = list(df[0] for df in bestalgentries if df[1] == f)
dimbestalg.sort()
dimbestalg2 = []
for d in dimbestalg:
entry = bestalgentries[(d, f)]
tmp = entry.detERT(target((f, d)))[0]
if numpy.isfinite(tmp):
bestalgdata.append(float(tmp)/d)
dimbestalg2.append(d)
tmp = plt.plot(dimbestalg2, bestalgdata, color=refcolor, linewidth=10,
marker='d', markersize=25, markeredgecolor=refcolor, zorder=-1
#label='best 2009',
)
handles.append(tmp)
if show_significance: # plot significance-stars
xstar, ystar = [], []
dims = sorted(pproc.dictAlgByDim(dictFunc[f]))
for i, dim in enumerate(dims):
datasets = pproc.dictAlgByDim(dictFunc[f])[dim]
assert all([len(datasets[ialg]) == 1 for ialg in sortedAlgs if datasets[ialg]])
dsetlist = [datasets[ialg][0] for ialg in sortedAlgs if datasets[ialg]]
if len(dsetlist) > 1:
arzp, arialg = toolsstats.significance_all_best_vs_other(dsetlist, target((f, dim)))
if arzp[0][1] * len(dims) < show_significance:
ert = dsetlist[arialg[0]].detERT(target((f, dim)))[0]
if ert < numpy.inf:
xstar.append(dim)
ystar.append(ert/dim)
plt.plot(xstar, ystar, 'k*', markerfacecolor=None, markeredgewidth=2, markersize=0.5*styles[0]['markersize'])
if f in funInfos.keys():
plt.gca().set_title(funInfos[f])
isLegend = False
if legend:
plotLegend(handles)
elif 1 < 3:
if f in (1, 24, 101, 130) and len(sortedAlgs) < 6: # 6 elements at most in the boxed legend
isLegend = True
beautify(legend=isLegend, rightlegend=legend)
plt.text(plt.xlim()[0], plt.ylim()[0], 'target ' + target.label_name() + ': ' + target.label(0)) # TODO: check
saveFigure(filename, verbose=verbose)
plt.close()
htmlFile = os.path.join(outputdir, htmlFilePrefix + '.html')
# generate commands in tex file:
try:
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
alg_definitions = []
alg_definitions_html = ''
for i in range(len(sortedAlgs)):
symb = r'{%s%s}' % (color_to_latex(styles[i]['color']),
marker_to_latex(styles[i]['marker']))
symb_html = '<span style="color:%s;">%s</span>' % (styles[i]['color'], marker_to_html(styles[i]['marker']))
alg_definitions.append((', ' if i > 0 else '') + '%s: %s' % (symb, '\\algorithm' + abc[i % len(abc)]))
alg_definitions_html += (', ' if i > 0 else '') + '%s: %s' % (symb_html, toolsdivers.str_to_latex(toolsdivers.strip_pathname1(sortedAlgs[i])))
toolsdivers.prepend_to_file(latex_commands_filename,
[#'\\providecommand{\\bbobppfigsftarget}{\\ensuremath{10^{%s}}}'
# % target.loglabel(0), # int(numpy.round(numpy.log10(target))),
'\\providecommand{\\bbobppfigslegend}[1]{',
scaling_figure_caption(target),
'Legend: '] + alg_definitions + ['}']
)
toolsdivers.prepend_to_file(latex_commands_filename,
['\\providecommand{\\bbobECDFslegend}[1]{',
ecdfs_figure_caption(target), '}']
)
toolsdivers.replace_in_file(htmlFile, '##bbobppfigslegend##', scaling_figure_caption_html(target) + 'Legend: ' + alg_definitions_html)
toolsdivers.replace_in_file(htmlFile, '##bbobECDFslegend5##', ecdfs_figure_caption_html(target, 5))
toolsdivers.replace_in_file(htmlFile, '##bbobECDFslegend20##', ecdfs_figure_caption_html(target, 20))
if verbose:
print 'Wrote commands and legend to %s' % filename
# this is obsolete (however check templates)
filename = os.path.join(outputdir,'ppfigs.tex')
f = open(filename, 'w')
f.write('% Do not modify this file: calls to post-processing software'
+ ' will overwrite any modification.\n')
f.write('Legend: ')
for i in range(0, len(sortedAlgs)):
symb = r'{%s%s}' % (color_to_latex(styles[i]['color']),
marker_to_latex(styles[i]['marker']))
f.write((', ' if i > 0 else '') + '%s:%s' % (symb, writeLabels(sortedAlgs[i])))
f.close()
if verbose:
print '(obsolete) Wrote legend in %s' % filename
except IOError:
raise
handles.append(tmp)
if f in funInfos.keys():
plt.gca().set_title(funInfos[f])
beautify(rightlegend=legend)
if legend:
plotLegend(handles)
else:
if f in (1, 24, 101, 130):
plt.legend()
saveFigure(filename, figFormat=genericsettings.getFigFormats(), verbose=verbose)
plt.close()
|
oaelhara/numbbo
|
code-postprocessing/bbob_pproc/compall/ppfigs.py
|
Python
|
bsd-3-clause
| 22,256
|
#!/usr/bin/env python
from art.splqueryutils.sessions import *
def output_highly_similar_sessions(threshold=.5):
out = open('similar_sessions.out', 'w')
jsonfiles = get_json_files(limit=1000*BYTES_IN_MB)
all_sessions = sessionize_searches(jsonfiles)
for (user, user_sessions) in all_sessions.iteritems():
compute_intrasession_similarity(user_sessions, normalized_edit_distance)
for (id, session_info) in user_sessions.iteritems():
if len(session_info['searches']) < 2:
continue
average_difference = sum(session_info['difference']) / float(len(session_info['difference']))
if average_difference < threshold:
out.write('= = = = =\n')
out.write(user + '\t' + 'session ' + str(id) + '\n')
for (time, search) in session_info['searches']:
out.write('\t' + str(time) + '\t' + search.encode('ascii', 'ignore') + '\n')
out.flush()
out.close()
output_highly_similar_sessions()
|
stevedh/queryutils
|
scripts/print_similar_sessions.py
|
Python
|
bsd-3-clause
| 1,054
|
"""
Cement core handler module.
"""
import re
from ..core import exc, backend, meta
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
class CementBaseHandler(meta.MetaMixin):
"""Base handler class that all Cement Handlers should subclass from."""
class Meta:
"""
Handler meta-data (can also be passed as keyword arguments to the
parent class).
"""
label = None
"""The string identifier of this handler."""
interface = None
"""The interface that this class implements."""
config_section = None
"""
A config [section] to merge config_defaults with.
Note: Though Meta.config_section defaults to None, Cement will
set this to the value of ``<interface_label>.<handler_label>`` if
no section is set by the user/developer.
"""
config_defaults = None
"""
A config dictionary that is merged into the applications config
in the [<config_section>] block. These are defaults and do not
override any existing defaults under that section.
"""
overridable = False
"""
Whether or not handler can be overridden by
``CementApp.Meta.handler_override_options``. Will be listed as an
available choice to override the specific handler (i.e.
``CementApp.Meta.output_handler``, etc).
"""
def __init__(self, **kw):
super(CementBaseHandler, self).__init__(**kw)
self.app = None
def _setup(self, app_obj):
"""
The _setup function is called during application initialization and
must 'setup' the handler object making it ready for the framework
or the application to make further calls to it.
:param app_obj: The application object.
:returns: None
"""
self.app = app_obj
if self._meta.config_section is None:
self._meta.config_section = "%s.%s" % \
(self._meta.interface.IMeta.label, self._meta.label)
if self._meta.config_defaults is not None:
LOG.debug("merging config defaults from '%s' " % self +
"into section '%s'" % self._meta.config_section)
dict_obj = dict()
dict_obj[self._meta.config_section] = self._meta.config_defaults
self.app.config.merge(dict_obj, override=False)
def get(handler_type, handler_label, *args):
"""
Get a handler object.
Required Arguments:
:param handler_type: The type of handler (i.e. 'output')
:type handler_type: str
:param handler_label: The label of the handler (i.e. 'json')
:type handler_label: str
:param fallback: A fallback value to return if handler_label doesn't
exist.
:returns: An uninstantiated handler object
:raises: cement.core.exc.FrameworkError
Usage:
from cement.core import handler
output = handler.get('output', 'json')
output.render(dict(foo='bar'))
"""
if handler_type not in backend.__handlers__:
raise exc.FrameworkError("handler type '%s' does not exist!" %
handler_type)
if handler_label in backend.__handlers__[handler_type]:
return backend.__handlers__[handler_type][handler_label]
elif len(args) > 0:
return args[0]
else:
raise exc.FrameworkError("handlers['%s']['%s'] does not exist!" %
(handler_type, handler_label))
def list(handler_type):
"""
Return a list of handlers for a given type.
:param handler_type: The type of handler (i.e. 'output')
:returns: List of handlers that match `type`.
:rtype: list
:raises: cement.core.exc.FrameworkError
"""
if handler_type not in backend.__handlers__:
raise exc.FrameworkError("handler type '%s' does not exist!" %
handler_type)
res = []
for label in backend.__handlers__[handler_type]:
if label == '__interface__':
continue
res.append(backend.__handlers__[handler_type][label])
return res
def define(interface):
"""
Define a handler based on the provided interface. Defines a handler type
based on <interface>.IMeta.label.
:param interface: The interface class that defines the interface to be
implemented by handlers.
:raises: cement.core.exc.InterfaceError
:raises: cement.core.exc.FrameworkError
Usage:
.. code-block:: python
from cement.core import handler
handler.define(IDatabaseHandler)
"""
if not hasattr(interface, 'IMeta'):
raise exc.InterfaceError("Invalid %s, " % interface +
"missing 'IMeta' class.")
if not hasattr(interface.IMeta, 'label'):
raise exc.InterfaceError("Invalid %s, " % interface +
"missing 'IMeta.label' class.")
LOG.debug("defining handler type '%s' (%s)" %
(interface.IMeta.label, interface.__name__))
if interface.IMeta.label in backend.__handlers__:
raise exc.FrameworkError("Handler type '%s' already defined!" %
interface.IMeta.label)
backend.__handlers__[interface.IMeta.label] = {'__interface__': interface}
def defined(handler_type):
"""
Test whether a handler type is defined.
:param handler_type: The name or 'type' of the handler (i.e. 'logging').
:returns: True if the handler type is defined, False otherwise.
:rtype: boolean
"""
if handler_type in backend.__handlers__:
return True
else:
return False
def register(handler_obj):
"""
Register a handler object to a handler. If the same object is already
registered then no exception is raised, however if a different object
attempts to be registered to the same name a FrameworkError is
raised.
:param handler_obj: The uninstantiated handler object to register.
:raises: cement.core.exc.InterfaceError
:raises: cement.core.exc.FrameworkError
Usage:
.. code-block:: python
from cement.core import handler
class MyDatabaseHandler(object):
class Meta:
interface = IDatabase
label = 'mysql'
def connect(self):
...
handler.register(MyDatabaseHandler)
"""
orig_obj = handler_obj
# for checks
obj = orig_obj()
if not hasattr(obj._meta, 'label') or not obj._meta.label:
raise exc.InterfaceError("Invalid handler %s, " % orig_obj +
"missing '_meta.label'.")
if not hasattr(obj._meta, 'interface') or not obj._meta.interface:
raise exc.InterfaceError("Invalid handler %s, " % orig_obj +
"missing '_meta.interface'.")
# translate dashes to underscores
orig_obj.Meta.label = re.sub('-', '_', obj._meta.label)
obj._meta.label = re.sub('-', '_', obj._meta.label)
handler_type = obj._meta.interface.IMeta.label
LOG.debug("registering handler '%s' into handlers['%s']['%s']" %
(orig_obj, handler_type, obj._meta.label))
if handler_type not in backend.__handlers__:
raise exc.FrameworkError("Handler type '%s' doesn't exist." %
handler_type)
if obj._meta.label in backend.__handlers__[handler_type] and \
backend.__handlers__[handler_type][obj._meta.label] != obj:
raise exc.FrameworkError("handlers['%s']['%s'] already exists" %
(handler_type, obj._meta.label))
interface = backend.__handlers__[handler_type]['__interface__']
if hasattr(interface.IMeta, 'validator'):
interface.IMeta().validator(obj)
else:
LOG.debug("Interface '%s' does not have a validator() function!" %
interface)
backend.__handlers__[handler_type][obj.Meta.label] = orig_obj
def registered(handler_type, handler_label):
"""
Check if a handler is registered.
:param handler_type: The type of handler (interface label)
:param handler_label: The label of the handler
:returns: True if the handler is registered, False otherwise
:rtype: boolean
"""
if handler_type in backend.__handlers__ and \
handler_label in backend.__handlers__[handler_type]:
return True
return False
def resolve(handler_type, handler_def, raise_error=True):
"""
Resolves the actual handler, as it can be either a string identifying
the handler to load from backend.__handlers__, or it can be an
instantiated or non-instantiated handler class.
:param handler_type: The type of handler (aka the interface label)
:param handler_def: The handler as defined in CementApp.Meta.
:type handler_def: str, uninstantiated object, or instantiated object
:param raise_error: Whether or not to raise an exception if unable
to resolve the handler.
:type raise_error: boolean
:returns: The instantiated handler object.
"""
han = None
if isinstance(handler_def, str):
han = get(handler_type, handler_def)()
elif hasattr(handler_def, '_meta'):
if not registered(handler_type, handler_def._meta.label):
register(handler_def.__class__)
han = handler_def
elif hasattr(handler_def, 'Meta'):
han = handler_def()
if not registered(handler_type, han._meta.label):
register(handler_def)
msg = "Unable to resolve handler '%s' of type '%s'" % \
(handler_def, handler_type)
if han is not None:
return han
elif han is None and raise_error:
raise exc.FrameworkError(msg)
elif han is None:
LOG.debug(msg)
return None
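# Illustrative sketch, not part of the original module; 'output', 'json' and
# JsonOutputHandler are placeholder names, not guaranteed to exist:
#
#   han = resolve('output', 'json')               # string -> looked up and instantiated
#   han = resolve('output', JsonOutputHandler())  # instance -> registered if needed
#   han = resolve('output', JsonOutputHandler)    # class -> instantiated and registered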
|
rjdp/cement
|
cement/core/handler.py
|
Python
|
bsd-3-clause
| 9,853
|
import typing
from ...core import AtomicExpr, Expr, Integer, Symbol, Tuple
from ...core.assumptions import StdFactKB
from ...core.decorators import _sympifyit, call_highest_priority
from ...core.logic import fuzzy_bool
from ...core.sympify import sympify
from ...functions import adjoint, conjugate
from ...logic import false
from ...simplify import simplify
from ..matrices import ShapeError
class MatrixExpr(Expr):
"""Superclass for Matrix Expressions
MatrixExprs represent abstract matrices, linear transformations represented
within a particular basis.
Examples
========
>>> A = MatrixSymbol('A', 3, 3)
>>> y = MatrixSymbol('y', 3, 1)
>>> x = (A.T*A).inverse() * A * y
See Also
========
MatrixSymbol
MatAdd
MatMul
Transpose
Inverse
"""
_op_priority = 11.0
is_Matrix = True
is_MatrixExpr = True
is_Identity: typing.Optional[bool] = None
is_Inverse = False
is_Transpose = False
is_ZeroMatrix = False
is_MatAdd = False
is_MatMul = False
def __new__(cls, *args, **kwargs):
args = map(sympify, args)
return Expr.__new__(cls, *args, **kwargs)
# The following is adapted from the core Expr object
def __neg__(self):
from .matmul import MatMul
return MatMul(-1, self).doit()
def __abs__(self):
raise NotImplementedError
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
from .matadd import MatAdd
return MatAdd(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
from .matadd import MatAdd
return MatAdd(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
from .matadd import MatAdd
return MatAdd(self, -other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
from .matadd import MatAdd
return MatAdd(other, -self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
from .matmul import MatMul
return MatMul(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
from .matmul import MatMul
return MatMul(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
from .inverse import Inverse
from .matpow import MatPow
if not self.is_square:
raise ShapeError(f'Power of non-square matrix {self}')
if self.is_Identity:
return self
elif other == -1:
return Inverse(self)
elif other == 0:
return Identity(self.rows)
elif other == 1:
return self
return MatPow(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
raise NotImplementedError('Matrix Power not defined')
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
return self * other**Integer(-1)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__truediv__')
def __rtruediv__(self, other):
raise NotImplementedError()
# return MatMul(other, Pow(self, -1))
@property
def rows(self):
return self.shape[0]
@property
def cols(self):
return self.shape[1]
@property
def is_square(self):
return self.rows == self.cols
def _eval_conjugate(self):
from .adjoint import Adjoint
from .transpose import Transpose
return Adjoint(Transpose(self))
def _eval_inverse(self):
from .inverse import Inverse
return Inverse(self)
def _eval_transpose(self):
from .transpose import Transpose
return Transpose(self)
def _eval_power(self, other):
from .matpow import MatPow
return MatPow(self, other)
def _eval_simplify(self, ratio, measure):
if self.is_Atom:
return self
else:
return self.__class__(*[simplify(x, ratio=ratio, measure=measure)
for x in self.args])
def _eval_adjoint(self):
from .adjoint import Adjoint
return Adjoint(self)
def _entry(self, i, j):
raise NotImplementedError('Indexing not implemented '
f'for {self.__class__.__name__}')
def adjoint(self):
return adjoint(self)
def conjugate(self):
return conjugate(self)
def transpose(self):
from .transpose import transpose
return transpose(self)
T = property(transpose, None, None, 'Matrix transposition.')
def inverse(self):
return self._eval_inverse()
def valid_index(self, i, j):
def is_valid(idx):
return isinstance(idx, (int, Integer, Symbol, Expr))
return (is_valid(i) and is_valid(j) and
(0 <= i) != false and (i < self.rows) != false and
(0 <= j) != false and (j < self.cols) != false)
def __getitem__(self, key):
if not isinstance(key, tuple) and isinstance(key, slice):
from .slice import MatrixSlice
return MatrixSlice(self, key, (0, None, 1))
if isinstance(key, tuple) and len(key) == 2:
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
from .slice import MatrixSlice
return MatrixSlice(self, i, j)
i, j = sympify(i), sympify(j)
if self.valid_index(i, j) is not False:
return self._entry(i, j)
else:
raise IndexError(f'Invalid indices ({i}, {j})')
elif isinstance(key, (int, Integer)):
# row-wise decomposition of matrix
rows, cols = self.shape
if not (isinstance(rows, Integer) and isinstance(cols, Integer)):
raise IndexError('Single index only supported for '
'non-symbolic matrix shapes.')
key = sympify(key)
i = key // cols
j = key % cols
if self.valid_index(i, j) is not False:
return self._entry(i, j)
else:
raise IndexError(f'Invalid index {key}')
elif isinstance(key, (Symbol, Expr)):
raise IndexError('Single index only supported for '
'non-symbolic indices.')
raise IndexError(f'Invalid index, wanted {self}[i,j]')
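# Illustrative sketch, not part of the original file: a single integer index
# decomposes row-wise for a concrete shape, so for a 3x4 symbol, index 5 maps
# to row 5 // 4 == 1 and column 5 % 4 == 1:
#
#   >>> A = MatrixSymbol('A', 3, 4)
#   >>> A[5] == A[1, 1]
#   True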
def as_explicit(self):
"""
Returns a dense Matrix with elements represented explicitly
Returns an object of type ImmutableMatrix.
Examples
========
>>> I = Identity(3)
>>> I
I
>>> I.as_explicit()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_mutable: returns mutable Matrix type
"""
from ..immutable import ImmutableMatrix
return ImmutableMatrix([[self[i, j]
for j in range(self.cols)]
for i in range(self.rows)])
def as_mutable(self):
"""
Returns a dense, mutable matrix with elements represented explicitly
Examples
========
>>> I = Identity(3)
>>> I
I
>>> I.shape
(3, 3)
>>> I.as_mutable()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_explicit: returns ImmutableMatrix
"""
return self.as_explicit().as_mutable()
def __array__(self):
from numpy import empty
a = empty(self.shape, dtype=object)
for i in range(self.rows):
for j in range(self.cols):
a[i, j] = self[i, j]
return a
def equals(self, other):
"""
Test elementwise equality between matrices, potentially of different
types
>>> Identity(3).equals(eye(3))
True
"""
if all(x.is_Integer for x in self.shape):
return self.as_explicit().equals(other)
def canonicalize(self):
return self
def as_coeff_mmul(self):
from .matmul import MatMul
return 1, MatMul(self)
class MatrixElement(Expr):
"""Element of the matrix expression."""
parent = property(lambda self: self.args[0])
i = property(lambda self: self.args[1])
j = property(lambda self: self.args[2])
_diff_wrt = True
def __new__(cls, name, n, m):
n, m = map(sympify, (n, m))
from .. import MatrixBase
if isinstance(name, MatrixBase):
if n.is_Integer and m.is_Integer:
return name[n, m]
name = sympify(name)
return Expr.__new__(cls, name, n, m)
def xreplace(self, rule):
if self in rule:
return rule[self]
else:
return self
class MatrixSymbol(MatrixExpr, AtomicExpr):
"""Symbolic representation of a Matrix object
Creates a Diofant Symbol to represent a Matrix. This matrix has a shape and
can be included in Matrix Expressions
>>> A = MatrixSymbol('A', 3, 4) # A 3 by 4 Matrix
>>> B = MatrixSymbol('B', 4, 3) # A 4 by 3 Matrix
>>> A.shape
(3, 4)
>>> 2*A*B + Identity(3)
I + 2*A*B
"""
is_Atom = True
is_number = False
def __new__(cls, name, n, m, **assumptions):
n, m = sympify(n), sympify(m)
is_commutative = fuzzy_bool(assumptions.get('commutative', False))
assumptions['commutative'] = is_commutative
obj = Expr.__new__(cls)
obj._name = name
obj._shape = (n, m)
obj._assumptions = StdFactKB(assumptions)
return obj
def _hashable_content(self):
return ((self.name, self.shape) +
tuple(sorted((k, v) for k, v in self._assumptions.items()
if v is not None)))
@property
def shape(self):
return self._shape
@property
def name(self):
return self._name
def _eval_subs(self, old, new):
# only do substitutions in shape
shape = Tuple(*self.shape)._subs(old, new)
return MatrixSymbol(self.name, *shape)
def __call__(self, *args):
raise TypeError(f'{self.__class__} object is not callable')
def _entry(self, i, j):
return MatrixElement(self, i, j)
@property
def free_symbols(self):
return {self}
def doit(self, **hints):
if hints.get('deep', True):
return type(self)(self.name,
*(_.doit(**hints) for _ in self.shape),
**self._assumptions._generator)
else:
return self
class Identity(MatrixExpr):
"""The Matrix Identity I - multiplicative identity
>>> A = MatrixSymbol('A', 3, 5)
>>> I = Identity(3)
>>> I*A
A
"""
is_Identity = True
def __new__(cls, n):
return super().__new__(cls, sympify(n))
@property
def rows(self):
return self.args[0]
@property
def cols(self):
return self.args[0]
@property
def shape(self):
return self.args[0], self.args[0]
def _eval_transpose(self):
return self
def _eval_trace(self):
return self.rows
def _eval_inverse(self):
return self
def conjugate(self):
return self
def _entry(self, i, j):
if i == j:
return Integer(1)
else:
return Integer(0)
def _eval_determinant(self):
return Integer(1)
class ZeroMatrix(MatrixExpr):
"""The Matrix Zero 0 - additive identity
>>> A = MatrixSymbol('A', 3, 5)
>>> Z = ZeroMatrix(3, 5)
>>> A+Z
A
>>> Z*A.T
0
"""
is_ZeroMatrix = True
def __new__(cls, m, n):
return super().__new__(cls, m, n)
@property
def shape(self):
return self.args[0], self.args[1]
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
if other != 1 and not self.is_square:
raise ShapeError(f'Power of non-square matrix {self}')
if other == 0:
return Identity(self.rows)
if other < 1:
raise ValueError('Matrix det == 0; not invertible.')
return self
def _eval_transpose(self):
return ZeroMatrix(self.cols, self.rows)
def _eval_trace(self):
return Integer(0)
def _eval_determinant(self):
return Integer(0)
def conjugate(self):
return self
def _entry(self, i, j):
return Integer(0)
def __bool__(self):
return False
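# Illustrative sketch, not part of the original file: ZeroMatrix is falsy and
# its __pow__ above absorbs powers >= 1, raises for powers below 1, and
# returns Identity for power 0:
#
#   >>> Z = ZeroMatrix(2, 2)
#   >>> bool(Z)
#   False
#   >>> Z ** 3
#   0
#   >>> Z ** 0
#   I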
|
diofant/diofant
|
diofant/matrices/expressions/matexpr.py
|
Python
|
bsd-3-clause
| 13,116
|
# -*- coding: utf-8 -*-
from raw._ebuttlm import *
from raw import _ebuttlm as raw
from raw import _ebuttp as ebuttp
from pyxb.utils.domutils import BindingDOMSupport
namespace_prefix_map = {
'ebuttlm': Namespace,
'ebuttp': ebuttp.Namespace
}
class message_type(raw.message_type):
@classmethod
def __check_bds(cls, bds):
if bds:
return bds
else:
return BindingDOMSupport(
namespace_prefix_map=namespace_prefix_map
)
def toDOM(self, bds=None, parent=None, element_name=None):
return super(message_type, self).toDOM(
bds=self.__check_bds(bds),
parent=parent,
element_name=element_name
)
def toxml(self, encoding=None, bds=None, root_only=False, element_name=None):
dom = self.toDOM(self.__check_bds(bds), element_name=element_name)
if root_only:
dom = dom.documentElement
return dom.toprettyxml(
encoding=encoding,
indent=' '
)
raw.message_type._SetSupersedingClass(message_type)
|
ebu/ebu-tt-live-toolkit
|
ebu_tt_live/bindings/_ebuttlm.py
|
Python
|
bsd-3-clause
| 1,100
|
from __future__ import unicode_literals
from six import with_metaclass
import django
from django.forms.models import (
BaseModelFormSet, modelformset_factory,
ModelForm, _get_foreign_key, ModelFormMetaclass, ModelFormOptions
)
if django.VERSION >= (1, 8):
# RelatedObject has been replaced with ForeignObjectRel
from django.db.models.fields.related import ForeignObjectRel
else:
from django.db.models.fields.related import RelatedObject
from modelcluster.models import get_all_child_relations
class BaseTransientModelFormSet(BaseModelFormSet):
""" A ModelFormSet that doesn't assume that all its initial data instances exist in the db """
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
pk_name = self.model._meta.pk.name
pk_key = "%s-%s" % (self.add_prefix(i), pk_name)
pk_val = self.data[pk_key]
if pk_val:
kwargs['instance'] = self.queryset.get(**{pk_name: pk_val})
else:
kwargs['instance'] = self.model()
elif i < self.initial_form_count():
kwargs['instance'] = self.get_queryset()[i]
elif self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i-self.initial_form_count()]
except IndexError:
pass
# bypass BaseModelFormSet's own _construct_form
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def transientmodelformset_factory(model, formset=BaseTransientModelFormSet, **kwargs):
return modelformset_factory(model, formset=formset, **kwargs)
class BaseChildFormSet(BaseTransientModelFormSet):
def __init__(self, data=None, files=None, instance=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.rel.to()
else:
self.instance = instance
if django.VERSION >= (1, 8):
self.rel_name = ForeignObjectRel(self.fk, self.fk.rel.to, related_name=self.fk.rel.related_name).get_accessor_name()
else:
self.rel_name = RelatedObject(self.fk.rel.to, self.model, self.fk).get_accessor_name()
if queryset is None:
queryset = getattr(self.instance, self.rel_name).all()
super(BaseChildFormSet, self).__init__(data, files, queryset=queryset, **kwargs)
def save(self, commit=True):
# The base ModelFormSet's save(commit=False) will populate the lists
# self.changed_objects, self.deleted_objects and self.new_objects;
# use these to perform the appropriate updates on the relation's manager.
saved_instances = super(BaseChildFormSet, self).save(commit=False)
manager = getattr(self.instance, self.rel_name)
# if model has a sort_order_field defined, assign order indexes to the attribute
# named in it
if self.can_order and hasattr(self.model, 'sort_order_field'):
sort_order_field = getattr(self.model, 'sort_order_field')
for i, form in enumerate(self.ordered_forms):
setattr(form.instance, sort_order_field, i)
# If the manager has existing instances with a blank ID, we have no way of knowing
# whether these correspond to items in the submitted data. We'll assume that they do,
# as that's the most common case (i.e. the formset contains the full set of child objects,
# not just a selection of additions / updates) and so we delete all ID-less objects here
# on the basis that they will be re-added by the formset saving mechanism.
no_id_instances = [obj for obj in manager.all() if obj.pk is None]
if no_id_instances:
manager.remove(*no_id_instances)
manager.add(*saved_instances)
manager.remove(*self.deleted_objects)
if commit:
manager.commit()
return saved_instances
# Prior to Django 1.7, objects are deleted from the database even when commit=False:
# https://code.djangoproject.com/ticket/10284
# This was fixed in https://github.com/django/django/commit/65e03a424e82e157b4513cdebb500891f5c78363
# We rely on the fixed behaviour here, so until 1.7 ships we need to override save_existing_objects
# with a patched version.
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
try:
forms_to_delete = self.deleted_forms
except AttributeError:
forms_to_delete = []
for form in self.initial_forms:
pk_name = self._pk_field.name
raw_pk_value = form._raw_value(pk_name)
# clean() for different types of PK fields can sometimes return
# the model instance, and sometimes the PK. Handle either.
pk_value = form.fields[pk_name].clean(raw_pk_value)
pk_value = getattr(pk_value, 'pk', pk_value)
obj = self._existing_object(pk_value)
if form in forms_to_delete:
self.deleted_objects.append(obj)
# === BEGIN PATCH ===
if commit:
obj.delete()
# === END PATCH ===
continue
if form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def childformset_factory(parent_model, model, form=ModelForm,
formset=BaseChildFormSet, fk_name=None, fields=None, exclude=None,
extra=3, can_order=False, can_delete=True, max_num=None,
formfield_callback=None, widgets=None):
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
if exclude is None:
exclude = []
exclude += [fk.name]
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
# if the model supplies a sort_order_field, enable ordering regardless of
# the current setting of can_order
'can_order': (can_order or hasattr(model, 'sort_order_field')),
'fields': fields,
'exclude': exclude,
'max_num': max_num,
'widgets': widgets,
}
FormSet = transientmodelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
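# Illustrative usage sketch, not part of the original module; Band and
# BandMember are hypothetical models where BandMember has a ForeignKey to Band:
#
#   BandMemberFormSet = childformset_factory(Band, BandMember, extra=2)
#   formset = BandMemberFormSet(request.POST, instance=band)
#   if formset.is_valid():
#       formset.save(commit=False)  # updates the in-memory relation on `band`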
class ClusterFormOptions(ModelFormOptions):
def __init__(self, options=None):
super(ClusterFormOptions, self).__init__(options=options)
self.formsets = getattr(options, 'formsets', None)
self.exclude_formsets = getattr(options, 'exclude_formsets', None)
class ClusterFormMetaclass(ModelFormMetaclass):
extra_form_count = 3
def __new__(cls, name, bases, attrs):
try:
parents = [b for b in bases if issubclass(b, ClusterForm)]
except NameError:
# We are defining ClusterForm itself.
parents = None
# grab any formfield_callback that happens to be defined in attrs -
# so that we can pass it on to child formsets - before ModelFormMetaclass deletes it.
# BAD METACLASS NO BISCUIT.
formfield_callback = attrs.get('formfield_callback')
new_class = super(ClusterFormMetaclass, cls).__new__(cls, name, bases, attrs)
if not parents:
return new_class
# ModelFormMetaclass will have set up new_class._meta as a ModelFormOptions instance;
# replace that with ClusterFormOptions so that we can access _meta.formsets
opts = new_class._meta = ClusterFormOptions(getattr(new_class, 'Meta', None))
if opts.model:
formsets = {}
for rel in get_all_child_relations(opts.model):
# to build a childformset class from this relation, we need to specify:
# - the base model (opts.model)
# - the child model (rel.field.model)
# - the fk_name from the child model to the base (rel.field.name)
rel_name = rel.get_accessor_name()
# apply 'formsets' and 'exclude_formsets' rules from meta
if opts.formsets is not None and rel_name not in opts.formsets:
continue
if opts.exclude_formsets and rel_name in opts.exclude_formsets:
continue
try:
widgets = opts.widgets.get(rel_name)
except AttributeError: # thrown if opts.widgets is None
widgets = None
kwargs = {
'extra': cls.extra_form_count,
'formfield_callback': formfield_callback,
'fk_name': rel.field.name,
'widgets': widgets
}
# see if opts.formsets looks like a dict; if so, allow the value
# to override kwargs
try:
kwargs.update(opts.formsets.get(rel_name))
except AttributeError:
pass
formset = childformset_factory(opts.model, rel.field.model, **kwargs)
formsets[rel_name] = formset
new_class.formsets = formsets
return new_class
class ClusterForm(with_metaclass(ClusterFormMetaclass, ModelForm)):
def __init__(self, data=None, files=None, instance=None, prefix=None, **kwargs):
super(ClusterForm, self).__init__(data, files, instance=instance, prefix=prefix, **kwargs)
self.formsets = {}
for rel_name, formset_class in self.__class__.formsets.items():
if prefix:
formset_prefix = "%s-%s" % (prefix, rel_name)
else:
formset_prefix = rel_name
self.formsets[rel_name] = formset_class(data, files, instance=instance, prefix=formset_prefix)
def as_p(self):
form_as_p = super(ClusterForm, self).as_p()
return form_as_p + ''.join([formset.as_p() for formset in self.formsets.values()])
def is_valid(self):
form_is_valid = super(ClusterForm, self).is_valid()
formsets_are_valid = all([formset.is_valid() for formset in self.formsets.values()])
return form_is_valid and formsets_are_valid
def save(self, commit=True):
instance = super(ClusterForm, self).save(commit=commit)
# ensure save_m2m is called even if commit = false. We don't fully support m2m fields yet,
# but if they perform save_form_data in a way that happens to play well with ClusterableModel
# (as taggit's manager does), we want that to take effect immediately, not just on db save
if not commit:
self.save_m2m()
for formset in self.formsets.values():
formset.instance = instance
formset.save(commit=commit)
return instance
|
thenewguy/django-modelcluster
|
modelcluster/forms.py
|
Python
|
bsd-3-clause
| 11,270
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2014
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** See LICENSE.TXT for license details
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
import sys
import os
"""
module to support both PySide and PyQt4 interfaces
from QtVariant import QtGui, QtCore
"""
default_variant = 'PySide'
variant = 'PySide'
#env_api = os.environ.get('QT_API', 'pyside')
#env_api = 'pyside'
#if '--pyside' in sys.argv:
# variant = 'PySide'
#elif '--pyqt4' in sys.argv:
# variant = 'PyQt4'
#elif env_api == 'pyside':
# variant = 'PySide'
#elif env_api == 'pyqt':
# variant = 'PyQt4'
#else:
# variant = default_variant
if variant == 'PySide':
from PySide import QtGui, QtCore
# This will be passed on to new versions of matplotlib
os.environ['QT_API'] = 'pyside'
def QtLoadUI(uifile):
from PySide import QtUiTools
loader = QtUiTools.QUiLoader()
uif = QtCore.QFile(uifile)
uif.open(QtCore.QFile.ReadOnly)
result = loader.load(uif)
uif.close()
return result
elif variant == 'PyQt4':
import sip
api2_classes = [
'QData', 'QDateTime', 'QString', 'QTextStream',
'QTime', 'QUrl', 'QVariant',
]
for cl in api2_classes:
sip.setapi(cl, 2)
from PyQt4 import QtGui, QtCore
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
QtCore.QString = str
os.environ['QT_API'] = 'pyqt'
def QtLoadUI(uifile):
from PyQt4 import uic
return uic.loadUi(uifile)
else:
raise ImportError("Python Variant not specified")
__all__ = ['QtGui', 'QtCore', 'QtLoadUI', 'variant']  # __all__ entries must be strings
|
ncareol/lrose-soloPy
|
lrose_solopy/QtVariant.py
|
Python
|
bsd-3-clause
| 1,876
|
from pyglet.libs.darwin.cocoapy import *
class PygletWindow_Implementation:
PygletWindow = ObjCSubclass('NSWindow', 'PygletWindow')
@PygletWindow.method('B')
def canBecomeKeyWindow(self):
return True
# When the window is being resized, it enters into a mini event loop that
# only looks at mouseDragged and mouseUp events, blocking everything else.
# Among other things, this makes it impossible to run an NSTimer to call the
# idle() function in order to update the view during the resize. So we
# override this method, called by the resizing event loop, and call the
# idle() function from here. This *almost* works. I can't figure out what
# is happening at the very beginning of a resize event. The NSView's
# viewWillStartLiveResize method is called and then nothing happens until
# the mouse is dragged. I think NSApplication's nextEventMatchingMask_etc
# method is being called instead of this one. I don't really feel like
# subclassing NSApplication just to fix this. Also, to prevent white flashes
# while resizing, we must also call idle() from the view's reshape method.
@PygletWindow.method(b'@' + NSUIntegerEncoding + b'@@B')
def nextEventMatchingMask_untilDate_inMode_dequeue_(self, mask, date, mode, dequeue):
if self.inLiveResize():
# Call the idle() method while we're stuck in a live resize event.
from pyglet import app
if app.event_loop is not None:
app.event_loop.idle()
event = send_super(self, 'nextEventMatchingMask:untilDate:inMode:dequeue:',
mask, date, mode, dequeue, argtypes=[NSUInteger, c_void_p, c_void_p, c_bool])
if event.value is None:
return 0
else:
return event.value
# Need this for set_size to not flash.
@PygletWindow.method(b'd' + NSRectEncoding)
def animationResizeTime_(self, newFrame):
return 0.0
class PygletToolWindow_Implementation:
PygletToolWindow = ObjCSubclass('NSPanel', 'PygletToolWindow')
@PygletToolWindow.method(b'@' + NSUIntegerEncoding + b'@@B')
def nextEventMatchingMask_untilDate_inMode_dequeue_(self, mask, date, mode, dequeue):
if self.inLiveResize():
# Call the idle() method while we're stuck in a live resize event.
from pyglet import app
if app.event_loop is not None:
app.event_loop.idle()
event = send_super(self, 'nextEventMatchingMask:untilDate:inMode:dequeue:',
mask, date, mode, dequeue, argtypes=[NSUInteger, c_void_p, c_void_p, c_bool])
if event.value is None:
return 0
else:
return event.value
# Need this for set_size to not flash.
@PygletToolWindow.method(b'd' + NSRectEncoding)
def animationResizeTime_(self, newFrame):
return 0.0
PygletWindow = ObjCClass('PygletWindow')
PygletToolWindow = ObjCClass('PygletToolWindow')
|
bitcraft/pyglet
|
pyglet/window/cocoa/pyglet_window.py
|
Python
|
bsd-3-clause
| 3,018
|
"""empty message
Revision ID: 5aa994117f07
Revises: 85a1c0888f3d
Create Date: 2017-09-28 04:03:38.834496
"""
# revision identifiers, used by Alembic.
revision = '5aa994117f07'
down_revision = '85a1c0888f3d'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('nu_release_item', sa.Column('fetch_attempts', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('nu_release_item', 'fetch_attempts')
### end Alembic commands ###
|
fake-name/ReadableWebProxy
|
alembic/versions/00032_5aa994117f07_.py
|
Python
|
bsd-3-clause
| 1,162
|
DEV_SERVER = True
DEBUG = True
DATABASES = {
"default": {
# "postgresql_psycopg2", "postgresql", "mysql", "sqlite3" or "oracle".
"ENGINE": "sqlite3",
# DB name or path to database file if using sqlite3.
"NAME": "cartridge.db",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
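# Note: the short ENGINE names above are an assumption about Mezzanine's
# settings machinery (set_dynamic_settings), which prefixes them with
# "django.db.backends." at startup. A plain Django settings module would
# need the full dotted path instead, e.g.:
#
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.sqlite3",
#         "NAME": "cartridge.db",
#     }
# }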
|
pygloo/bewype-mezzanine-project
|
mezzype/local_settings.py
|
Python
|
bsd-3-clause
| 558
|
from datetime import datetime
from decimal import Decimal
import os
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.forms.formsets import formset_factory
import commonware.log
import happyforms
from quieter_formset.formset import BaseFormSet
from tower import ugettext as _, ugettext_lazy as _lazy, ungettext as ngettext
from olympia import amo
from olympia.access import acl
from olympia.amo.fields import ColorField, ReCaptchaField
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import (
slug_validator, slugify, sorted_groupby, remove_icons)
from olympia.addons.models import (
Addon, AddonCategory, BlacklistedSlug, Category, Persona)
from olympia.addons.tasks import save_theme, save_theme_reupload
from olympia.addons.utils import reverse_name_lookup
from olympia.addons.widgets import IconWidgetRenderer, CategoriesSelectMultiple
from olympia.devhub import tasks as devhub_tasks
from olympia.tags.models import Tag
from olympia.translations import LOCALES
from olympia.translations.fields import TransField, TransTextarea
from olympia.translations.forms import TranslationFormMixin
from olympia.translations.models import Translation
from olympia.translations.utils import transfield_changed
from olympia.translations.widgets import TranslationTextInput
from olympia.users.models import UserEmailField
from olympia.versions.models import Version
log = commonware.log.getLogger('z.addons')
def clean_addon_name(name, instance=None, addon_type=None):
if not instance:
log.debug('clean_addon_name called without an instance: %s' % name)
# We don't need to do anything to prevent an unlisted addon name from
# clashing with listed addons, because the `reverse_name_lookup` util below
# uses the Addon.objects manager, which filters out unlisted addons.
if instance and not instance.is_listed:
return name
assert instance or addon_type
if not addon_type:
addon_type = instance.type
id = reverse_name_lookup(name, addon_type)
# If we get an id and either there's no instance or the instance.id != id.
if id and (not instance or id != instance.id):
raise forms.ValidationError(_('This name is already in use. Please '
'choose another.'))
return name
def clean_addon_slug(slug, instance):
slug_validator(slug, lower=False)
if slug != instance.slug:
if Addon.objects.filter(slug=slug).exists():
raise forms.ValidationError(
_('This slug is already in use. Please choose another.'))
if BlacklistedSlug.blocked(slug):
            raise forms.ValidationError(
                _(u'The slug cannot be "%s". Please choose another.') % slug)
return slug
def clean_tags(request, tags):
target = [slugify(t, spaces=True, lower=True) for t in tags.split(',')]
target = set(filter(None, target))
min_len = amo.MIN_TAG_LENGTH
max_len = Tag._meta.get_field('tag_text').max_length
max_tags = amo.MAX_TAGS
total = len(target)
blacklisted = (Tag.objects.values_list('tag_text', flat=True)
.filter(tag_text__in=target, blacklisted=True))
if blacklisted:
# L10n: {0} is a single tag or a comma-separated list of tags.
msg = ngettext('Invalid tag: {0}', 'Invalid tags: {0}',
len(blacklisted)).format(', '.join(blacklisted))
raise forms.ValidationError(msg)
restricted = (Tag.objects.values_list('tag_text', flat=True)
.filter(tag_text__in=target, restricted=True))
if not acl.action_allowed(request, 'Addons', 'Edit'):
if restricted:
# L10n: {0} is a single tag or a comma-separated list of tags.
msg = ngettext('"{0}" is a reserved tag and cannot be used.',
'"{0}" are reserved tags and cannot be used.',
len(restricted)).format('", "'.join(restricted))
raise forms.ValidationError(msg)
else:
# Admin's restricted tags don't count towards the limit.
total = len(target - set(restricted))
if total > max_tags:
num = total - max_tags
msg = ngettext('You have {0} too many tags.',
'You have {0} too many tags.', num).format(num)
raise forms.ValidationError(msg)
if any(t for t in target if len(t) > max_len):
        raise forms.ValidationError(
            _('All tags must be %s characters or less after invalid '
              'characters are removed.') % max_len)
if any(t for t in target if len(t) < min_len):
msg = ngettext("All tags must be at least {0} character.",
"All tags must be at least {0} characters.",
min_len).format(min_len)
raise forms.ValidationError(msg)
return target
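# A rough sketch of the expected behaviour (hypothetical input, ignoring the
# DB-backed blacklisted/restricted checks): tags are slugified, lowercased,
# deduplicated, and empty entries are dropped:
#
#   clean_tags(request, 'Video, Games, ,video')
#   -> set(['video', 'games'])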
class AddonFormBase(TranslationFormMixin, happyforms.ModelForm):
def __init__(self, *args, **kw):
self.request = kw.pop('request')
super(AddonFormBase, self).__init__(*args, **kw)
class Meta:
        model = Addon
fields = ('name', 'slug', 'summary', 'tags')
def clean_slug(self):
return clean_addon_slug(self.cleaned_data['slug'], self.instance)
def clean_tags(self):
return clean_tags(self.request, self.cleaned_data['tags'])
def get_tags(self, addon):
if acl.action_allowed(self.request, 'Addons', 'Edit'):
return list(addon.tags.values_list('tag_text', flat=True))
else:
return list(addon.tags.filter(restricted=False)
.values_list('tag_text', flat=True))
class AddonFormBasic(AddonFormBase):
name = TransField(max_length=50)
slug = forms.CharField(max_length=30)
summary = TransField(widget=TransTextarea(attrs={'rows': 4}),
max_length=250)
tags = forms.CharField(required=False)
class Meta:
model = Addon
fields = ('name', 'slug', 'summary', 'tags')
def __init__(self, *args, **kw):
super(AddonFormBasic, self).__init__(*args, **kw)
self.fields['tags'].initial = ', '.join(self.get_tags(self.instance))
# Do not simply append validators, as validators will persist between
# instances.
def validate_name(name):
return clean_addon_name(name, self.instance)
name_validators = list(self.fields['name'].validators)
name_validators.append(validate_name)
self.fields['name'].validators = name_validators
def save(self, addon, commit=False):
tags_new = self.cleaned_data['tags']
tags_old = [slugify(t, spaces=True) for t in self.get_tags(addon)]
# Add new tags.
for t in set(tags_new) - set(tags_old):
Tag(tag_text=t).save_tag(addon)
# Remove old tags.
for t in set(tags_old) - set(tags_new):
Tag(tag_text=t).remove_tag(addon)
# We ignore `commit`, since we need it to be `False` so we can save
# the ManyToMany fields on our own.
addonform = super(AddonFormBasic, self).save(commit=False)
addonform.save()
return addonform
class AppFormBasic(AddonFormBasic):
"""Form to override name length for apps."""
name = TransField(max_length=128)
class CategoryForm(forms.Form):
application = forms.TypedChoiceField(amo.APPS_CHOICES, coerce=int,
widget=forms.HiddenInput,
required=False)
categories = forms.ModelMultipleChoiceField(
queryset=Category.objects.all(), widget=CategoriesSelectMultiple)
def save(self, addon):
application = self.cleaned_data.get('application')
categories_new = self.cleaned_data['categories']
categories_old = [cats for app, cats in addon.app_categories if
(app and application and app.id == application)
or (not app and not application)]
if categories_old:
categories_old = categories_old[0]
# Add new categories.
for c in set(categories_new) - set(categories_old):
AddonCategory(addon=addon, category=c).save()
# Remove old categories.
for c in set(categories_old) - set(categories_new):
AddonCategory.objects.filter(addon=addon, category=c).delete()
def clean_categories(self):
categories = self.cleaned_data['categories']
total = categories.count()
max_cat = amo.MAX_CATEGORIES
if getattr(self, 'disabled', False) and total:
raise forms.ValidationError(
_('Categories cannot be changed while your add-on is featured '
'for this application.'))
if total > max_cat:
# L10n: {0} is the number of categories.
raise forms.ValidationError(ngettext(
'You can have only {0} category.',
'You can have only {0} categories.',
max_cat).format(max_cat))
has_misc = filter(lambda x: x.misc, categories)
if has_misc and total > 1:
raise forms.ValidationError(
_('The miscellaneous category cannot be combined with '
'additional categories.'))
return categories
class BaseCategoryFormSet(BaseFormSet):
def __init__(self, *args, **kw):
self.addon = kw.pop('addon')
self.request = kw.pop('request', None)
super(BaseCategoryFormSet, self).__init__(*args, **kw)
self.initial = []
apps = sorted(self.addon.compatible_apps.keys(),
key=lambda x: x.id)
# Drop any apps that don't have appropriate categories.
qs = Category.objects.filter(type=self.addon.type)
app_cats = dict((k, list(v)) for k, v in
sorted_groupby(qs, 'application'))
for app in list(apps):
if app and not app_cats.get(app.id):
apps.remove(app)
if not app_cats:
apps = []
for app in apps:
cats = dict(self.addon.app_categories).get(app, [])
self.initial.append({'categories': [c.id for c in cats]})
for app, form in zip(apps, self.forms):
key = app.id if app else None
form.request = self.request
form.initial['application'] = key
form.app = app
cats = sorted(app_cats[key], key=lambda x: x.name)
form.fields['categories'].choices = [(c.id, c.name) for c in cats]
# If this add-on is featured for this application, category
# changes are forbidden.
if not acl.action_allowed(self.request, 'Addons', 'Edit'):
form.disabled = (app and self.addon.is_featured(app))
def save(self):
for f in self.forms:
f.save(self.addon)
CategoryFormSet = formset_factory(form=CategoryForm,
formset=BaseCategoryFormSet, extra=0)
def icons():
"""
Generates a list of tuples for the default icons for add-ons,
in the format (pseudo-mime-type, description).
"""
icons = [('image/jpeg', 'jpeg'), ('image/png', 'png'), ('', 'default')]
dirs, files = storage.listdir(settings.ADDON_ICONS_DEFAULT_PATH)
for fname in files:
if '32' in fname and 'default' not in fname:
icon_name = fname.split('-')[0]
icons.append(('icon/%s' % icon_name, icon_name))
return icons
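# Example return value (assuming a default icon file such as
# "appearance-32.png" lives in ADDON_ICONS_DEFAULT_PATH):
#
#   [('image/jpeg', 'jpeg'), ('image/png', 'png'), ('', 'default'),
#    ('icon/appearance', 'appearance')]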
class AddonFormMedia(AddonFormBase):
icon_type = forms.CharField(widget=forms.RadioSelect(
renderer=IconWidgetRenderer, choices=[]), required=False)
icon_upload_hash = forms.CharField(required=False)
class Meta:
model = Addon
fields = ('icon_upload_hash', 'icon_type')
def __init__(self, *args, **kwargs):
super(AddonFormMedia, self).__init__(*args, **kwargs)
# Add icons here so we only read the directory when
# AddonFormMedia is actually being used.
self.fields['icon_type'].widget.choices = icons()
def save(self, addon, commit=True):
if self.cleaned_data['icon_upload_hash']:
upload_hash = self.cleaned_data['icon_upload_hash']
upload_path = os.path.join(settings.TMP_PATH, 'icon', upload_hash)
dirname = addon.get_icon_dir()
destination = os.path.join(dirname, '%s' % addon.id)
remove_icons(destination)
devhub_tasks.resize_icon.delay(upload_path, destination,
amo.ADDON_ICON_SIZES,
set_modified_on=[addon])
return super(AddonFormMedia, self).save(commit)
class AddonFormDetails(AddonFormBase):
default_locale = forms.TypedChoiceField(choices=LOCALES)
class Meta:
model = Addon
fields = ('description', 'default_locale', 'homepage')
def clean(self):
# Make sure we have the required translations in the new locale.
required = 'name', 'summary', 'description'
data = self.cleaned_data
if not self.errors and 'default_locale' in self.changed_data:
fields = dict((k, getattr(self.instance, k + '_id'))
for k in required)
locale = self.cleaned_data['default_locale']
ids = filter(None, fields.values())
qs = (Translation.objects.filter(locale=locale, id__in=ids,
localized_string__isnull=False)
.values_list('id', flat=True))
missing = [k for k, v in fields.items() if v not in qs]
# They might be setting description right now.
if 'description' in missing and locale in data['description']:
missing.remove('description')
if missing:
raise forms.ValidationError(
_('Before changing your default locale you must have a '
'name, summary, and description in that locale. '
'You are missing %s.') % ', '.join(map(repr, missing)))
return data
class AddonFormSupport(AddonFormBase):
support_url = TransField.adapt(forms.URLField)(required=False)
support_email = TransField.adapt(forms.EmailField)(required=False)
class Meta:
model = Addon
fields = ('support_email', 'support_url')
def __init__(self, *args, **kw):
super(AddonFormSupport, self).__init__(*args, **kw)
def save(self, addon, commit=True):
return super(AddonFormSupport, self).save(commit)
class AddonFormTechnical(AddonFormBase):
developer_comments = TransField(widget=TransTextarea, required=False)
class Meta:
model = Addon
fields = ('developer_comments', 'view_source', 'site_specific',
'external_software', 'auto_repackage', 'public_stats',
'whiteboard')
class AddonForm(happyforms.ModelForm):
    name = forms.CharField(widget=TranslationTextInput)
    homepage = forms.CharField(widget=TranslationTextInput, required=False)
    eula = forms.CharField(widget=TranslationTextInput)
    description = forms.CharField(widget=TranslationTextInput)
    developer_comments = forms.CharField(widget=TranslationTextInput)
    privacy_policy = forms.CharField(widget=TranslationTextInput)
    the_future = forms.CharField(widget=TranslationTextInput)
    the_reason = forms.CharField(widget=TranslationTextInput)
    support_email = forms.CharField(widget=TranslationTextInput)
class Meta:
model = Addon
fields = ('name', 'homepage', 'default_locale', 'support_email',
'support_url', 'description', 'summary',
'developer_comments', 'eula', 'privacy_policy', 'the_reason',
'the_future', 'view_source', 'prerelease', 'site_specific',)
exclude = ('status', )
def clean_name(self):
return clean_addon_name(
self.cleaned_data['name'], instance=self.instance)
def save(self):
desc = self.data.get('description')
if desc and desc != unicode(self.instance.description):
amo.log(amo.LOG.EDIT_DESCRIPTIONS, self.instance)
if self.changed_data:
amo.log(amo.LOG.EDIT_PROPERTIES, self.instance)
super(AddonForm, self).save()
class AbuseForm(happyforms.Form):
recaptcha = ReCaptchaField(label='')
text = forms.CharField(required=True,
label='',
widget=forms.Textarea())
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
super(AbuseForm, self).__init__(*args, **kwargs)
if (not self.request.user.is_anonymous() or
not settings.NOBOT_RECAPTCHA_PRIVATE_KEY):
del self.fields['recaptcha']
class ThemeFormBase(AddonFormBase):
def __init__(self, *args, **kwargs):
super(ThemeFormBase, self).__init__(*args, **kwargs)
cats = Category.objects.filter(type=amo.ADDON_PERSONA, weight__gte=0)
cats = sorted(cats, key=lambda x: x.name)
self.fields['category'].choices = [(c.id, c.name) for c in cats]
for field in ('header', 'footer'):
self.fields[field].widget.attrs = {
'data-upload-url': reverse('devhub.personas.upload_persona',
args=['persona_%s' % field]),
'data-allowed-types': 'image/jpeg|image/png'
}
def clean_name(self):
return clean_addon_name(
self.cleaned_data['name'], addon_type=amo.ADDON_PERSONA)
def clean_slug(self):
return clean_addon_slug(self.cleaned_data['slug'], self.instance)
class ThemeForm(ThemeFormBase):
name = forms.CharField(max_length=50)
slug = forms.CharField(max_length=30)
category = forms.ModelChoiceField(queryset=Category.objects.all(),
widget=forms.widgets.RadioSelect)
description = forms.CharField(widget=forms.Textarea(attrs={'rows': 4}),
max_length=500, required=False)
tags = forms.CharField(required=False)
license = forms.TypedChoiceField(
choices=amo.PERSONA_LICENSES_CHOICES,
coerce=int, empty_value=None, widget=forms.HiddenInput,
error_messages={'required': _lazy(u'A license must be selected.')})
header = forms.FileField(required=False)
header_hash = forms.CharField(widget=forms.HiddenInput)
footer = forms.FileField(required=False)
footer_hash = forms.CharField(widget=forms.HiddenInput, required=False)
# Native color picker doesn't allow real time tracking of user input
# and empty values, thus force the JavaScript color picker for now.
# See bugs 1005206 and 1003575.
accentcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
textcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
agreed = forms.BooleanField()
# This lets us POST the data URIs of the unsaved previews so we can still
# show them if there were form errors. It's really clever.
unsaved_data = forms.CharField(required=False, widget=forms.HiddenInput)
class Meta:
model = Addon
fields = ('name', 'slug', 'description', 'tags')
def save(self, commit=False):
data = self.cleaned_data
addon = Addon.objects.create(
slug=data.get('slug'),
status=amo.STATUS_PENDING, type=amo.ADDON_PERSONA)
addon.name = {'en-US': data['name']}
if data.get('description'):
addon.description = data['description']
addon._current_version = Version.objects.create(addon=addon,
version='0')
addon.save()
# Create Persona instance.
p = Persona()
p.persona_id = 0
p.addon = addon
p.header = 'header.png'
if data['footer_hash']:
p.footer = 'footer.png'
if data['accentcolor']:
p.accentcolor = data['accentcolor'].lstrip('#')
if data['textcolor']:
p.textcolor = data['textcolor'].lstrip('#')
p.license = data['license']
p.submit = datetime.now()
user = self.request.user
p.author = user.username
p.display_username = user.name
p.save()
# Save header, footer, and preview images.
save_theme.delay(data['header_hash'], data['footer_hash'], addon)
# Save user info.
addon.addonuser_set.create(user=user, role=amo.AUTHOR_ROLE_OWNER)
# Save tags.
for t in data['tags']:
Tag(tag_text=t).save_tag(addon)
# Save categories.
AddonCategory(addon=addon, category=data['category']).save()
return addon
class EditThemeForm(AddonFormBase):
name = TransField(max_length=50, label=_lazy('Give Your Theme a Name.'))
slug = forms.CharField(max_length=30)
category = forms.ModelChoiceField(queryset=Category.objects.all(),
widget=forms.widgets.RadioSelect)
description = TransField(
widget=TransTextarea(attrs={'rows': 4}),
max_length=500, required=False, label=_lazy('Describe your Theme.'))
tags = forms.CharField(required=False)
accentcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
textcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
license = forms.TypedChoiceField(
choices=amo.PERSONA_LICENSES_CHOICES, coerce=int, empty_value=None,
widget=forms.HiddenInput,
error_messages={'required': _lazy(u'A license must be selected.')})
# Theme re-upload.
header = forms.FileField(required=False)
header_hash = forms.CharField(widget=forms.HiddenInput, required=False)
footer = forms.FileField(required=False)
footer_hash = forms.CharField(widget=forms.HiddenInput, required=False)
class Meta:
model = Addon
fields = ('name', 'slug', 'description', 'tags')
def __init__(self, *args, **kw):
self.request = kw.pop('request')
super(AddonFormBase, self).__init__(*args, **kw)
addon = Addon.objects.no_cache().get(id=self.instance.id)
persona = addon.persona
# Do not simply append validators, as validators will persist between
# instances.
self.fields['name'].validators = list(self.fields['name'].validators)
self.fields['name'].validators.append(
lambda x: clean_addon_name(x, addon))
# Allow theme artists to localize Name and Description.
for trans in Translation.objects.filter(id=self.initial['name']):
self.initial['name_' + trans.locale.lower()] = trans
for trans in Translation.objects.filter(
id=self.initial['description']):
self.initial['description_' + trans.locale.lower()] = trans
self.old_tags = self.get_tags(addon)
self.initial['tags'] = ', '.join(self.old_tags)
if persona.accentcolor:
self.initial['accentcolor'] = '#' + persona.accentcolor
if persona.textcolor:
self.initial['textcolor'] = '#' + persona.textcolor
self.initial['license'] = persona.license
cats = sorted(Category.objects.filter(type=amo.ADDON_PERSONA,
weight__gte=0),
key=lambda x: x.name)
self.fields['category'].choices = [(c.id, c.name) for c in cats]
try:
self.initial['category'] = addon.categories.values_list(
'id', flat=True)[0]
except IndexError:
pass
for field in ('header', 'footer'):
self.fields[field].widget.attrs = {
'data-upload-url': reverse('devhub.personas.reupload_persona',
args=[addon.slug,
'persona_%s' % field]),
'data-allowed-types': 'image/jpeg|image/png'
}
def save(self):
addon = self.instance
persona = addon.persona
data = self.cleaned_data
# Update Persona-specific data.
persona_data = {
'license': int(data['license']),
'accentcolor': data['accentcolor'].lstrip('#'),
'textcolor': data['textcolor'].lstrip('#'),
'author': self.request.user.username,
'display_username': self.request.user.name
}
changed = False
for k, v in persona_data.iteritems():
if v != getattr(persona, k):
changed = True
setattr(persona, k, v)
if changed:
persona.save()
if self.changed_data:
amo.log(amo.LOG.EDIT_PROPERTIES, addon)
self.instance.modified = datetime.now()
# Update Addon-specific data.
changed = (
set(self.old_tags) != data['tags'] or # Check if tags changed.
self.initial['slug'] != data['slug'] or # Check if slug changed.
transfield_changed('description', self.initial, data) or
transfield_changed('name', self.initial, data))
if changed:
# Only save if addon data changed.
super(EditThemeForm, self).save()
# Update tags.
tags_new = data['tags']
tags_old = [slugify(t, spaces=True) for t in self.old_tags]
# Add new tags.
for t in set(tags_new) - set(tags_old):
Tag(tag_text=t).save_tag(addon)
# Remove old tags.
for t in set(tags_old) - set(tags_new):
Tag(tag_text=t).remove_tag(addon)
# Update category.
if data['category'].id != self.initial['category']:
addon_cat = addon.addoncategory_set.all()[0]
addon_cat.category = data['category']
addon_cat.save()
# Theme reupload.
if not addon.is_pending():
if data['header_hash'] or data['footer_hash']:
save_theme_reupload.delay(
data['header_hash'], data['footer_hash'], addon)
return data
class EditThemeOwnerForm(happyforms.Form):
owner = UserEmailField()
def __init__(self, *args, **kw):
self.instance = kw.pop('instance')
super(EditThemeOwnerForm, self).__init__(*args, **kw)
addon = self.instance
self.fields['owner'].widget.attrs['placeholder'] = _(
"Enter a new author's email address")
try:
self.instance_addonuser = addon.addonuser_set.all()[0]
self.initial['owner'] = self.instance_addonuser.user.email
except IndexError:
# If there was never an author before, then don't require one now.
self.instance_addonuser = None
self.fields['owner'].required = False
def save(self):
data = self.cleaned_data
if data.get('owner'):
            changed = (not self.instance_addonuser or
                       self.instance_addonuser.user != data['owner'])
if changed:
# Update Persona-specific data.
persona = self.instance.persona
persona.author = data['owner'].username
persona.display_username = data['owner'].name
persona.save()
if not self.instance_addonuser:
                # If there was never an owner before, create one.
self.instance.addonuser_set.create(user=data['owner'],
role=amo.AUTHOR_ROLE_OWNER)
            elif self.instance_addonuser.user != data['owner']:
# If the owner has changed, update the `AddonUser` object.
self.instance_addonuser.user = data['owner']
self.instance_addonuser.role = amo.AUTHOR_ROLE_OWNER
self.instance_addonuser.save()
self.instance.modified = datetime.now()
self.instance.save()
return data
class ContributionForm(happyforms.Form):
amount = forms.DecimalField(required=True, min_value=Decimal('0.01'))
|
jpetto/olympia
|
src/olympia/addons/forms.py
|
Python
|
bsd-3-clause
| 28,545
|
import django_filters
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
try:
from django_filters import rest_framework as filters
except ImportError:  # Backwards compatibility for django-rest-framework<3.7
from rest_framework import filters
from rest_framework import viewsets
from teryt_tree.models import JednostkaAdministracyjna
from teryt_tree.rest_framework_ext.serializers import JednostkaAdministracyjnaSerializer
def custom_area_filter(queryset, _, value):
if not value:
return queryset
return queryset.area(get_object_or_404(JednostkaAdministracyjna, pk=value))
class JednostkaAdministracyjnaFilter(filters.FilterSet):
area = django_filters.CharFilter(method=custom_area_filter, label=_("Area"))
class Meta:
model = JednostkaAdministracyjna
fields = ["name", "category", "category__level", "area"]
class JednostkaAdministracyjnaViewSet(viewsets.ModelViewSet):
queryset = (
JednostkaAdministracyjna.objects.select_related("category")
.prefetch_related("children")
.all()
)
serializer_class = JednostkaAdministracyjnaSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = JednostkaAdministracyjnaFilter
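# A minimal wiring sketch (hypothetical urls.py) showing how this viewset is
# typically exposed through a DRF router; the URL prefix is an assumption:
#
#   from rest_framework import routers
#
#   router = routers.DefaultRouter()
#   router.register(r'jednostki', JednostkaAdministracyjnaViewSet)
#   urlpatterns = router.urls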
|
ad-m/django-teryt-tree
|
teryt_tree/rest_framework_ext/viewsets.py
|
Python
|
bsd-3-clause
| 1,281
|
import warnings
from django.test import TestCase, Client
from django.test.utils import override_settings
from django.http import HttpRequest, Http404
from wagtail.wagtailcore.models import Page, Site
from wagtail.tests.models import EventPage, EventIndex, SimplePage, PageWithOldStyleRouteMethod
class TestSiteRouting(TestCase):
fixtures = ['test.json']
def setUp(self):
self.default_site = Site.objects.get(is_default_site=True)
events_page = Page.objects.get(url_path='/home/events/')
about_page = Page.objects.get(url_path='/home/about-us/')
self.events_site = Site.objects.create(hostname='events.example.com', root_page=events_page)
self.alternate_port_events_site = Site.objects.create(hostname='events.example.com', root_page=events_page, port='8765')
self.about_site = Site.objects.create(hostname='about.example.com', root_page=about_page)
self.unrecognised_port = '8000'
self.unrecognised_hostname = 'unknown.site.com'
def test_no_host_header_routes_to_default_site(self):
# requests without a Host: header should be directed to the default site
request = HttpRequest()
request.path = '/'
self.assertEqual(Site.find_for_request(request), self.default_site)
def test_valid_headers_route_to_specific_site(self):
# requests with a known Host: header should be directed to the specific site
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = self.events_site.hostname
request.META['SERVER_PORT'] = self.events_site.port
self.assertEqual(Site.find_for_request(request), self.events_site)
def test_ports_in_request_headers_are_respected(self):
# ports in the Host: header should be respected
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = self.alternate_port_events_site.hostname
request.META['SERVER_PORT'] = self.alternate_port_events_site.port
self.assertEqual(Site.find_for_request(request), self.alternate_port_events_site)
def test_unrecognised_host_header_routes_to_default_site(self):
# requests with an unrecognised Host: header should be directed to the default site
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = self.unrecognised_hostname
request.META['SERVER_PORT'] = '80'
self.assertEqual(Site.find_for_request(request), self.default_site)
def test_unrecognised_port_and_default_host_routes_to_default_site(self):
# requests to the default host on an unrecognised port should be directed to the default site
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = self.default_site.hostname
request.META['SERVER_PORT'] = self.unrecognised_port
self.assertEqual(Site.find_for_request(request), self.default_site)
def test_unrecognised_port_and_unrecognised_host_routes_to_default_site(self):
# requests with an unrecognised Host: header _and_ an unrecognised port
        # should be directed to the default site
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = self.unrecognised_hostname
request.META['SERVER_PORT'] = self.unrecognised_port
self.assertEqual(Site.find_for_request(request), self.default_site)
def test_unrecognised_port_on_known_hostname_routes_there_if_no_ambiguity(self):
# requests on an unrecognised port should be directed to the site with
# matching hostname if there is no ambiguity
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = self.about_site.hostname
request.META['SERVER_PORT'] = self.unrecognised_port
self.assertEqual(Site.find_for_request(request), self.about_site)
def test_unrecognised_port_on_known_hostname_routes_to_default_site_if_ambiguity(self):
# requests on an unrecognised port should be directed to the default
# site, even if their hostname (but not port) matches more than one
# other entry
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = self.events_site.hostname
request.META['SERVER_PORT'] = self.unrecognised_port
self.assertEqual(Site.find_for_request(request), self.default_site)
def test_port_in_http_host_header_is_ignored(self):
# port in the HTTP_HOST header is ignored
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = "%s:%s" % (self.events_site.hostname, self.events_site.port)
request.META['SERVER_PORT'] = self.alternate_port_events_site.port
self.assertEqual(Site.find_for_request(request), self.alternate_port_events_site)
class TestRouting(TestCase):
fixtures = ['test.json']
# need to clear urlresolver caches before/after tests, because we override ROOT_URLCONF
# in some tests here
def setUp(self):
from django.core.urlresolvers import clear_url_caches
clear_url_caches()
def tearDown(self):
from django.core.urlresolvers import clear_url_caches
clear_url_caches()
def test_urls(self):
default_site = Site.objects.get(is_default_site=True)
homepage = Page.objects.get(url_path='/home/')
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
# Basic installation only has one site configured, so page.url will return local URLs
self.assertEqual(homepage.full_url, 'http://localhost/')
self.assertEqual(homepage.url, '/')
self.assertEqual(homepage.relative_url(default_site), '/')
self.assertEqual(christmas_page.full_url, 'http://localhost/events/christmas/')
self.assertEqual(christmas_page.url, '/events/christmas/')
self.assertEqual(christmas_page.relative_url(default_site), '/events/christmas/')
def test_urls_with_multiple_sites(self):
events_page = Page.objects.get(url_path='/home/events/')
events_site = Site.objects.create(hostname='events.example.com', root_page=events_page)
default_site = Site.objects.get(is_default_site=True)
homepage = Page.objects.get(url_path='/home/')
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
# with multiple sites, page.url will return full URLs to ensure that
# they work across sites
self.assertEqual(homepage.full_url, 'http://localhost/')
self.assertEqual(homepage.url, 'http://localhost/')
self.assertEqual(homepage.relative_url(default_site), '/')
self.assertEqual(homepage.relative_url(events_site), 'http://localhost/')
self.assertEqual(christmas_page.full_url, 'http://events.example.com/christmas/')
self.assertEqual(christmas_page.url, 'http://events.example.com/christmas/')
self.assertEqual(christmas_page.relative_url(default_site), 'http://events.example.com/christmas/')
self.assertEqual(christmas_page.relative_url(events_site), '/christmas/')
@override_settings(ROOT_URLCONF='wagtail.tests.non_root_urls')
def test_urls_with_non_root_urlconf(self):
default_site = Site.objects.get(is_default_site=True)
homepage = Page.objects.get(url_path='/home/')
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
# Basic installation only has one site configured, so page.url will return local URLs
self.assertEqual(homepage.full_url, 'http://localhost/site/')
self.assertEqual(homepage.url, '/site/')
self.assertEqual(homepage.relative_url(default_site), '/site/')
self.assertEqual(christmas_page.full_url, 'http://localhost/site/events/christmas/')
self.assertEqual(christmas_page.url, '/site/events/christmas/')
self.assertEqual(christmas_page.relative_url(default_site), '/site/events/christmas/')
def test_request_routing(self):
homepage = Page.objects.get(url_path='/home/')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
request = HttpRequest()
request.path = '/events/christmas/'
(found_page, args, kwargs) = homepage.route(request, ['events', 'christmas'])
self.assertEqual(found_page, christmas_page)
def test_request_serving(self):
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
request = HttpRequest()
response = christmas_page.serve(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context_data['self'], christmas_page)
used_template = response.resolve_template(response.template_name)
self.assertEqual(used_template.name, 'tests/event_page.html')
def test_route_to_unknown_page_returns_404(self):
homepage = Page.objects.get(url_path='/home/')
request = HttpRequest()
request.path = '/events/quinquagesima/'
with self.assertRaises(Http404):
homepage.route(request, ['events', 'quinquagesima'])
def test_route_to_unpublished_page_returns_404(self):
homepage = Page.objects.get(url_path='/home/')
request = HttpRequest()
request.path = '/events/tentative-unpublished-event/'
with self.assertRaises(Http404):
homepage.route(request, ['events', 'tentative-unpublished-event'])
class TestServeView(TestCase):
fixtures = ['test.json']
def setUp(self):
# Explicitly clear the cache of site root paths. Normally this would be kept
# in sync by the Site.save logic, but this is bypassed when the database is
# rolled back between tests using transactions.
from django.core.cache import cache
cache.delete('wagtail_site_root_paths')
# also need to clear urlresolver caches before/after tests, because we override
# ROOT_URLCONF in some tests here
from django.core.urlresolvers import clear_url_caches
clear_url_caches()
def tearDown(self):
from django.core.urlresolvers import clear_url_caches
clear_url_caches()
def test_serve(self):
response = self.client.get('/events/christmas/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'tests/event_page.html')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
self.assertEqual(response.context['self'], christmas_page)
self.assertContains(response, '<h1>Christmas</h1>')
self.assertContains(response, '<h2>Event</h2>')
@override_settings(ROOT_URLCONF='wagtail.tests.non_root_urls')
def test_serve_with_non_root_urls(self):
response = self.client.get('/site/events/christmas/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'tests/event_page.html')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
self.assertEqual(response.context['self'], christmas_page)
self.assertContains(response, '<h1>Christmas</h1>')
self.assertContains(response, '<h2>Event</h2>')
def test_serve_unknown_page_returns_404(self):
response = self.client.get('/events/quinquagesima/')
self.assertEqual(response.status_code, 404)
def test_serve_unpublished_page_returns_404(self):
response = self.client.get('/events/tentative-unpublished-event/')
self.assertEqual(response.status_code, 404)
def test_serve_with_multiple_sites(self):
events_page = Page.objects.get(url_path='/home/events/')
Site.objects.create(hostname='events.example.com', root_page=events_page)
response = self.client.get('/christmas/', HTTP_HOST='events.example.com')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'tests/event_page.html')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
self.assertEqual(response.context['self'], christmas_page)
self.assertContains(response, '<h1>Christmas</h1>')
self.assertContains(response, '<h2>Event</h2>')
# same request to the default host should return a 404
c = Client()
response = c.get('/christmas/', HTTP_HOST='localhost')
self.assertEqual(response.status_code, 404)
def test_serve_with_custom_context(self):
response = self.client.get('/events/')
self.assertEqual(response.status_code, 200)
# should render the whole page
self.assertContains(response, '<h1>Events</h1>')
# response should contain data from the custom 'events' context variable
self.assertContains(response, '<a href="/events/christmas/">Christmas</a>')
def test_ajax_response(self):
response = self.client.get('/events/', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
# should only render the content of includes/event_listing.html, not the whole page
self.assertNotContains(response, '<h1>Events</h1>')
self.assertContains(response, '<a href="/events/christmas/">Christmas</a>')
def test_before_serve_hook(self):
response = self.client.get('/events/', HTTP_USER_AGENT='GoogleBot')
self.assertContains(response, 'bad googlebot no cookie')
class TestStaticSitePaths(TestCase):
def setUp(self):
self.root_page = Page.objects.get(id=1)
# For simple tests
self.home_page = self.root_page.add_child(instance=SimplePage(title="Homepage", slug="home"))
self.about_page = self.home_page.add_child(instance=SimplePage(title="About us", slug="about"))
self.contact_page = self.home_page.add_child(instance=SimplePage(title="Contact", slug="contact"))
# For custom tests
self.event_index = self.root_page.add_child(instance=EventIndex(title="Events", slug="events"))
for i in range(20):
self.event_index.add_child(instance=EventPage(title="Event " + str(i), slug="event" + str(i)))
def test_local_static_site_paths(self):
paths = list(self.about_page.get_static_site_paths())
self.assertEqual(paths, ['/'])
def test_child_static_site_paths(self):
paths = list(self.home_page.get_static_site_paths())
self.assertEqual(paths, ['/', '/about/', '/contact/'])
def test_custom_static_site_paths(self):
paths = list(self.event_index.get_static_site_paths())
# Event index path
expected_paths = ['/']
# One path for each page of results
expected_paths.extend(['/' + str(i + 1) + '/' for i in range(5)])
# One path for each event page
expected_paths.extend(['/event' + str(i) + '/' for i in range(20)])
paths.sort()
expected_paths.sort()
self.assertEqual(paths, expected_paths)
class TestMovePage(TestCase):
fixtures = ['test.json']
def test_move_page(self):
about_us_page = SimplePage.objects.get(url_path='/home/about-us/')
events_index = EventIndex.objects.get(url_path='/home/events/')
events_index.move(about_us_page, pos='last-child')
# re-fetch events index to confirm that db fields have been updated
events_index = EventIndex.objects.get(id=events_index.id)
self.assertEqual(events_index.url_path, '/home/about-us/events/')
self.assertEqual(events_index.depth, 4)
self.assertEqual(events_index.get_parent().id, about_us_page.id)
# children of events_index should also have been updated
christmas = events_index.get_children().get(slug='christmas')
self.assertEqual(christmas.depth, 5)
self.assertEqual(christmas.url_path, '/home/about-us/events/christmas/')
class TestPrevNextSiblings(TestCase):
fixtures = ['test.json']
def test_get_next_siblings(self):
christmas_event = Page.objects.get(url_path='/home/events/christmas/')
self.assertTrue(christmas_event.get_next_siblings().filter(url_path='/home/events/final-event/').exists())
def test_get_next_siblings_inclusive(self):
christmas_event = Page.objects.get(url_path='/home/events/christmas/')
# First element must always be the current page
self.assertEqual(christmas_event.get_next_siblings(inclusive=True).first(), christmas_event)
def test_get_prev_siblings(self):
final_event = Page.objects.get(url_path='/home/events/final-event/')
self.assertTrue(final_event.get_prev_siblings().filter(url_path='/home/events/christmas/').exists())
# First element must always be the current page
self.assertEqual(final_event.get_prev_siblings(inclusive=True).first(), final_event)
class TestCopyPage(TestCase):
fixtures = ['test.json']
def test_copy_page_copies(self):
about_us = SimplePage.objects.get(url_path='/home/about-us/')
# Copy it
new_about_us = about_us.copy(update_attrs={'title': "New about us", 'slug': 'new-about-us'})
# Check that new_about_us is correct
self.assertIsInstance(new_about_us, SimplePage)
self.assertEqual(new_about_us.title, "New about us")
self.assertEqual(new_about_us.slug, 'new-about-us')
# Check that new_about_us is a different page
self.assertNotEqual(about_us.id, new_about_us.id)
# Check that the url path was updated
self.assertEqual(new_about_us.url_path, '/home/new-about-us/')
def test_copy_page_copies_child_objects(self):
christmas_event = EventPage.objects.get(url_path='/home/events/christmas/')
# Copy it
new_christmas_event = christmas_event.copy(update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'})
# Check that the speakers were copied
self.assertEqual(new_christmas_event.speakers.count(), 1, "Child objects weren't copied")
# Check that the speakers weren't removed from old page
self.assertEqual(christmas_event.speakers.count(), 1, "Child objects were removed from the original page")
# Check that advert placements were also copied (there's a gotcha here, since the advert_placements
# relation is defined on Page, not EventPage)
self.assertEqual(new_christmas_event.advert_placements.count(), 1, "Child objects defined on the superclass weren't copied")
self.assertEqual(christmas_event.advert_placements.count(), 1, "Child objects defined on the superclass were removed from the original page")
def test_copy_page_copies_child_objects_with_nonspecific_class(self):
        # Get christmas page as Page instead of EventPage
christmas_event = Page.objects.get(url_path='/home/events/christmas/')
# Copy it
new_christmas_event = christmas_event.copy(update_attrs={'title': "New christmas event", 'slug': 'new-christmas-event'})
# Check that the type of the new page is correct
self.assertIsInstance(new_christmas_event, EventPage)
# Check that the speakers were copied
self.assertEqual(new_christmas_event.speakers.count(), 1, "Child objects weren't copied")
def test_copy_page_copies_recursively(self):
events_index = EventIndex.objects.get(url_path='/home/events/')
# Copy it
new_events_index = events_index.copy(recursive=True, update_attrs={'title': "New events index", 'slug': 'new-events-index'})
# Get christmas event
old_christmas_event = events_index.get_children().filter(slug='christmas').first()
new_christmas_event = new_events_index.get_children().filter(slug='christmas').first()
# Check that the event exists in both places
self.assertNotEqual(new_christmas_event, None, "Child pages weren't copied")
self.assertNotEqual(old_christmas_event, None, "Child pages were removed from original page")
# Check that the url path was updated
self.assertEqual(new_christmas_event.url_path, '/home/new-events-index/christmas/')
def test_copy_page_copies_recursively_with_child_objects(self):
events_index = EventIndex.objects.get(url_path='/home/events/')
# Copy it
new_events_index = events_index.copy(recursive=True, update_attrs={'title': "New events index", 'slug': 'new-events-index'})
# Get christmas event
old_christmas_event = events_index.get_children().filter(slug='christmas').first()
new_christmas_event = new_events_index.get_children().filter(slug='christmas').first()
# Check that the speakers were copied
self.assertEqual(new_christmas_event.specific.speakers.count(), 1, "Child objects weren't copied")
# Check that the speakers weren't removed from old page
self.assertEqual(old_christmas_event.specific.speakers.count(), 1, "Child objects were removed from the original page")
|
thenewguy/wagtail
|
wagtail/wagtailcore/tests/test_page_model.py
|
Python
|
bsd-3-clause
| 21,035
|
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from functools import total_ordering
from itertools import chain
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.html import escape
from django.utils.safestring import mark_safe
class Sequence(list):
'''
Represents a column sequence, e.g. ``('first_name', '...', 'last_name')``
This is used to represent `.Table.Meta.sequence` or the `.Table`
constructors's *sequence* keyword argument.
The sequence must be a list of column names and is used to specify the
order of the columns on a table. Optionally a '...' item can be inserted,
which is treated as a *catch-all* for column names that aren't explicitly
specified.
'''
def expand(self, columns):
'''
Expands the ``'...'`` item in the sequence into the appropriate column
names that should be placed there.
:raises: `ValueError` if the sequence is invalid for the columns.
'''
ellipses = self.count("...")
if ellipses > 1:
raise ValueError("'...' must be used at most once in a sequence.")
elif ellipses == 0:
self.append("...")
# everything looks good, let's expand the "..." item
columns = list(columns) # take a copy and exhaust the generator
head = []
tail = []
target = head # start by adding things to the head
for name in self:
if name == "...":
# now we'll start adding elements to the tail
target = tail
continue
target.append(name)
if name in columns:
columns.pop(columns.index(name))
self[:] = chain(head, columns, tail)
class OrderBy(str):
'''
A single item in an `.OrderByTuple` object. This class is essentially just
a `str` with some extra properties.
'''
QUERYSET_SEPARATOR = '__'
@property
def bare(self):
'''
Returns:
`.OrderBy`: the bare form.
The *bare form* is the non-prefixed form. Typically the bare form is
just the ascending form.
Example: ``age`` is the bare form of ``-age``
'''
return OrderBy(self[1:]) if self[:1] == '-' else self
@property
def opposite(self):
'''
        Provides the opposite of the current sorting direction.
Returns:
`.OrderBy`: object with an opposite sort influence.
Example::
>>> order_by = OrderBy('name')
>>> order_by.opposite
'-name'
'''
return OrderBy(self[1:]) if self.is_descending else OrderBy('-' + self)
@property
def is_descending(self):
'''
Returns `True` if this object induces *descending* ordering.
'''
return self.startswith('-')
@property
def is_ascending(self):
'''
Returns `True` if this object induces *ascending* ordering.
'''
return not self.is_descending
def for_queryset(self):
'''
Returns the current instance usable in Django QuerySet's order_by
arguments.
'''
return self.replace(Accessor.SEPARATOR, OrderBy.QUERYSET_SEPARATOR)
@six.python_2_unicode_compatible
class OrderByTuple(tuple):
'''
    Stores ordering (as `.OrderBy` objects). The `~.Table.order_by` property
is always converted to an `.OrderByTuple` object.
This class is essentially just a `tuple` with some useful extras.
Example::
>>> x = OrderByTuple(('name', '-age'))
>>> x['age']
'-age'
>>> x['age'].is_descending
True
>>> x['age'].opposite
'age'
'''
def __new__(cls, iterable):
transformed = []
for item in iterable:
if not isinstance(item, OrderBy):
item = OrderBy(item)
transformed.append(item)
return super(OrderByTuple, cls).__new__(cls, transformed)
def __str__(self):
return ','.join(self)
def __contains__(self, name):
'''
Determine if a column has an influence on ordering.
Example::
>>> x = OrderByTuple(('name', ))
>>> 'name' in x
True
>>> '-name' in x
True
Arguments:
name (str): The name of a column. (optionally prefixed)
Returns:
bool: `True` if the column with `name` influences the ordering.
'''
name = OrderBy(name).bare
for order_by in self:
if order_by.bare == name:
return True
return False
def __getitem__(self, index):
'''
Allows an `.OrderBy` object to be extracted via named or integer
based indexing.
        When using name-based indexing, it's fine to use a prefixed name::
>>> x = OrderByTuple(('name', '-age'))
>>> x[0]
'name'
>>> x['age']
'-age'
>>> x['-age']
'-age'
Arguments:
index (int): Index to query the ordering for.
Returns:
`.OrderBy`: for the ordering at the index.
'''
if isinstance(index, six.string_types):
for order_by in self:
if order_by == index or order_by.bare == index:
return order_by
raise KeyError
return super(OrderByTuple, self).__getitem__(index)
@property
def key(self):
accessors = []
reversing = []
for order_by in self:
accessors.append(Accessor(order_by.bare))
reversing.append(order_by.is_descending)
@total_ordering
class Comparator(object):
def __init__(self, obj):
self.obj = obj
def __eq__(self, other):
for accessor in accessors:
a = accessor.resolve(self.obj, quiet=True)
b = accessor.resolve(other.obj, quiet=True)
                    if a != b:
return False
return True
def __lt__(self, other):
for accessor, reverse in six.moves.zip(accessors, reversing):
a = accessor.resolve(self.obj, quiet=True)
b = accessor.resolve(other.obj, quiet=True)
if a == b:
continue
if reverse:
a, b = b, a
# The rest of this should be refactored out into a util
# function 'compare' that handles different types.
try:
return a < b
except TypeError:
# If the truth values differ, it's a good way to
# determine ordering.
if bool(a) is not bool(b):
return bool(a) < bool(b)
# Handle comparing different types, by falling back to
# the string and id of the type. This at least groups
# different types together.
a_type = type(a)
b_type = type(b)
return (repr(a_type), id(a_type)) < (repr(b_type), id(b_type))
return False
return Comparator
def get(self, key, fallback):
'''
Identical to __getitem__, but supports fallback value.
'''
try:
return self[key]
except (KeyError, IndexError):
return fallback
@property
def opposite(self):
'''
Return version with each `.OrderBy` prefix toggled::
>>> order_by = OrderByTuple(('name', '-age'))
>>> order_by.opposite
('-name', 'age')
'''
return type(self)((o.opposite for o in self))
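# A small usage sketch for OrderByTuple.key (assumed data): the Comparator
# class it returns plugs straight into sorted()/list.sort() for ordering
# plain Python objects outside the database:
#
#   order_by = OrderByTuple(('name', '-age'))
#   rows.sort(key=order_by.key)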
class Accessor(str):
'''
A string describing a path from one object to another via attribute/index
accesses. For convenience, the class has an alias `.A` to allow for more concise code.
Relations are separated by a ``.`` character.
'''
SEPARATOR = '.'
def resolve(self, context, safe=True, quiet=False):
'''
Return an object described by the accessor by traversing the attributes
of *context*.
Lookups are attempted in the following order:
- dictionary (e.g. ``obj[related]``)
- attribute (e.g. ``obj.related``)
- list-index lookup (e.g. ``obj[int(related)]``)
Callable objects are called, and their result is used, before
proceeding with the resolving.
Example::
>>> x = Accessor('__len__')
>>> x.resolve('brad')
4
>>> x = Accessor('0.upper')
>>> x.resolve('brad')
'B'
Arguments:
context (object): The root/first object to traverse.
safe (bool): Don't call anything with `alters_data = True`
quiet (bool): Smother all exceptions and instead return `None`
Returns:
target object
Raises:
            `TypeError`, `AttributeError`, `KeyError`, `ValueError`
(unless `quiet` == `True`)
'''
try:
current = context
for bit in self.bits:
try: # dictionary lookup
current = current[bit]
except (TypeError, AttributeError, KeyError):
try: # attribute lookup
current = getattr(current, bit)
except (TypeError, AttributeError):
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # dict without `int(bit)` key
TypeError, # unsubscriptable object
):
raise ValueError('Failed lookup for key [%s] in %r'
', when resolving the accessor %s' % (bit, current, self)
)
if callable(current):
if safe and getattr(current, 'alters_data', False):
raise ValueError('refusing to call %s() because `.alters_data = True`'
% repr(current))
if not getattr(current, 'do_not_call_in_templates', False):
current = current()
# important that we break in None case, or a relationship
# spanning across a null-key will raise an exception in the
# next iteration, instead of defaulting.
if current is None:
break
return current
        except Exception:
if not quiet:
raise
@property
def bits(self):
if self == '':
return ()
return self.split(self.SEPARATOR)
def get_field(self, model):
'''
Return the django model field for model in context, following relations.
'''
if not hasattr(model, '_meta'):
return
field = None
for bit in self.bits:
try:
field = model._meta.get_field(bit)
except FieldDoesNotExist:
break
if hasattr(field, 'remote_field'):
rel = getattr(field, 'remote_field', None)
model = getattr(rel, 'model', model)
# !!! Support only for Django <= 1.8
# Remove this when support for Django 1.8 is over
else:
rel = getattr(field, 'rel', None)
model = getattr(rel, 'to', model)
return field
def penultimate(self, context, quiet=True):
'''
Split the accessor on the right-most dot '.', return a tuple with:
- the resolved left part.
- the remainder
Example::
>>> Accessor('a.b.c').penultimate({'a': {'a': 1, 'b': {'c': 2, 'd': 4}}})
({'c': 2, 'd': 4}, 'c')
'''
path, _, remainder = self.rpartition('.')
return A(path).resolve(context, quiet=quiet), remainder
A = Accessor # alias
class AttributeDict(dict):
'''
A wrapper around `dict` that knows how to render itself as HTML
style tag attributes.
The returned string is marked safe, so it can be used safely in a template.
See `.as_html` for a usage example.
'''
def as_html(self):
'''
Render to HTML tag attributes.
Example:
.. code-block:: python
>>> from django_tables2.utils import AttributeDict
>>> attrs = AttributeDict({'class': 'mytable', 'id': 'someid'})
>>> attrs.as_html()
'class="mytable" id="someid"'
:rtype: `~django.utils.safestring.SafeUnicode` object
'''
blacklist = ('th', 'td', '_ordering')
return mark_safe(' '.join(['%s="%s"' % (k, escape(v if not callable(v) else v()))
for k, v in six.iteritems(self) if k not in blacklist]))
def segment(sequence, aliases):
'''
Translates a flat sequence of items into a set of prefixed aliases.
This allows the value set by `.QuerySet.order_by` to be translated into
a list of columns that would have the same result. These are called
"order by aliases" which are optionally prefixed column names::
>>> list(segment(('a', '-b', 'c'),
... {'x': ('a'),
... 'y': ('b', '-c'),
... 'z': ('-b', 'c')}))
[('x', '-y'), ('x', 'z')]
'''
if not (sequence or aliases):
return
for alias, parts in aliases.items():
variants = {
# alias: order by tuple
alias: OrderByTuple(parts),
OrderBy(alias).opposite: OrderByTuple(parts).opposite,
}
for valias, vparts in variants.items():
if list(sequence[:len(vparts)]) == list(vparts):
tail_aliases = dict(aliases)
del tail_aliases[alias]
tail_sequence = sequence[len(vparts):]
if tail_sequence:
for tail in segment(tail_sequence, tail_aliases):
yield tuple(chain([valias], tail))
else:
continue
else:
yield tuple([valias])
def signature(fn):
'''
Returns:
tuple: Returns a (arguments, kwarg_name)-tuple:
- the arguments (positional or keyword)
- the name of the ** kwarg catch all.
The self-argument for methods is always removed.
'''
import inspect
    # getargspec is deprecated since Python 3.0, so outside of PY2 use the
    # new inspect API.
if six.PY2:
argspec = inspect.getargspec(fn)
args = argspec.args
if len(args) > 0:
args = tuple(args[1:] if args[0] == 'self' else args)
return (args, argspec.keywords)
# python 3 version:
signature = inspect.signature(fn)
args = []
keywords = None
for arg in signature.parameters.values():
if arg.kind == arg.VAR_KEYWORD:
keywords = arg.name
elif arg.kind == arg.VAR_POSITIONAL:
continue # skip *args catch-all
else:
args.append(arg.name)
return tuple(args), keywords
def call_with_appropriate(fn, kwargs):
'''
Calls the function ``fn`` with the keyword arguments from ``kwargs`` it expects
If the kwargs argument is defined, pass all arguments, else provide exactly
the arguments wanted.
'''
args, keyword = signature(fn)
if not keyword:
kwargs = {key: kwargs[key] for key in kwargs if key in args}
return fn(**kwargs)
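# Example (hypothetical callable): only the keyword the function actually
# declares is passed through; the rest are silently dropped:
#
#   call_with_appropriate(lambda value: value * 2, {'value': 3, 'other': 9})
#   -> 6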
def computed_values(d, *args, **kwargs):
'''
Returns a new `dict` that has callable values replaced with the return values.
Example::
        >>> computed_values({'foo': lambda: 'bar'})
{'foo': 'bar'}
Arbitrarily deep structures are supported. The logic is as follows:
1. If the value is callable, call it and make that the new value.
    2. If the value is an instance of dict, compute its values recursively the same way.
Example::
>>> def parents():
... return {
... 'father': lambda: 'Foo',
... 'mother': 'Bar'
... }
...
>>> a = {
... 'name': 'Brad',
... 'parents': parents
... }
...
>>> computed_values(a)
{'name': 'Brad', 'parents': {'father': 'Foo', 'mother': 'Bar'}}
Arguments:
d (dict): The original dictionary.
args: any extra positional arguments will be passed to the callables
kwargs: any extra keyword arguments will be passed to the callables.
Returns:
dict: with callable values replaced.
'''
result = {}
for k, v in six.iteritems(d):
if callable(v):
v = v(*args, **kwargs)
if isinstance(v, dict):
v = computed_values(v, *args, **kwargs)
result[k] = v
return result
|
vicky2135/lucious
|
oscar/lib/python2.7/site-packages/django_tables2/utils.py
|
Python
|
bsd-3-clause
| 17,566
|
#coding=utf-8
from django import template
import random
register = template.Library()
from django.db.models.query import QuerySet
@register.filter
def randomize(values):
if not isinstance(values, QuerySet):
        if isinstance(values, list):
            # random.shuffle() shuffles in place and returns None,
            # so shuffle first and return the list itself.
            random.shuffle(values)
            return values
return values
return values.order_by('?')
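# A minimal template-side usage sketch (assuming this tag library is on the
# template path and `gallery.images` is a list or queryset in context):
#
#   {% load gallery_tags %}
#   {% for image in gallery.images|randomize %}
#       ...
#   {% endfor %}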
|
nickburlett/feincms_gallery
|
gallery/templatetags/gallery_tags.py
|
Python
|
bsd-3-clause
| 346
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import Client
from easy_split.experiments.models import (AnonymousVisitor, Experiment,
GoalRecord, GoalType)
from easy_split.experiments.tests.utils import TestCase
from easy_split.experiments.views import TRANSPARENT_1X1_PNG
class BugViewTest(TestCase):
urls = 'easy_split.experiments.tests.urls'
def call_goal(self, client, goal):
url = reverse('easy_split.experiments.views.record_experiment_goal',
args=[goal])
response = client.get(url)
self.assertEquals(response.status_code, 200)
self.assertEquals(response['Content-Type'], 'image/png')
self.assertEquals(response['Cache-Control'], 'max-age=0')
self.assertEquals(response.content, TRANSPARENT_1X1_PNG)
def testPngResponse(self):
experiment = Experiment(name="test-experiment")
experiment.save()
experiment.state = Experiment.ENABLED_STATE
experiment.save()
goal_type = GoalType(name='test-goal')
goal_type.save()
experiment_url = reverse("easy_split.experiments.tests.views.experiment_test",
args=[experiment.name])
confirm_human_url = reverse("easy_split.experiments.views.confirm_human")
client = Client()
        # we can call with invalid or nonexistent names, the response is the same
self.call_goal(client, '')
self.call_goal(client, 'unknown-goal')
# this is an anonymous visitor not enrolled in an experiment,
# so no records should be created
self.call_goal(client, goal_type.name)
self.assertEquals(0, GoalRecord.objects.filter(goal_type=goal_type).count())
nb_anonymous_visitors = AnonymousVisitor.objects.count()
# force the user to be a verified human
response = client.get(confirm_human_url)
self.assertEquals(response.status_code, 204)
# force the anonymous visitor to be enrolled in an experiment
response = client.get(experiment_url)
self.assertEquals(response.status_code, 200)
self.assertEquals(nb_anonymous_visitors + 1,
AnonymousVisitor.objects.count())
client.get('/test-experiment/%s' % experiment.name)
self.assertEquals(nb_anonymous_visitors + 1,
AnonymousVisitor.objects.count())
# now call an existing goal again - it should be recorded
self.call_goal(client, goal_type.name)
self.assertEquals(1, GoalRecord.objects.filter(goal_type=goal_type).count())
# should be recorded again
self.call_goal(client, goal_type.name)
self.assertEquals(2, GoalRecord.objects.filter(goal_type=goal_type).count())
# validate that both of the records have the same anonymous_visitor
two_goal_records = GoalRecord.objects.filter(goal_type=goal_type)
self.assertEquals(two_goal_records[0].anonymous_visitor,
two_goal_records[1].anonymous_visitor)
# try it with a registered user
client = Client()
user = User(username="testuser", email="testuser@example.com")
user.set_password("password")
user.save()
response = client.login(username=user.username, password='password')
self.assertTrue(response)
# force the registered user to be enrolled in an experiment
client.get('/test-experiment/%s' % experiment.name)
self.call_goal(client, goal_type.name)
# since the user was registered, no new records should be created
self.assertEquals(2, GoalRecord.objects.filter(goal_type=goal_type).count())
|
Miserlou/django-easy-split
|
easy_split/tests/test_bug_view.py
|
Python
|
bsd-3-clause
| 3,944
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import shutil
import sys
import tempfile
_SRC_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(_SRC_DIR, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils
import chrome_cache
import controller
import devtools_monitor
import device_setup
import loading_trace
import sandwich_metrics
_JOB_SEARCH_PATH = 'sandwich_jobs'
# Devtools timeout of 1 minute to avoid websocket timeout on slow
# network condition.
_DEVTOOLS_TIMEOUT = 60
def _ReadUrlsFromJobDescription(job_name):
"""Retrieves the list of URLs associated with the job name."""
try:
# Extra sugar: attempt to load from a relative path.
json_file_name = os.path.join(os.path.dirname(__file__), _JOB_SEARCH_PATH,
job_name)
with open(json_file_name) as f:
json_data = json.load(f)
except IOError:
# Attempt to read by regular file name.
with open(job_name) as f:
json_data = json.load(f)
key = 'urls'
if json_data and key in json_data:
url_list = json_data[key]
if isinstance(url_list, list) and len(url_list) > 0:
return url_list
raise Exception('Job description does not define a list named "urls"')
def _CleanPreviousTraces(output_directories_path):
"""Cleans previous traces from the output directory.
Args:
output_directories_path: The output directory path where to clean the
previous traces.
"""
for dirname in os.listdir(output_directories_path):
directory_path = os.path.join(output_directories_path, dirname)
if not os.path.isdir(directory_path):
continue
try:
int(dirname)
except ValueError:
continue
shutil.rmtree(directory_path)
class SandwichRunner(object):
"""Sandwich runner.
This object is meant to be configured first and then run using the Run()
  method. The runner can configure itself conveniently with parsed arguments
  using the PullConfigFromArgs() method. The only requirement is that the
  command line flags have their `dest` parameter set to existing runner members.
"""
def __init__(self):
"""Configures a sandwich runner out of the box.
Public members are meant to be configured as wished before calling Run().
    """
# Cache operation to do before doing the chrome navigation.
# Can be: clear,save,push,reload
self.cache_operation = 'clear'
# The cache archive's path to save to or push from. Is str or None.
self.cache_archive_path = None
# Controls whether the WPR server should do script injection.
self.disable_wpr_script_injection = False
# The job name. Is str.
self.job_name = '__unknown_job'
# Number of times to repeat the job.
self.job_repeat = 1
# Network conditions to emulate. None if no emulation.
self.network_condition = None
# Network condition emulator. Can be: browser,wpr
self.network_emulator = 'browser'
# Output directory where to save the traces. Is str or None.
self.trace_output_directory = None
# List of urls to run.
self.urls = []
# Configures whether to record speed-index video.
self.record_video = False
# Path to the WPR archive to load or save. Is str or None.
self.wpr_archive_path = None
# Configures whether the WPR archive should be read or generated.
self.wpr_record = False
self._chrome_ctl = None
self._local_cache_directory_path = None
def LoadJob(self, job_name):
self.job_name = job_name
self.urls = _ReadUrlsFromJobDescription(job_name)
def PullConfigFromArgs(self, args):
"""Configures the sandwich runner from parsed command line argument.
Args:
args: The command line parsed argument.
"""
for config_name in self.__dict__.keys():
if config_name in args.__dict__:
self.__dict__[config_name] = args.__dict__[config_name]
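  # Hedged usage sketch (not part of the original file): any parsed flag whose
  # `dest` matches a public member name overrides that member. The flag name
  # below is illustrative only.
  #
  #   parser.add_argument('--job-repeat', type=int, dest='job_repeat', default=1)
  #   runner = SandwichRunner()
  #   runner.PullConfigFromArgs(parser.parse_args())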
def PrintConfig(self):
"""Print the current sandwich runner configuration to stdout. """
for config_name in sorted(self.__dict__.keys()):
if config_name[0] != '_':
print '{} = {}'.format(config_name, self.__dict__[config_name])
def _CleanTraceOutputDirectory(self):
assert self.trace_output_directory
if not os.path.isdir(self.trace_output_directory):
try:
os.makedirs(self.trace_output_directory)
except OSError:
logging.error('Cannot create directory for results: %s',
self.trace_output_directory)
raise
else:
_CleanPreviousTraces(self.trace_output_directory)
def _SaveRunInfos(self, urls):
assert self.trace_output_directory
run_infos = {
'cache-op': self.cache_operation,
'job_name': self.job_name,
'urls': urls
}
with open(os.path.join(self.trace_output_directory, 'run_infos.json'),
'w') as file_output:
json.dump(run_infos, file_output, indent=2)
def _GetEmulatorNetworkCondition(self, emulator):
if self.network_emulator == emulator:
return self.network_condition
return None
def _RunNavigation(self, url, clear_cache, run_id=None):
"""Run a page navigation to the given URL.
Args:
url: The URL to navigate to.
      clear_cache: Whether the cache should be cleared before navigation.
run_id: Id of the run in the output directory. If it is None, then no
trace or video will be saved.
"""
run_path = None
if self.trace_output_directory is not None and run_id is not None:
run_path = os.path.join(self.trace_output_directory, str(run_id))
if not os.path.isdir(run_path):
os.makedirs(run_path)
self._chrome_ctl.SetNetworkEmulation(
self._GetEmulatorNetworkCondition('browser'))
# TODO(gabadie): add a way to avoid recording a trace.
with self._chrome_ctl.Open() as connection:
if clear_cache:
connection.ClearCache()
if run_path is not None and self.record_video:
device = self._chrome_ctl.GetDevice()
assert device, 'Can only record video on a remote device.'
video_recording_path = os.path.join(run_path, 'video.mp4')
with device_setup.RemoteSpeedIndexRecorder(device, connection,
video_recording_path):
trace = loading_trace.LoadingTrace.RecordUrlNavigation(
url=url,
connection=connection,
chrome_metadata=self._chrome_ctl.ChromeMetadata(),
categories=sandwich_metrics.CATEGORIES,
timeout_seconds=_DEVTOOLS_TIMEOUT)
else:
trace = loading_trace.LoadingTrace.RecordUrlNavigation(
url=url,
connection=connection,
chrome_metadata=self._chrome_ctl.ChromeMetadata(),
categories=sandwich_metrics.CATEGORIES,
timeout_seconds=_DEVTOOLS_TIMEOUT)
if run_path is not None:
trace_path = os.path.join(run_path, 'trace.json')
trace.ToJsonFile(trace_path)
def _RunUrl(self, url, run_id):
clear_cache = False
if self.cache_operation == 'clear':
clear_cache = True
elif self.cache_operation == 'push':
self._chrome_ctl.PushBrowserCache(self._local_cache_directory_path)
elif self.cache_operation == 'reload':
self._RunNavigation(url, clear_cache=True)
elif self.cache_operation == 'save':
clear_cache = run_id == 0
self._RunNavigation(url, clear_cache=clear_cache, run_id=run_id)
def _PullCacheFromDevice(self):
assert self.cache_operation == 'save'
assert self.cache_archive_path, 'Need to specify where to save the cache'
cache_directory_path = self._chrome_ctl.PullBrowserCache()
chrome_cache.ZipDirectoryContent(
cache_directory_path, self.cache_archive_path)
shutil.rmtree(cache_directory_path)
def Run(self):
"""SandwichRunner main entry point meant to be called once configured."""
    assert self._chrome_ctl is None
    assert self._local_cache_directory_path is None
if self.trace_output_directory:
self._CleanTraceOutputDirectory()
# TODO(gabadie): Make sandwich working on desktop.
device = device_utils.DeviceUtils.HealthyDevices()[0]
self._chrome_ctl = controller.RemoteChromeController(device)
self._chrome_ctl.AddChromeArgument('--disable-infobars')
if self.cache_operation == 'save':
self._chrome_ctl.SetSlowDeath()
if self.cache_operation == 'push':
assert os.path.isfile(self.cache_archive_path)
self._local_cache_directory_path = tempfile.mkdtemp(suffix='.cache')
chrome_cache.UnzipDirectoryContent(
self.cache_archive_path, self._local_cache_directory_path)
ran_urls = []
with self._chrome_ctl.OpenWprHost(self.wpr_archive_path,
record=self.wpr_record,
network_condition_name=self._GetEmulatorNetworkCondition('wpr'),
disable_script_injection=self.disable_wpr_script_injection
):
for _ in xrange(self.job_repeat):
for url in self.urls:
self._RunUrl(url, run_id=len(ran_urls))
ran_urls.append(url)
if self._local_cache_directory_path:
shutil.rmtree(self._local_cache_directory_path)
self._local_cache_directory_path = None
if self.cache_operation == 'save':
self._PullCacheFromDevice()
if self.trace_output_directory:
self._SaveRunInfos(ran_urls)
self._chrome_ctl = None
|
was4444/chromium.src
|
tools/android/loading/sandwich_runner.py
|
Python
|
bsd-3-clause
| 9,630
|
#! /usr/bin/env python3
"Replace CRLF with LF in argument files. Print names of changed files."
import sys, os
def main():
for filename in sys.argv[1:]:
if os.path.isdir(filename):
print(filename, "Directory!")
continue
data = open(filename, "rb").read()
        if b'\0' in data:
            print(filename, "Binary!")
            continue
        # the file was read in binary mode, so work on bytes
        newdata = data.replace(b"\r\n", b"\n")
if newdata != data:
print(filename)
f = open(filename, "wb")
f.write(newdata)
f.close()
if __name__ == '__main__':
main()
|
mhubig/intelhex
|
tools/crlf.py
|
Python
|
bsd-3-clause
| 615
|
"""
Tests for the following offsets:
- CustomBusinessMonthBase
- CustomBusinessMonthBegin
- CustomBusinessMonthEnd
"""
from __future__ import annotations
from datetime import (
date,
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
_CustomBusinessMonth,
)
from pandas import (
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
def setup_method(self):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask="Mon Wed Fri")
assert off == off.copy()
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset: type[_CustomBusinessMonth] = CBMonthBegin
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthBegin>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
apply_cases: _ApplyCases = [
(
CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3),
},
),
(
2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1),
},
),
(
-CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1),
},
),
(
-2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1),
},
),
(
CBMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=cbmb).tolist()[
0
] == datetime(2012, 1, 3)
@pytest.mark.parametrize(
"case",
[
(
CBMonthBegin(n=1, offset=timedelta(days=5)),
{
datetime(2021, 3, 1): datetime(2021, 4, 1) + timedelta(days=5),
datetime(2021, 4, 17): datetime(2021, 5, 3) + timedelta(days=5),
},
),
(
CBMonthBegin(n=2, offset=timedelta(days=40)),
{
datetime(2021, 3, 10): datetime(2021, 5, 3) + timedelta(days=40),
datetime(2021, 4, 30): datetime(2021, 6, 1) + timedelta(days=40),
},
),
(
CBMonthBegin(n=1, offset=timedelta(days=-5)),
{
datetime(2021, 3, 1): datetime(2021, 4, 1) - timedelta(days=5),
datetime(2021, 4, 11): datetime(2021, 5, 3) - timedelta(days=5),
},
),
(
-2 * CBMonthBegin(n=1, offset=timedelta(days=10)),
{
datetime(2021, 3, 1): datetime(2021, 1, 1) + timedelta(days=10),
datetime(2021, 4, 3): datetime(2021, 3, 1) + timedelta(days=10),
},
),
(
CBMonthBegin(n=0, offset=timedelta(days=1)),
{
datetime(2021, 3, 2): datetime(2021, 4, 1) + timedelta(days=1),
datetime(2021, 4, 1): datetime(2021, 4, 1) + timedelta(days=1),
},
),
(
CBMonthBegin(
n=1, holidays=["2021-04-01", "2021-04-02"], offset=timedelta(days=1)
),
{
datetime(2021, 3, 2): datetime(2021, 4, 5) + timedelta(days=1),
},
),
],
)
def test_apply_with_extra_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset: type[_CustomBusinessMonth] = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthEnd>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, d, expected = case
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
(
2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31),
},
),
(
-CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31),
},
),
(
-2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31),
},
),
(
CBMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
0
] == datetime(2012, 1, 31)
@pytest.mark.parametrize(
"case",
[
(
CBMonthEnd(n=1, offset=timedelta(days=5)),
{
datetime(2021, 3, 1): datetime(2021, 3, 31) + timedelta(days=5),
datetime(2021, 4, 17): datetime(2021, 4, 30) + timedelta(days=5),
},
),
(
CBMonthEnd(n=2, offset=timedelta(days=40)),
{
datetime(2021, 3, 10): datetime(2021, 4, 30) + timedelta(days=40),
datetime(2021, 4, 30): datetime(2021, 6, 30) + timedelta(days=40),
},
),
(
CBMonthEnd(n=1, offset=timedelta(days=-5)),
{
datetime(2021, 3, 1): datetime(2021, 3, 31) - timedelta(days=5),
datetime(2021, 4, 11): datetime(2021, 4, 30) - timedelta(days=5),
},
),
(
-2 * CBMonthEnd(n=1, offset=timedelta(days=10)),
{
datetime(2021, 3, 1): datetime(2021, 1, 29) + timedelta(days=10),
datetime(2021, 4, 3): datetime(2021, 2, 26) + timedelta(days=10),
},
),
(
CBMonthEnd(n=0, offset=timedelta(days=1)),
{
datetime(2021, 3, 2): datetime(2021, 3, 31) + timedelta(days=1),
datetime(2021, 4, 1): datetime(2021, 4, 30) + timedelta(days=1),
},
),
(
CBMonthEnd(n=1, holidays=["2021-03-31"], offset=timedelta(days=1)),
{
datetime(2021, 3, 2): datetime(2021, 3, 30) + timedelta(days=1),
},
),
],
)
def test_apply_with_extra_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
|
pandas-dev/pandas
|
pandas/tests/tseries/offsets/test_custom_business_month.py
|
Python
|
bsd-3-clause
| 14,134
|
from typing import Dict, Optional, Tuple, Union
from autosklearn.pipeline.base import DATASET_PROPERTIES_TYPE, PIPELINE_DATA_DTYPE
from autosklearn.pipeline.constants import DENSE, UNSIGNED_DATA, INPUT, SPARSE
from autosklearn.pipeline.components.data_preprocessing.rescaling.abstract_rescaling \
import Rescaling
from autosklearn.pipeline.components.base import AutoSklearnPreprocessingAlgorithm
class NoRescalingComponent(Rescaling, AutoSklearnPreprocessingAlgorithm):
def fit(self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE] = None
) -> 'AutoSklearnPreprocessingAlgorithm':
self.preprocessor = 'passthrough'
return self
def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
return X
@staticmethod
def get_properties(dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {'shortname': 'NoRescaling',
'name': 'NoRescaling',
'handles_missing_values': False,
'handles_nominal_values': False,
'handles_numerical_features': True,
'prefers_data_scaled': False,
'prefers_data_normalized': False,
'handles_regression': True,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'handles_multioutput': True,
'is_deterministic': True,
# TODO find out if this is right!
'handles_sparse': True,
'handles_dense': True,
'input': (SPARSE, DENSE, UNSIGNED_DATA),
'output': (INPUT,),
'preferred_dtype': None}
|
automl/auto-sklearn
|
autosklearn/pipeline/components/data_preprocessing/rescaling/none.py
|
Python
|
bsd-3-clause
| 1,808
|
# -*- coding: utf-8 -*-
#
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
import collections.abc
from contextlib import contextmanager
import platform
import signal
import sys
from decorator import decorator
import numpy as np
VALID_BROWSE_BACKENDS = (
'qt',
'matplotlib',
)
VALID_3D_BACKENDS = (
'pyvistaqt', # default 3d backend
'notebook',
)
ALLOWED_QUIVER_MODES = ('2darrow', 'arrow', 'cone', 'cylinder', 'sphere',
'oct')
def _get_colormap_from_array(colormap=None, normalized_colormap=False,
default_colormap='coolwarm'):
from matplotlib import cm
from matplotlib.colors import ListedColormap
if colormap is None:
cmap = cm.get_cmap(default_colormap)
elif isinstance(colormap, str):
cmap = cm.get_cmap(colormap)
elif normalized_colormap:
cmap = ListedColormap(colormap)
else:
cmap = ListedColormap(np.array(colormap) / 255.0)
return cmap
def _check_color(color):
from matplotlib.colors import colorConverter
if isinstance(color, str):
color = colorConverter.to_rgb(color)
elif isinstance(color, collections.abc.Iterable):
np_color = np.array(color)
if np_color.size % 3 != 0 and np_color.size % 4 != 0:
raise ValueError("The expected valid format is RGB or RGBA.")
if np_color.dtype in (np.int64, np.int32):
if (np_color < 0).any() or (np_color > 255).any():
raise ValueError("Values out of range [0, 255].")
elif np_color.dtype == np.float64:
if (np_color < 0.0).any() or (np_color > 1.0).any():
raise ValueError("Values out of range [0.0, 1.0].")
else:
raise TypeError("Expected data type is `np.int64`, `np.int32`, or "
"`np.float64` but {} was given."
.format(np_color.dtype))
else:
raise TypeError("Expected type is `str` or iterable but "
"{} was given.".format(type(color)))
return color
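# Hedged usage sketch (not part of the original module): string names resolve
# to RGB tuples, while valid numeric sequences are returned unchanged.
#
#   >>> _check_color('red')
#   (1.0, 0.0, 0.0)
#   >>> _check_color([1.0, 0.5, 0.0])
#   [1.0, 0.5, 0.0]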
def _alpha_blend_background(ctable, background_color):
alphas = ctable[:, -1][:, np.newaxis] / 255.
use_table = ctable.copy()
use_table[:, -1] = 255.
return (use_table * alphas) + background_color * (1 - alphas)
@decorator
def run_once(fun, *args, **kwargs):
"""Run the function only once."""
if not hasattr(fun, "_has_run"):
fun._has_run = True
return fun(*args, **kwargs)
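# Hedged usage sketch (not part of the original module): a decorated function
# executes its body only on the first call; later calls return None because
# the guarded branch is skipped. The function `announce` is illustrative only.
#
#   >>> @run_once
#   ... def announce():
#   ...     return 'initialized'
#   >>> announce()
#   'initialized'
#   >>> announce() is None  # later calls skip the body
#   True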
@run_once
def _init_qt_resources():
from ...icons import resources
resources.qInitResources()
@contextmanager
def _qt_disable_paint(widget):
paintEvent = widget.paintEvent
widget.paintEvent = lambda *args, **kwargs: None
try:
yield
finally:
widget.paintEvent = paintEvent
def _init_mne_qtapp(enable_icon=True, pg_app=False):
"""Get QApplication-instance for MNE-Python.
    Parameters
    ----------
    enable_icon: bool
        Whether to set an MNE icon for the app.
    pg_app: bool
        Whether to create the QApplication with pyqtgraph. For an as yet
        unexplained reason, the pyqtgraph browser won't show without
        mkQApp from pyqtgraph.
Returns
-------
app: ``PyQt5.QtWidgets.QApplication``
Instance of QApplication.
"""
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QIcon
app_name = 'MNE-Python'
organization_name = 'MNE'
# Fix from cbrnr/mnelab for app name in menu bar
# This has to come *before* the creation of the QApplication to work.
# It also only affects the title bar, not the application dock.
# There seems to be no way to change the application dock from "python"
# at runtime.
if sys.platform.startswith("darwin"):
try:
# set bundle name on macOS (app name shown in the menu bar)
from Foundation import NSBundle
bundle = NSBundle.mainBundle()
info = bundle.localizedInfoDictionary() or bundle.infoDictionary()
info["CFBundleName"] = app_name
except ModuleNotFoundError:
pass
if pg_app:
from pyqtgraph import mkQApp
app = mkQApp(app_name)
else:
app = QApplication.instance() or QApplication(sys.argv or [app_name])
app.setApplicationName(app_name)
app.setOrganizationName(organization_name)
if enable_icon:
# Set icon
_init_qt_resources()
kind = 'bigsur-' if platform.mac_ver()[0] >= '10.16' else ''
app.setWindowIcon(QIcon(f":/mne-{kind}icon.png"))
return app
# https://stackoverflow.com/questions/5160577/ctrl-c-doesnt-work-with-pyqt
def _qt_app_exec(app):
# adapted from matplotlib
old_signal = signal.getsignal(signal.SIGINT)
is_python_signal_handler = old_signal is not None
if is_python_signal_handler:
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
app.exec_()
finally:
# reset the SIGINT exception handler
if is_python_signal_handler:
signal.signal(signal.SIGINT, old_signal)
|
larsoner/mne-python
|
mne/viz/backends/_utils.py
|
Python
|
bsd-3-clause
| 5,283
|
# Copyright (c) James Percent, Byron Galbraith and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import logging
import time
class UnlockState(object):
def __init__(self, state=None):
super(UnlockState, self).__init__()
self.state = state
self.running = False
def start(self):
self.running = True
def stop(self):
self.running = False
def is_stopped(self):
return self.running
def get_state(self):
return self.state
def process_command(self, command):
''' Subclass hook '''
pass
class UnlockStateChain(UnlockState):
def __init__(self, states):
super(UnlockStateChain, self).__init__()
self.states = states
def start(self):
for state in self.states:
if state is not None:
state.start()
def stop(self):
for state in self.states:
if state is not None:
state.stop()
def process_command(self, command):
for state in self.states:
if state is not None:
state.process_command(command)
class AlternatingBinaryState(UnlockState):
def __init__(self, hold_duration=300):
self.hold_duration = hold_duration
self.state = True
self.count = 0
def get_state(self):
ret = self.state
self.count += 1
if self.count % self.hold_duration == 0:
self.state = not self.state
return ret
class OfflineData(UnlockState):
def __init__(self, output_file_prefix, cache_size=1):
super(OfflineData, self).__init__()
self.output_file_prefix = output_file_prefix
self.file_handle = None
self.logger = logging.getLogger(__name__)
self.cache = list(range(cache_size))
self.cache_size = cache_size
self.current = 0
self.last_invalid = 0
self.invalid_count = 0
    def cache_command(self, command):
        # distinct name: the instance attribute ``self.cache`` (a list)
        # shadows any method called ``cache`` on instances
        self.cache[self.current] = command.matrix
        # advance the ring-buffer index, wrapping at cache_size
        self.current = (self.current + 1) % self.cache_size
def process_command(self, command):
if self.file_handle is None:
self.logger.warning("state not started")
return
if command.is_valid():
np.savetxt(self.file_handle, command.matrix, fmt='%d', delimiter='\t')
else:
if (time.time() - self.last_invalid) < 1.5:
self.invalid_count += 1
else:
msg = 'invalid command cannot be logged'
if self.invalid_count > 0:
msg += '; logging attempted '+str(self.invalid_count)+' times in the last 1.5 secs'
self.logger.warning(msg)
self.invalid_count = 0
self.last_invalid = time.time()
#else:
#XXX - hack for test
# a = np.array([0,0,0,0,0,0])
# a = np.hstack(a)
# np.savetxt(self.file_handle, a, fmt='%d', delimiter='\t')
def get_state(self):
raise NotImplementedError()
def start(self):
if self.file_handle is None:
self.file_handle = open("%s_%d.txt" % (self.output_file_prefix, time.time()), 'wb')
else:
self.logger.warning("starting already stared state machine")
def stop(self):
if self.file_handle is None:
self.logger.warning('state already stopped')
return
self.file_handle.flush()
self.file_handle.close()
self.file_handle = None
class OfflineTrialData(OfflineData):
def __init__(self, output_file_prefix, cache_size=1):
super(OfflineTrialData, self).__init__(output_file_prefix, cache_size)
self.commands = []
def process_command(self, command):
        assert self.file_handle is not None
        if not command.is_valid():
            return
        if command.decision is not None:
            # a decision ends the trial: flush each buffered command's
            # matrix to the log file, then cache the deciding command
            for buffered in self.commands:
                np.savetxt(self.file_handle, buffered.matrix, fmt='%d', delimiter='\t')
            self.cache_command(command)
self.commands = []
else:
self.commands.append(command)
class NonBlockingOfflineData(UnlockState):
def __init__(self):
raise NotImplementedError()
class RunState(object):
Stopped = 0
Running = 1
Resting = 2
def __init__(self):
self.state = RunState.Stopped
def run(self):
self.state = RunState.Running
def rest(self):
self.state = RunState.Resting
def stop(self):
self.state = RunState.Stopped
def is_running(self):
return self.state == RunState.Running
def is_resting(self):
return self.state == RunState.Resting
def is_stopped(self):
return self.state == RunState.Stopped
class TimerState(object):
"""
    A timer driven by the variable time deltas coming from the system.
    If the timer duration is small, i.e. < 100 ms, jitter in the delta
    value can cause problems. Keeping the residual time instead of doing a
    full reset has been shown to give better accuracy in this case.
"""
def __init__(self, duration):
self.duration = float(duration)
self.reset = lambda t: 0
if self.duration < 0.1:
self.reset = lambda t: t - self.duration
self.elapsed = 0
self.last_time = -1
def begin_timer(self):
# TODO: smarter time adjustment strategy
self.elapsed = self.reset(self.elapsed)
self.last_time = time.time()
def update_timer(self, delta):
self.elapsed += delta
def is_complete(self):
return self.elapsed >= self.duration
def set_duration(self, duration):
self.duration = float(duration)
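# Hedged usage sketch (not part of the original module): drive the timer with
# externally measured deltas; the numbers below are illustrative only.
#
#   >>> timer = TimerState(0.5)
#   >>> timer.begin_timer()
#   >>> timer.update_timer(0.3)
#   >>> timer.is_complete()
#   False
#   >>> timer.update_timer(0.3)
#   >>> timer.is_complete()
#   True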
class FrameCountTimerState(object):
"""
A timer based off the variable time deltas coming from the system.
In the event the timer duration is small i.e. < 100ms, jitter in the delta
value can cause problems. Keeping the residual time instead of a full
reset has been shown to have better accuracy in this case.
"""
def __init__(self, duration_in_frames):
assert duration_in_frames >= 1
self.duration = int(60/int(duration_in_frames))
self.elapsed = 0
def begin_timer(self):
self.elapsed = 0
def update_timer(self, delta):
self.elapsed += 1
def is_complete(self):
return self.elapsed >= self.duration
def set_duration(self, duration_in_frames):
self.duration = int(duration_in_frames)
# XXX this can be refactored. there is no need for a rest state. this can be implemented such
# that the trials are 'stacked'. then rests can be stacked with trials abstractly. this
# would help to optimize cases that have no rest condition; currently a bunch of statements are
# processed to handle that case.
class TrialState(object):
Unchanged = 0
TrialExpiry = 1
RestExpiry = 2
def __init__(self, trial_timer, rest_timer, run_state):
super(TrialState, self).__init__()
self.trial_timer = trial_timer
self.rest_timer = rest_timer
self.run_state = run_state
self.active_timer = self.trial_timer
self.state_change = False
self.last_change = TrialState.Unchanged
def state_change_fn():
change_value = TrialState.Unchanged
if self.active_timer.is_complete():
if self.run_state.is_running():
self.run_state.rest()
self.active_timer = self.rest_timer
change_value = TrialState.TrialExpiry
elif self.run_state.is_resting():
self.run_state.run()
self.active_timer = self.trial_timer
change_value = TrialState.RestExpiry
self.active_timer.begin_timer()
if change_value != TrialState.Unchanged:
self.state_change = True
self.last_change = change_value
return self.run_state.state, change_value
self.update_state_table = state_change_fn
def update_state(self, delta):
self.active_timer.update_timer(delta)
return self.update_state_table()
def start(self):
self.active_timer = self.trial_timer
self.last_change = TrialState.Unchanged
self.run_state.run()
self.active_timer.begin_timer()
def stop(self):
self.run_state.stop()
def is_stopped(self):
return self.run_state.is_stopped()
def get_state(self):
if self.state_change:
self.state_change = False
ret = self.last_change
self.last_change = TrialState.Unchanged
return ret
class SequenceState(object):
def __init__(self, sequence, value_transformer_fn=lambda x: x):
self.sequence = sequence
self.value_transformer_fn = value_transformer_fn
self.index = 0
def start(self):
self.index = 0
def step(self):
self.index += 1
if self.index == len(self.sequence):
self.start()
def state(self):
return self.value_transformer_fn(self.sequence[self.index])
    def is_start(self):
        return self.index == 0
    def is_end(self):
        return self.index + 1 == len(self.sequence)
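# Hedged usage sketch (not part of the original module): the state cycles
# through the sequence, applying the optional transformer on every read.
#
#   >>> seq = SequenceState([1, 0, 0], value_transformer_fn=bool)
#   >>> seq.state(), seq.is_start()
#   (True, True)
#   >>> seq.step(); seq.state()
#   False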
|
NeuralProsthesisLab/unlock
|
unlock/state/state.py
|
Python
|
bsd-3-clause
| 11,485
|
"""Defines application models"""
|
dailymuse/oz
|
oz/skeleton/plugin/models.py
|
Python
|
bsd-3-clause
| 32
|
"""
An example usage for `SoloThreadedTask`.
Every time `MyThreadedList.load` is called, the previous job is cancelled
before the next job is allowed to start.
"""
import os
import sys
qconcurrency_path = '/'.join(os.path.realpath(__file__).replace('\\','/').split('/')[:-2])
sys.path.insert(0, qconcurrency_path )
from qconcurrency.threading_ import ThreadedTask, SoloThreadedTask
from Qt import QtCore, QtWidgets
import six
import time
class MyThreadedList( QtWidgets.QListWidget ):
def __init__(self):
QtWidgets.QListWidget.__init__(self)
self._sem_rendering = QtCore.QSemaphore(1)
self._thread_loading = SoloThreadedTask(
callback = self._find_list_items,
signals = {
'add_item': str,
'clear': None,
},
connections = {
'add_item':[self.addItem],
'clear': [self.clear],
},
)
def load(self):
"""
Loads list with items from a separate thread.
If load is already in progress, it will be cancelled
before the new load request starts.
"""
self._thread_loading.start()
def _find_list_items(self, signalmgr=None ):
"""
        Adds 100 items to the list-widget.
"""
signalmgr.clear.emit()
for i in range(100):
signalmgr.handle_if_abort() # check for a request-abort, and exit early
# slow signals down, emitting one object at a time
# so that job can be cancelled.
# ( signals have no priority, once emitted, )
# ( you must wait for them all to be handled )
self._sem_rendering.tryAcquire(1,5000)
signalmgr.add_item.emit( str(i) ) # add an item to the list
def addItem(self, item):
"""
        Unnecessarily waits 0.01 seconds (so you can see the widget updating live),
then adds the item to the list.
"""
time.sleep(0.01)
try:
            QtWidgets.QListWidget.addItem(self, item)
        except:
            # release the rendering semaphore so the producer thread is not
            # left blocked, then re-raise the original exception
            self._sem_rendering.release(1)
            six.reraise(*sys.exc_info())
if not self._sem_rendering.available():
self._sem_rendering.release(1)
# in this example, the wait is performed
# in the UI thread - it never has a chance to process
# QApp events (causing item to be rendered) until it runs
# out of signals to fire. It is unlikely you will
# need the following line in your production code.
#
QtCore.QCoreApplication.instance().processEvents()
if __name__ == '__main__':
from qconcurrency import QApplication
from qconcurrency.threading_ import ThreadedTask
import supercli.logging
import time
supercli.logging.SetLog( lv=20 )
with QApplication():
# create/load the list
mylist = MyThreadedList()
mylist.show()
mylist.load()
# from a separate thread (so that it is visible)
# continuously reload the list
def multiload_list( listwidget, signalmgr=None ):
for i in range(3):
time.sleep(0.5)
listwidget.load()
task = ThreadedTask( multiload_list, listwidget=mylist )
task.start()
|
willjp/pyqconcurrency
|
examples/threadedlist.py
|
Python
|
bsd-3-clause
| 3,366
|
__version__ = '3.4.9'
|
yakky/django-filebrowser-no-grappelli
|
filebrowser/__init__.py
|
Python
|
bsd-3-clause
| 22
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^donate/$', views.donate, name='donate'),
url(r'^thank-you/(?P<donation>[\w]+)/$', views.thank_you, name='thank-you'),
url(r'^manage-donations/(?P<hero>[\w]+)/$', views.manage_donations, name='manage-donations'),
url(r'^manage-donations/(?P<hero>[\w]+)/cancel/$', views.cancel_donation, name='cancel-donation'),
url(r'^receive-webhook/$', views.receive_webhook, name='receive-webhook'),
url(r'^update-card/$', views.update_card, name='update-card'),
]
|
xavierdutreilh/djangoproject.com
|
fundraising/urls.py
|
Python
|
bsd-3-clause
| 596
|
from os import chmod, environ, path as os_path
from subprocess import call as call_subprocess
from tempfile import NamedTemporaryFile
def generate_pdf(html='', url=''):
# Validate input
if not html and not url:
raise ValueError('Must pass HTML or specify a URL')
if html and url:
raise ValueError('Must pass HTML or specify a URL, not both')
wkhtmltopdf_default = 'wkhtmltopdf-heroku'
# Reference command
wkhtmltopdf_cmd = environ.get('WKHTMLTOPDF_CMD', wkhtmltopdf_default)
# Set up return file
pdf_file = NamedTemporaryFile(delete=False, suffix='.pdf')
if html:
# Save the HTML to a temp file
html_file = NamedTemporaryFile(delete=False, suffix='.html')
html_file.write(html)
html_file.close()
# wkhtmltopdf
call_subprocess([wkhtmltopdf_cmd, '-q', html_file.name, pdf_file.name])
else:
# wkhtmltopdf, using URL
call_subprocess([wkhtmltopdf_cmd, '-q', url, pdf_file.name])
return pdf_file
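# Hedged usage sketch (not part of the original module): assumes a wkhtmltopdf
# binary is resolvable via WKHTMLTOPDF_CMD or the bundled default name.
#
#   pdf = generate_pdf(html='<h1>Hello</h1>')
#   print(pdf.name)  # path to the rendered PDF file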
|
jwmayfield/pywkher
|
pywkher/__init__.py
|
Python
|
bsd-3-clause
| 1,022
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.profiler
~~~~~~~~~~~~~~~~~~~~~~~~~
    This module provides a simple WSGI profiler middleware for finding
    bottlenecks in web applications. It uses the :mod:`profile` or
    :mod:`cProfile` module to do the profiling and writes the stats to the
    stream provided (defaults to ``sys.stdout``).
Example usage::
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import time
import os.path
try:
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
available = True
except ImportError:
available = False
class MergeStream(object):
"""An object that redirects `write` calls to multiple streams.
Use this to log to both `sys.stdout` and a file::
f = open('profiler.log', 'w')
stream = MergeStream(sys.stdout, f)
profiler = ProfilerMiddleware(app, stream)
"""
def __init__(self, *streams):
if not streams:
raise TypeError('at least one stream must be given')
self.streams = streams
def write(self, data):
for stream in self.streams:
stream.write(data)
class ProfilerMiddleware(object):
"""Simple profiler middleware. Wraps a WSGI application and profiles
a request. This intentionally buffers the response so that timings are
more exact.
By giving the `profile_dir` argument, pstat.Stats files are saved to that
directory, one file per request. Without it, a summary is printed to
`stream` instead.
For the exact meaning of `sort_by` and `restrictions` consult the
:mod:`profile` documentation.
.. versionadded:: 0.9
Added support for `restrictions` and `profile_dir`.
:param app: the WSGI application to profile.
    :param stream: the stream for the profiled stats. defaults to ``sys.stdout``.
    :param sort_by: a tuple of columns to sort the result by.
    :param restrictions: a tuple of profiling restrictions, not used if dumping
                         to `profile_dir`.
:param profile_dir: directory name to save pstat files
:param filter: a function taking argument environ, returns true if this
request should be profiled
"""
def __init__(self, app, stream=None,
sort_by=('time', 'calls'), restrictions=(), profile_dir=None,
filter=None):
if not available:
raise RuntimeError('the profiler is not available because '
'profile or pstat is not installed.')
self._app = app
self._stream = stream or sys.stdout
self._sort_by = sort_by
self._restrictions = restrictions
self._profile_dir = profile_dir
self._filter = filter
def __call__(self, environ, start_response):
if not (self._filter is None or self._filter(environ)):
return self._app(environ, start_response)
response_body = []
def catching_start_response(status, headers, exc_info=None):
start_response(status, headers, exc_info)
return response_body.append
def runapp():
appiter = self._app(environ, catching_start_response)
response_body.extend(appiter)
if hasattr(appiter, 'close'):
appiter.close()
p = Profile()
start = time.time()
p.runcall(runapp)
body = b''.join(response_body)
elapsed = time.time() - start
if self._profile_dir is not None:
prof_filename = os.path.join(self._profile_dir,
'%s.%s.%06dms.%d.prof' % (
environ['REQUEST_METHOD'],
environ.get('PATH_INFO').strip(
'/').replace('/', '.') or 'root',
elapsed * 1000.0,
time.time()
))
p.dump_stats(prof_filename)
else:
stats = Stats(p, stream=self._stream)
stats.sort_stats(*self._sort_by)
self._stream.write('-' * 80)
self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
stats.print_stats(*self._restrictions)
self._stream.write('-' * 80 + '\n\n')
return [body]
def make_action(app_factory, hostname='localhost', port=5000,
threaded=False, processes=1, stream=None,
sort_by=('time', 'calls'), restrictions=()):
"""Return a new callback for :mod:`werkzeug.script` that starts a local
server with the profiler enabled.
::
from werkzeug.contrib import profiler
action_profile = profiler.make_action(make_app)
"""
def action(hostname=('h', hostname), port=('p', port),
threaded=threaded, processes=processes):
"""Start a new development server."""
from werkzeug.serving import run_simple
app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
run_simple(hostname, port, app, False, None, threaded, processes)
return action
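# Hedged usage sketch (not part of the original module): with `profile_dir`
# set, one pstats file is dumped per request instead of printing a summary.
# The directory name is illustrative and must already exist.
#
#   app = ProfilerMiddleware(app, profile_dir='./profiles')
#   # inspect a dump later with: python -m pstats ./profiles/<file>.prof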
|
deadly11ama/werkzeug
|
werkzeug/contrib/profiler.py
|
Python
|
bsd-3-clause
| 5,453
|
"""Context library - providing usefull context managers."""
import contextlib
@contextlib.contextmanager
def suppress(*exceptions):
"""Ignore an exception or exception list.
Usage::
with suppress(OSError):
os.remove('filename.txt')
"""
try:
yield
except exceptions:
pass
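# Hedged usage sketch (not part of the original module): several exception
# types can be suppressed at once by passing them all.
#
#   >>> with suppress(KeyError, IndexError):
#   ...     {}['missing']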
|
alefnula/tea
|
tea/ctx/__init__.py
|
Python
|
bsd-3-clause
| 332
|
import datetime as dt
from unittest import skipIf
import numpy as np
from holoviews.core.overlay import NdOverlay
from holoviews.core.util import pd
from holoviews.element import Curve
from holoviews.util.transform import dim
from .test_plot import TestMPLPlot, mpl_renderer
pd_skip = skipIf(pd is None, 'Pandas is not available')
class TestCurvePlot(TestMPLPlot):
def test_curve_datetime64(self):
dates = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
curve = Curve((dates, np.random.rand(10)))
plot = mpl_renderer.get_plot(curve)
self.assertEqual(plot.handles['axis'].get_xlim(), (16801.0, 16810.0))
@pd_skip
def test_curve_pandas_timestamps(self):
dates = pd.date_range('2016-01-01', '2016-01-10', freq='D')
curve = Curve((dates, np.random.rand(10)))
plot = mpl_renderer.get_plot(curve)
self.assertEqual(plot.handles['axis'].get_xlim(), (16801.0, 16810.0))
def test_curve_dt_datetime(self):
dates = [dt.datetime(2016,1,i) for i in range(1, 11)]
curve = Curve((dates, np.random.rand(10)))
plot = mpl_renderer.get_plot(curve)
self.assertEqual(tuple(map(round, plot.handles['axis'].get_xlim())), (16801.0, 16810.0))
def test_curve_heterogeneous_datetime_types_overlay(self):
dates64 = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
dates = [dt.datetime(2016,1,i) for i in range(2, 12)]
curve_dt64 = Curve((dates64, np.random.rand(10)))
curve_dt = Curve((dates, np.random.rand(10)))
plot = mpl_renderer.get_plot(curve_dt*curve_dt64)
self.assertEqual(tuple(map(round, plot.handles['axis'].get_xlim())), (16801.0, 16811.0))
@pd_skip
def test_curve_heterogeneous_datetime_types_with_pd_overlay(self):
dates_pd = pd.date_range('2016-01-04', '2016-01-13', freq='D')
dates64 = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
dates = [dt.datetime(2016,1,i) for i in range(2, 12)]
curve_dt64 = Curve((dates64, np.random.rand(10)))
curve_dt = Curve((dates, np.random.rand(10)))
curve_pd = Curve((dates_pd, np.random.rand(10)))
plot = mpl_renderer.get_plot(curve_dt*curve_dt64*curve_pd)
self.assertEqual(plot.handles['axis'].get_xlim(), (16801.0, 16813.0))
def test_curve_padding_square(self):
curve = Curve([1, 2, 3]).options(padding=0.1)
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.2)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_curve_padding_square_per_axis(self):
curve = Curve([1, 2, 3]).options(padding=((0, 0.1), (0.1, 0.2)))
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.4)
def test_curve_padding_hard_xrange(self):
curve = Curve([1, 2, 3]).redim.range(x=(0, 3)).options(padding=0.1)
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0)
self.assertEqual(x_range[1], 3)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_curve_padding_soft_xrange(self):
curve = Curve([1, 2, 3]).redim.soft_range(x=(0, 3)).options(padding=0.1)
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0)
self.assertEqual(x_range[1], 3)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_curve_padding_unequal(self):
curve = Curve([1, 2, 3]).options(padding=(0.05, 0.1))
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.1)
self.assertEqual(x_range[1], 2.1)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_curve_padding_nonsquare(self):
curve = Curve([1, 2, 3]).options(padding=0.1, aspect=2)
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.1)
self.assertEqual(x_range[1], 2.1)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_curve_padding_logx(self):
curve = Curve([(1, 1), (2, 2), (3,3)]).options(padding=0.1, logx=True)
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.89595845984076228)
self.assertEqual(x_range[1], 3.3483695221017129)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_curve_padding_logy(self):
curve = Curve([1, 2, 3]).options(padding=0.1, logy=True)
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.2)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.89595845984076228)
self.assertEqual(y_range[1], 3.3483695221017129)
def test_curve_padding_datetime_square(self):
curve = Curve([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1
)
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 16891.8)
self.assertEqual(x_range[1], 16894.2)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_curve_padding_datetime_nonsquare(self):
curve = Curve([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1, aspect=2
)
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 16891.9)
self.assertEqual(x_range[1], 16894.1)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
###########################
# Styling mapping #
###########################
def test_curve_scalar_color_op(self):
curve = Curve([(0, 0, 'red'), (0, 1, 'red'), (0, 2, 'red')],
vdims=['y', 'color']).options(color='color')
plot = mpl_renderer.get_plot(curve)
artist = plot.handles['artist']
self.assertEqual(artist.get_color(), 'red')
def test_op_ndoverlay_color_value(self):
colors = ['blue', 'red']
overlay = NdOverlay({color: Curve(np.arange(i))
for i, color in enumerate(colors)},
'color').options('Curve', color='color')
plot = mpl_renderer.get_plot(overlay)
for subplot, color in zip(plot.subplots.values(), colors):
style = dict(subplot.style[subplot.cyclic_index])
style = subplot._apply_transforms(subplot.current_frame, {}, style)
self.assertEqual(style['color'], color)
def test_curve_color_op(self):
curve = Curve([(0, 0, 'red'), (0, 1, 'blue'), (0, 2, 'red')],
vdims=['y', 'color']).options(color='color')
with self.assertRaises(Exception):
mpl_renderer.get_plot(curve)
def test_curve_alpha_op(self):
curve = Curve([(0, 0, 0.1), (0, 1, 0.3), (0, 2, 1)],
vdims=['y', 'alpha']).options(alpha='alpha')
with self.assertRaises(Exception):
mpl_renderer.get_plot(curve)
def test_curve_linewidth_op(self):
curve = Curve([(0, 0, 0.1), (0, 1, 0.3), (0, 2, 1)],
vdims=['y', 'linewidth']).options(linewidth='linewidth')
with self.assertRaises(Exception):
mpl_renderer.get_plot(curve)
def test_curve_style_mapping_ndoverlay_dimensions(self):
ndoverlay = NdOverlay({
(0, 'A'): Curve([1, 2, 0]), (0, 'B'): Curve([1, 2, 1]),
(1, 'A'): Curve([1, 2, 2]), (1, 'B'): Curve([1, 2, 3])},
['num', 'cat']
).opts({
'Curve': dict(
color=dim('num').categorize({0: 'red', 1: 'blue'}),
linestyle=dim('cat').categorize({'A': '-.', 'B': '-'})
)
})
plot = mpl_renderer.get_plot(ndoverlay)
for (num, cat), sp in plot.subplots.items():
artist = sp.handles['artist']
color = artist.get_color()
if num == 0:
self.assertEqual(color, 'red')
else:
self.assertEqual(color, 'blue')
linestyle = artist.get_linestyle()
if cat == 'A':
self.assertEqual(linestyle, '-.')
else:
self.assertEqual(linestyle, '-')
def test_curve_style_mapping_constant_value_dimensions(self):
vdims = ['y', 'num', 'cat']
ndoverlay = NdOverlay({
0: Curve([(0, 1, 0, 'A'), (1, 0, 0, 'A')], vdims=vdims),
1: Curve([(0, 1, 0, 'B'), (1, 1, 0, 'B')], vdims=vdims),
2: Curve([(0, 1, 1, 'A'), (1, 2, 1, 'A')], vdims=vdims),
3: Curve([(0, 1, 1, 'B'), (1, 3, 1, 'B')], vdims=vdims)}
).opts({
'Curve': dict(
color=dim('num').categorize({0: 'red', 1: 'blue'}),
linestyle=dim('cat').categorize({'A': '-.', 'B': '-'})
)
})
plot = mpl_renderer.get_plot(ndoverlay)
for k, sp in plot.subplots.items():
artist = sp.handles['artist']
color = artist.get_color()
if ndoverlay[k].iloc[0, 2] == 0:
self.assertEqual(color, 'red')
else:
self.assertEqual(color, 'blue')
linestyle = artist.get_linestyle()
if ndoverlay[k].iloc[0, 3] == 'A':
self.assertEqual(linestyle, '-.')
else:
self.assertEqual(linestyle, '-')
|
ioam/holoviews
|
holoviews/tests/plotting/matplotlib/test_curveplot.py
|
Python
|
bsd-3-clause
| 10,641
|
# GUI Application automation and testing library
# Copyright (C) 2006-2020 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pywinauto.base_application module
------------------------------------
The application module is the main one that users will use first.
When starting to automate an application you must initialize an instance
of the Application class. Then you have to start the program with
:meth:`Application.start<pywinauto.base_application.BaseApplication.start>`
or connect to a running process of an application with:
:meth:`Application.connect<pywinauto.base_application.BaseApplication.connect>`
Once you have an Application instance you can access dialogs in that
application by using one of the methods below. ::
dlg = app.YourDialogTitle
dlg = app.window(name="your title", classname="your class", ...)
dlg = app['Your Dialog Title']
Similarly once you have a dialog you can get a control from that dialog
in almost exactly the same ways. ::
ctrl = dlg.YourControlTitle
ctrl = dlg.child_window(name="Your control", classname="Button", ...)
ctrl = dlg["Your control"]
.. note::
For attribute access of controls and dialogs you do not have to
specify the exact name/title/text of the control. Pywinauto automatically
performs a best match of the available dialogs or controls.
With the introduction of cross-platform support in pywinauto,
the Application class is automatically created with the platform's
default backend. For MS Windows OS it is 'win32' and for Linux OS it is 'atspi'.
.. seealso::
:func:`pywinauto.findwindows.find_elements` for the keyword arguments that
can be passed to both:
:meth:`WindowSpecification.child_window<pywinauto.base_application.WindowSpecification.child_window>` and
:meth:`WindowSpecification.window<pywinauto.base_application.WindowSpecification.window>`
:class:`pywinauto.windows.application.Application` for the 'win32' and 'uia' backends
:class:`pywinauto.linux.application.Application` for the 'atspi' backend
"""
from __future__ import print_function
import sys
import os.path
import time
import locale
import codecs
import collections
import warnings
import six
from . import timings
from . import controls
from . import findbestmatch
from . import findwindows
from . import backend
from .actionlogger import ActionLogger
from .timings import Timings, wait_until, TimeoutError, wait_until_passes, timestamp
from . import deprecated
class AppStartError(Exception):
"""There was a problem starting the Application"""
pass # pragma: no cover
class ProcessNotFoundError(Exception):
"""Could not find that process"""
pass # pragma: no cover
class AppNotConnected(Exception):
"""Application has not been connected to a process yet"""
pass # pragma: no cover
# TODO problem with if active_only: in findwindows to use on linux
#=========================================================================
class WindowSpecification(object):
"""
A specification for finding a window or control
Windows are resolved when used.
    You can also wait for the existence or non-existence of a window
.. implicitly document some private functions
.. automethod:: __getattribute__
.. automethod:: __getitem__
"""
WAIT_CRITERIA_MAP = {'visible': lambda ctrl, timeout, retry_interval: ctrl.wait_visible(timeout, retry_interval),
'enabled': lambda ctrl, timeout, retry_interval: ctrl.wait_enabled(timeout, retry_interval),
'active': lambda ctrl, timeout, retry_interval: ctrl.wait_active(timeout, retry_interval),
}
WAIT_NOT_CRITERIA_MAP = {'visible': lambda ctrl, timeout, retry_interval: ctrl.wait_not_visible(timeout,
retry_interval),
'enabled': lambda ctrl, timeout, retry_interval: ctrl.wait_not_enabled(timeout,
retry_interval),
'active': lambda ctrl, timeout, retry_interval: ctrl.wait_not_active(timeout,
retry_interval),
}
def __init__(self, search_criteria, allow_magic_lookup=True):
"""
Initialize the class
:param search_criteria: the criteria to match a dialog
:param allow_magic_lookup: whether attribute access must turn into child_window(best_match=...) search as fallback
"""
# kwargs will contain however to find this window
if 'backend' not in search_criteria:
search_criteria['backend'] = backend.registry.active_backend.name
if 'pid' in search_criteria and 'app' in search_criteria:
            raise KeyError('Keywords "pid" and "app" cannot be combined (ambiguous). ' \
                           'Use one option at a time: Application object with keyword "app" or ' \
                           'integer process ID with keyword "pid".')
self.app = search_criteria.get('app', None)
self.criteria = [search_criteria, ]
self.actions = ActionLogger()
self.backend = backend.registry.backends[search_criteria['backend']]
self.allow_magic_lookup = allow_magic_lookup
# Non PEP-8 aliases for partial backward compatibility
self.wrapper_object = deprecated(self.find, deprecated_name='wrapper_object')
self.child_window = deprecated(self.by, deprecated_name="child_window")
self.window = deprecated(self.by, deprecated_name='window')
def __call__(self, *args, **kwargs):
"""No __call__ so return a useful error"""
if "best_match" in self.criteria[-1]:
raise AttributeError("Neither GUI element (wrapper) " \
"nor wrapper method '{0}' were found (typo?)".
format(self.criteria[-1]['best_match']))
message = (
"You tried to execute a function call on a WindowSpecification "
"instance. You probably have a typo for one of the methods of "
"this class or of the targeted wrapper object.\n"
"The criteria leading up to this are: " + str(self.criteria))
raise AttributeError(message)
def _get_updated_criteria(self, criteria_):
# make a copy of the criteria
criteria = [crit.copy() for crit in criteria_]
# find the dialog
if 'backend' not in criteria[0]:
criteria[0]['backend'] = self.backend.name
if self.app is not None:
# find_elements(...) accepts only "process" argument
criteria[0]['pid'] = self.app.process
del criteria[0]['app']
return criteria
def __find_base(self, criteria_, timeout, retry_interval):
time_left = timeout
start = timestamp()
criteria = self._get_updated_criteria(criteria_)
dialog = self.backend.generic_wrapper_class(findwindows.find_element(**criteria[0]))
if len(criteria) > 1:
ctrls = []
previous_parent = dialog.element_info
for ctrl_criteria in criteria[1:]:
ctrl_criteria["top_level_only"] = False
if "parent" not in ctrl_criteria:
ctrl_criteria["parent"] = previous_parent
if isinstance(ctrl_criteria["parent"], WindowSpecification):
time_left -= timestamp() - start
if time_left <= 0.0:
raise TimeoutError("Timed out: can not find parent {} for the control with the given"
"criteria {}.".format(ctrl_criteria['parent'], ctrl_criteria))
ctrl_criteria["parent"] = ctrl_criteria["parent"].find(time_left, retry_interval)
# resolve the control and return it
if 'backend' not in ctrl_criteria:
ctrl_criteria['backend'] = self.backend.name
ctrl = self.backend.generic_wrapper_class(findwindows.find_element(**ctrl_criteria))
previous_parent = ctrl.element_info
ctrls.append(ctrl)
return ctrls[-1]
else:
return dialog
def __find_all_base(self, criteria_, timeout, retry_interval):
time_left = timeout
start = timestamp()
if len(criteria_) == 1:
criteria = self._get_updated_criteria(criteria_)
dialogs = findwindows.find_elements(**criteria[0])
return [self.backend.generic_wrapper_class(dialog) for dialog in dialogs]
else:
previous_ctrl = self.__find_base(criteria_[:-1], time_left, retry_interval)
previous_parent = previous_ctrl.element_info
ctrl_criteria = criteria_[-1]
ctrl_criteria["top_level_only"] = False
if "parent" not in ctrl_criteria:
ctrl_criteria["parent"] = previous_parent
if isinstance(ctrl_criteria["parent"], WindowSpecification):
time_left -= timestamp() - start
if time_left <= 0.0:
raise TimeoutError("Timed out: can not find parent {} for the control with the given"
"criteria {}.".format(ctrl_criteria['parent'], ctrl_criteria))
ctrl_criteria["parent"] = ctrl_criteria["parent"].find(time_left, retry_interval)
# resolve the controls and return it
if 'backend' not in ctrl_criteria:
ctrl_criteria['backend'] = self.backend.name
all_ctrls = findwindows.find_elements(**ctrl_criteria)
return [self.backend.generic_wrapper_class(ctrl) for ctrl in all_ctrls]
def find(self, timeout=None, retry_interval=None):
"""
Find a control using criteria. The returned control matches conditions from criteria[-1].
* **criteria** - a list with dictionaries
1st element is search criteria for the dialog
other elements are search criteria for a control of the dialog
        * **timeout** - maximum length of time to try to find the controls (default: ``Timings.window_find_timeout``)
        * **retry_interval** - how long to wait between each retry (default: ``Timings.window_find_retry``)
"""
if timeout is None:
timeout = Timings.window_find_timeout
if retry_interval is None:
retry_interval = Timings.window_find_retry
try:
ctrl = wait_until_passes(
timeout,
retry_interval,
self.__find_base,
(findwindows.ElementNotFoundError,
findbestmatch.MatchError,
controls.InvalidWindowHandle,
controls.InvalidElement),
self.criteria,
timeout,
retry_interval,
)
except TimeoutError as e:
raise e.original_exception
return ctrl
def find_all(self, timeout=None, retry_interval=None):
"""
Find all controls using criteria. The returned controls match conditions from criteria[-1].
        Parent controls are assumed to exist in a single instance; otherwise an ElementAmbiguousError is raised.
* **criteria** - a list with dictionaries
1st element is search criteria for the dialog
other elements are search criteria for a control of the dialog
        * **timeout** - maximum length of time to try to find the controls (default: ``Timings.window_find_timeout``)
        * **retry_interval** - how long to wait between each retry (default: ``Timings.window_find_retry``)
"""
if timeout is None:
timeout = Timings.window_find_timeout
if retry_interval is None:
retry_interval = Timings.window_find_retry
try:
ctrls = wait_until_passes(
timeout,
retry_interval,
self.__find_all_base,
(findwindows.ElementNotFoundError,
findbestmatch.MatchError,
controls.InvalidWindowHandle,
controls.InvalidElement),
self.criteria,
timeout,
retry_interval,
)
except TimeoutError as e:
raise e.original_exception
return ctrls
def wait(self, wait_for, timeout=None, retry_interval=None):
"""
(DEPRECATED) Wait for the window to be in a particular state/states.
        :param wait_for: The state to wait for the window to be in. It can
        be any of the following states; you may also combine several states, separated by spaces.
* 'exists' means that the window is a valid handle
* 'visible' means that the window is not hidden
* 'enabled' means that the window is not disabled
* 'ready' means that the window is visible and enabled
* 'active' means that the window is active
:param timeout: Raise an :func:`pywinauto.timings.TimeoutError` if the window
is not in the appropriate state after this number of seconds.
Default: :py:attr:`pywinauto.timings.Timings.window_find_timeout`.
:param retry_interval: How long to sleep between each retry.
Default: :py:attr:`pywinauto.timings.Timings.window_find_retry`.
An example to wait until the dialog
exists, is ready, enabled and visible: ::
self.Dlg.wait("exists enabled visible ready")
.. seealso::
:func:`WindowSpecification.wait_not()`
:func:`pywinauto.timings.TimeoutError`
"""
warnings.warn("Wait method is deprecated and will be removed. "
"Please, use find() instead of wait() or wait('exists'). "
"wait_visible(), wait_enabled() and wait_active() are methods of "
"HwndWrapper object, so you can use it like .find().wait_active(), "
".find().wait_visible().wait_enabled(), etc.")
if timeout is None:
timeout = Timings.window_find_timeout
if retry_interval is None:
retry_interval = Timings.window_find_retry
time_left = timeout
start = timestamp()
try:
ctrl = self.find(time_left, retry_interval)
except (findwindows.ElementNotFoundError,
findbestmatch.MatchError,
controls.InvalidWindowHandle,
controls.InvalidElement,
TimeoutError) as e:
raise TimeoutError('Timed out: can not find control with the given criteria {}'.format(self.criteria[-1]))
correct_wait_for = wait_for.lower().split()
if 'ready' in correct_wait_for:
correct_wait_for.remove('ready')
if 'visible' not in correct_wait_for:
correct_wait_for.append('visible')
if 'enabled' not in correct_wait_for:
correct_wait_for.append('enabled')
for condition in correct_wait_for:
time_left -= timestamp() - start
if time_left <= 0.0:
raise TimeoutError("Timed out: not enough time to check the condition {}.".format(condition))
if condition == 'exists':
continue
elif condition not in WindowSpecification.WAIT_CRITERIA_MAP.keys():
raise SyntaxError("Invalid criteria: {}!".format(condition))
else:
WindowSpecification.WAIT_CRITERIA_MAP[condition](ctrl, time_left, retry_interval)
return ctrl
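    # Migration sketch for the deprecation warnings in wait() above and
    # wait_not() below (the chained forms are the assumed equivalents named
    # in those warnings; 'spec' stands for any WindowSpecification):
    #   spec.wait('exists')           ->  spec.find()
    #   spec.wait('visible enabled')  ->  spec.find().wait_visible().wait_enabled()
    #   spec.wait_not('exists')       ->  spec.not_exists()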
def wait_not(self, wait_for, timeout=None, retry_interval=None):
"""
        (DEPRECATED) Wait for the window to not be in a particular state/states.
        :param wait_for: The state to wait for the window to not be in. It can be any
        of the following states; you may also combine several states, separated by spaces.
* 'exists' means that the window is a valid handle
* 'visible' means that the window is not hidden
* 'enabled' means that the window is not disabled
* 'ready' means that the window is visible and enabled
* 'active' means that the window is active
        :param timeout: Raise an :func:`pywinauto.timings.TimeoutError` if the window is still in the
        state after this number of seconds.
Default: :py:attr:`pywinauto.timings.Timings.window_find_timeout`.
:param retry_interval: How long to sleep between each retry.
Default: :py:attr:`pywinauto.timings.Timings.window_find_retry`.
An example to wait until the dialog is not ready, enabled or visible: ::
self.Dlg.wait_not("enabled visible ready")
.. seealso::
:func:`WindowSpecification.wait()`
:func:`pywinauto.timings.TimeoutError`
"""
warnings.warn("Wait_not method is deprecated and will be removed. "
"You can use not_exists() instead of wait_not('exists'). "
"wait_not_visible(), wait_not_enabled() and wait_not_active() are methods of "
"HwndWrapper object, so you can use it like .find().wait_not_active(), "
".find().wait_not_visible().wait_not_enabled(), etc.")
if timeout is None:
timeout = Timings.window_find_timeout
if retry_interval is None:
retry_interval = Timings.window_find_retry
correct_wait_for = wait_for.lower().split()
if 'ready' in correct_wait_for:
correct_wait_for.remove('ready')
if 'visible' not in correct_wait_for:
correct_wait_for.append('visible')
if 'enabled' not in correct_wait_for:
correct_wait_for.append('enabled')
if 'exists' in correct_wait_for:
if not self.not_exists(timeout, retry_interval):
raise TimeoutError("Object with the given criteria {} still exists".format(self.criteria[-1]))
else:
time_left = timeout
start = timestamp()
try:
ctrl = self.find(time_left, retry_interval)
except (findwindows.ElementNotFoundError,
findbestmatch.MatchError,
controls.InvalidWindowHandle,
controls.InvalidElement,
TimeoutError) as e:
return
for condition in correct_wait_for:
time_left -= timestamp() - start
if time_left <= 0.0:
raise TimeoutError('Timed out: not enough time to check the condition {}'.format(condition))
if condition not in WindowSpecification.WAIT_NOT_CRITERIA_MAP.keys():
raise SyntaxError("Invalid criteria: {}!".format(condition))
else:
WindowSpecification.WAIT_NOT_CRITERIA_MAP[condition](ctrl, time_left, retry_interval)
def by(self, **criteria):
"""
Add criteria for a control
When this window specification is resolved it will be used
to match against a control.
"""
# default to non top level windows because we are usually
# looking for a control
if 'top_level_only' not in criteria:
criteria['top_level_only'] = False
new_item = WindowSpecification(self.criteria[0], allow_magic_lookup=self.allow_magic_lookup)
new_item.criteria.extend(self.criteria[1:])
new_item.criteria.append(criteria)
return new_item
def __getitem__(self, key):
"""
Allow access to dialogs/controls through item access
This allows::
app['DialogTitle']['ControlTextClass']
to be used to access dialogs and controls.
Both this and :func:`__getattribute__` use the rules outlined in the
HowTo document.
"""
# if we already have 2 levels of criteria (dlg, control)
# then resolve the control and do a getitem on it for the
if len(self.criteria) >= 2: # FIXME - this is surprising
ctrl = self.find()
            # try to return a good error message if the control does not
            # have a __getitem__() method
if hasattr(ctrl, '__getitem__'):
return ctrl[key]
else:
message = "The control does not have a __getitem__ method " \
"for item access (i.e. ctrl[key]) so maybe you have " \
"requested this in error?"
raise AttributeError(message)
# if we get here then we must have only had one criteria so far
# so create a new :class:`WindowSpecification` for this control
new_item = WindowSpecification(self.criteria[0], allow_magic_lookup=self.allow_magic_lookup)
# add our new criteria
new_item.criteria.append({"best_match": key})
return new_item
def __getattribute__(self, attr_name):
"""
Attribute access for this class
If we already have criteria for both dialog and control then
resolve the control and return the requested attribute.
If we have only criteria for the dialog but the attribute
requested is an attribute of DialogWrapper then resolve the
dialog and return the requested attribute.
Otherwise delegate functionality to :func:`__getitem__` - which
sets the appropriate criteria for the control.
"""
allow_magic_lookup = object.__getattribute__(self, "allow_magic_lookup") # Beware of recursions here!
if not allow_magic_lookup:
try:
return object.__getattribute__(self, attr_name)
except AttributeError:
wrapper_object = self.find()
try:
return getattr(wrapper_object, attr_name)
except AttributeError:
message = (
'Attribute "%s" exists neither on %s object nor on'
'targeted %s element wrapper (typo? or set allow_magic_lookup to True?)' %
(attr_name, self.__class__, wrapper_object.__class__))
raise AttributeError(message)
if attr_name in ['__dict__', '__members__', '__methods__', '__class__', '__name__']:
return object.__getattribute__(self, attr_name)
if attr_name in dir(self.__class__):
return object.__getattribute__(self, attr_name)
if attr_name in self.__dict__:
return self.__dict__[attr_name]
# if we already have 2 levels of criteria (dlg, control)
# this third must be an attribute so resolve and get the
# attribute and return it
if len(self.criteria) >= 2: # FIXME - this is surprising
ctrl = self.find()
try:
return getattr(ctrl, attr_name)
except AttributeError:
return self.by(best_match=attr_name)
else:
# FIXME - I don't get this part at all, why is it win32-specific and why not keep the same logic as above?
# if we have been asked for an attribute of the dialog
# then resolve the window and return the attribute
desktop_wrapper = self.backend.generic_wrapper_class(self.backend.element_info_class())
need_to_resolve = (len(self.criteria) == 1 and hasattr(desktop_wrapper, attr_name))
if hasattr(self.backend, 'dialog_class'):
need_to_resolve = need_to_resolve and hasattr(self.backend.dialog_class, attr_name)
# Probably there is no DialogWrapper for another backend
if need_to_resolve:
ctrl = self.find()
return getattr(ctrl, attr_name)
# It is a dialog/control criterion so let getitem
# deal with it
return self[attr_name]
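    # Lookup sketch: with allow_magic_lookup=True the following are (assumed)
    # equivalent ways to reach one control; 'MainWindow' and 'OK' are made-up:
    #   app['Main Window']['OK']
    #   app.MainWindow.OK                                # attribute -> best_match
    #   app.window(best_match='Main Window').by(best_match='OK')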
def exists(self, timeout=None, retry_interval=None):
"""
        Wait until the window exists; return True if the control exists.
:param timeout: how much time (in seconds) to try to find the control.
Default: ``Timings.exists_timeout``.
:param retry_interval: how long to wait between each retry.
Default: ``Timings.exists_retry``.
"""
        # set the current timings - couldn't set as defaults as they are
# evaluated at import time - and timings may be changed at any time
if timeout is None:
timeout = Timings.exists_timeout
if retry_interval is None:
retry_interval = Timings.exists_retry
# modify the criteria as exists should look for all
# windows - including not visible and disabled
exists_criteria = self.criteria[:]
for criterion in exists_criteria:
criterion['enabled'] = None
criterion['visible'] = None
try:
self.find(timeout, retry_interval)
return True
except (findwindows.ElementNotFoundError,
findbestmatch.MatchError,
controls.InvalidWindowHandle,
controls.InvalidElement,
TimeoutError):
return False
def not_exists(self, timeout=None, retry_interval=None):
"""
        Wait until the window no longer exists; return True if the control does not exist.
:param timeout: how much time (in seconds) to wait until the control exists.
Default: ``Timings.exists_timeout``.
:param retry_interval: how long to wait between each retry.
Default: ``Timings.exists_retry``.
"""
        # set the current timings - couldn't set as defaults as they are
# evaluated at import time - and timings may be changed at any time
if timeout is None:
timeout = Timings.exists_timeout
if retry_interval is None:
retry_interval = Timings.exists_retry
# modify the criteria as exists should look for all
# windows - including not visible and disabled
exists_criteria = self.criteria[:]
for criterion in exists_criteria:
criterion['enabled'] = None
criterion['visible'] = None
try:
wait_until(timeout, retry_interval, self.exists, False)
return True
except (findwindows.ElementNotFoundError,
findbestmatch.MatchError,
controls.InvalidWindowHandle,
controls.InvalidElement,
TimeoutError):
return False
def dump_tree(self, depth=10, max_width=10, filename=None):
"""
Dump the 'identifiers' to console or a file
Dump identifiers for the control and for its descendants to
a depth of **depth** (the whole subtree if **None**).
:param depth: Max depth level of an element tree to dump (None: unlimited).
:param max_width: Max number of children of each element to dump (None: unlimited).
:param filename: Save tree to a specified file (None: print to stdout).
.. note:: The identifiers dumped by this method have been made
unique. So if you have 2 edit boxes, they won't both have "Edit"
listed in their identifiers. In fact the first one can be
referred to as "Edit", "Edit0", "Edit1" and the 2nd should be
referred to as "Edit2".
"""
if depth is None:
depth = sys.maxsize
if max_width is None:
max_width = sys.maxsize
# Wrap this control
this_ctrl = self.find()
ElementTreeNode = collections.namedtuple('ElementTreeNode', ['elem', 'id', 'children'])
def create_element_tree(element_list):
"""Build elements tree and create list with pre-order tree traversal"""
depth_limit_reached = False
width_limit_reached = False
current_id = 0
elem_stack = collections.deque([(this_ctrl, None, 0)])
root_node = ElementTreeNode(this_ctrl, current_id, [])
while elem_stack:
current_elem, current_elem_parent_children, current_node_depth = elem_stack.pop()
if current_elem is None:
elem_node = ElementTreeNode(None, current_id, [])
current_elem_parent_children.append(elem_node)
else:
if current_node_depth <= depth:
if current_elem_parent_children is not None:
current_id += 1
elem_node = ElementTreeNode(current_elem, current_id, [])
current_elem_parent_children.append(elem_node)
element_list.append(current_elem)
else:
elem_node = root_node
child_elements = current_elem.children()
if len(child_elements) > max_width and current_node_depth < depth:
elem_stack.append((None, elem_node.children, current_node_depth + 1))
width_limit_reached = True
for i in range(min(len(child_elements) - 1, max_width - 1), -1, -1):
elem_stack.append((child_elements[i], elem_node.children, current_node_depth + 1))
else:
depth_limit_reached = True
return root_node, depth_limit_reached, width_limit_reached
# Create a list of this control, all its descendants
all_ctrls = [this_ctrl]
# Build element tree
elements_tree, depth_limit_reached, width_limit_reached = create_element_tree(all_ctrls)
show_best_match_names = self.allow_magic_lookup and not (depth_limit_reached or width_limit_reached)
if show_best_match_names:
# Create a list of all visible text controls
txt_ctrls = [ctrl for ctrl in all_ctrls if ctrl.can_be_label and ctrl.is_visible() and ctrl.window_text()]
# Build a dictionary of disambiguated list of control names
name_ctrl_id_map = findbestmatch.UniqueDict()
for index, ctrl in enumerate(all_ctrls):
ctrl_names = findbestmatch.get_control_names(ctrl, all_ctrls, txt_ctrls)
for name in ctrl_names:
name_ctrl_id_map[name] = index
# Swap it around so that we are mapped off the control indices
ctrl_id_name_map = {}
for name, index in name_ctrl_id_map.items():
ctrl_id_name_map.setdefault(index, []).append(name)
def print_identifiers(element_node, current_depth=0, log_func=print):
"""Recursively print ids for ctrls and their descendants in a tree-like format"""
if current_depth == 0:
if depth_limit_reached:
                    log_func('Warning: the whole hierarchy does not fit into depth={}. '
                             'Increase the depth parameter value or set it to None (unlimited, '
                             'may freeze in case of a very large number of elements).'.format(depth))
if self.allow_magic_lookup and not show_best_match_names:
log_func('If the whole hierarchy fits into depth and max_width values, '
'best_match names are dumped.')
log_func("Control Identifiers:")
indent = current_depth * u" | "
output = indent + u'\n'
ctrl = element_node.elem
if ctrl is not None:
ctrl_id = element_node.id
ctrl_text = ctrl.window_text()
if ctrl_text:
# transform multi-line text to one liner
ctrl_text = repr(ctrl_text)
output += indent + u"{class_name} - {text} {rect}" \
"".format(class_name=ctrl.friendly_class_name(),
text=ctrl_text,
rect=ctrl.rectangle())
if show_best_match_names:
output += u'\n' + indent + u'{}'.format(ctrl_id_name_map[ctrl_id])
class_name = ctrl.class_name()
auto_id = None
control_type = None
if hasattr(ctrl.element_info, 'automation_id'):
auto_id = ctrl.element_info.automation_id
if hasattr(ctrl.element_info, 'control_type'):
control_type = ctrl.element_info.control_type
criteria_texts = []
if ctrl_text:
criteria_texts.append(u'name={}'.format(ctrl_text))
if class_name:
criteria_texts.append(u"class_name='{}'".format(class_name))
if auto_id:
criteria_texts.append(u"auto_id='{}'".format(auto_id))
if control_type:
criteria_texts.append(u"control_type='{}'".format(control_type))
if ctrl_text or class_name or auto_id:
output += u'\n' + indent + u'.by(' + u', '.join(criteria_texts) + u')'
else:
output += indent + u'**********\n'
output += indent + u'Max children output limit ({}) has been reached. ' \
u'Set a larger max_width value or use max_width=None ' \
u'to see all children.\n'.format(max_width)
output += indent + u'**********'
if six.PY3:
log_func(output)
else:
log_func(output.encode(locale.getpreferredencoding(), errors='backslashreplace'))
if current_depth <= depth:
for child_elem in element_node.children:
print_identifiers(child_elem, current_depth + 1, log_func)
if filename is None:
if six.PY3:
try:
encoding = sys.stdout.encoding
except AttributeError:
encoding = sys.getdefaultencoding()
else:
encoding = locale.getpreferredencoding()
print(u'# -*- coding: {} -*-'.format(encoding))
print_identifiers(elements_tree)
else:
with codecs.open(filename, "w", locale.getpreferredencoding(), errors="backslashreplace") as log_file:
def log_func(msg):
log_file.write(str(msg) + os.linesep)
log_func(u'# -*- coding: {} -*-'.format(locale.getpreferredencoding()))
print_identifiers(elements_tree, log_func=log_func)
print_control_identifiers = deprecated(dump_tree, deprecated_name='print_control_identifiers')
print_ctrl_ids = deprecated(dump_tree, deprecated_name='print_ctrl_ids')
#=========================================================================
class BaseApplication(object):
"""
Represents an application
.. implicitly document some private functions
.. automethod:: __getattribute__
.. automethod:: __getitem__
"""
def connect(self, **kwargs):
"""Connect to an already running process
        The action is performed according to only one of the parameters
:param pid: a process ID of the target
:param handle: a window handle of the target
:param path: a path used to launch the target
:param timeout: a timeout for process start (relevant if path is specified)
.. seealso::
            :func:`pywinauto.findwindows.find_elements` - the keyword arguments that
            can also be used instead of **pid**, **handle** or **path**
"""
raise NotImplementedError()
def start(self, cmd_line, timeout=None, retry_interval=None,
create_new_console=False, wait_for_idle=True, work_dir=None):
"""Start the application as specified by **cmd_line**
:param cmd_line: a string with a path to launch the target
:param timeout: a timeout for process to start (optional)
:param retry_interval: retry interval (optional)
:param create_new_console: create a new console (optional)
:param wait_for_idle: wait for idle (optional)
:param work_dir: working directory (optional)
"""
raise NotImplementedError()
def cpu_usage(self, interval=None):
"""Return CPU usage percent during specified number of seconds"""
raise NotImplementedError()
def wait_cpu_usage_lower(self, threshold=2.5, timeout=None, usage_interval=None):
"""Wait until process CPU usage percentage is less than the specified threshold"""
if usage_interval is None:
usage_interval = Timings.cpu_usage_interval
if timeout is None:
timeout = Timings.cpu_usage_wait_timeout
start_time = timings.timestamp()
while self.cpu_usage(usage_interval) > threshold:
if timings.timestamp() - start_time > timeout:
raise RuntimeError('Waiting CPU load <= {}% timed out!'.format(threshold))
return self
def top_window(self):
"""Return WindowSpecification for a current top window of the application"""
if not self.process:
raise AppNotConnected("Please use start or connect before trying "
"anything else")
timeout = Timings.window_find_timeout
while timeout >= 0:
windows = findwindows.find_elements(pid=self.process,
backend=self.backend.name)
if windows:
break
time.sleep(Timings.window_find_retry)
timeout -= Timings.window_find_retry
else:
raise RuntimeError("No windows for that process could be found")
criteria = {}
criteria['backend'] = self.backend.name
if windows[0].handle:
criteria['handle'] = windows[0].handle
else:
criteria['name'] = windows[0].name
return WindowSpecification(criteria, allow_magic_lookup=self.allow_magic_lookup)
def active(self):
"""Return WindowSpecification for an active window of the application"""
if not self.process:
raise AppNotConnected("Please use start or connect before trying "
"anything else")
time.sleep(Timings.window_find_timeout)
# very simple
windows = findwindows.find_elements(pid=self.process,
active_only=True,
backend=self.backend.name)
if not windows:
raise RuntimeError("No Windows of that application are active")
criteria = {}
criteria['backend'] = self.backend.name
if windows[0].handle:
criteria['handle'] = windows[0].handle
else:
criteria['name'] = windows[0].name
return WindowSpecification(criteria, allow_magic_lookup=self.allow_magic_lookup)
def windows(self, **kwargs):
"""Return a list of wrapped top level windows of the application"""
if not self.process:
raise AppNotConnected("Please use start or connect before trying "
"anything else")
if 'backend' in kwargs:
raise ValueError('Using another backend for this Application '
'instance is not allowed! Create another app object.')
if 'visible' not in kwargs:
kwargs['visible'] = None
if 'enabled' not in kwargs:
kwargs['enabled'] = None
kwargs['pid'] = self.process
kwargs['backend'] = self.backend.name
if kwargs.get('top_level_only') is None:
kwargs['top_level_only'] = True
windows = findwindows.find_elements(**kwargs)
return [self.backend.generic_wrapper_class(win) for win in windows]
def window(self, **kwargs):
"""Return a window of the application
You can specify the same parameters as findwindows.find_windows.
It will add the process parameter to ensure that the window is from
the current process.
See :py:func:`pywinauto.findwindows.find_elements` for the full parameters description.
"""
if 'backend' in kwargs:
raise ValueError('Using another backend than set in the app constructor is not allowed!')
kwargs['backend'] = self.backend.name
if kwargs.get('top_level_only') is None:
kwargs['top_level_only'] = True
# TODO: figure out how to eliminate this workaround
if self.backend.name == 'win32':
kwargs['visible'] = True
if not self.process:
raise AppNotConnected("Please use start or connect before trying "
"anything else")
else:
# add the restriction for this particular application
kwargs['app'] = self
win_spec = WindowSpecification(kwargs, allow_magic_lookup=self.allow_magic_lookup)
return win_spec
Window_ = window_ = window
def __getitem__(self, key):
"""Find the specified dialog of the application"""
# delegate searching functionality to self.window()
return self.window(best_match=key)
def __getattribute__(self, attr_name):
"""Find the specified dialog of the application"""
if attr_name in ['__dict__', '__members__', '__methods__', '__class__']:
return object.__getattribute__(self, attr_name)
if attr_name in dir(self.__class__):
return object.__getattribute__(self, attr_name)
if attr_name in self.__dict__:
return self.__dict__[attr_name]
# delegate all functionality to item access
return self[attr_name]
def kill(self, soft=False):
"""
Try to close and kill the application
Dialogs may pop up asking to save data - but the application
will be killed anyway - you will not be able to click the buttons.
This should only be used when it is OK to kill the process like you
would do in task manager.
"""
raise NotImplementedError()
def is_process_running(self):
"""
        Check whether the process is running.
        Can be called before start/connect.
        Returns True if the process is running, otherwise False
"""
raise NotImplementedError()
def wait_for_process_exit(self, timeout=None, retry_interval=None):
"""
        Wait for the process to exit until the timeout is reached
        Raises a TimeoutError exception if the timeout is reached
"""
if timeout is None:
timeout = Timings.app_exit_timeout
if retry_interval is None:
retry_interval = Timings.app_exit_retry
wait_until(timeout, retry_interval, self.is_process_running, value=False)
|
pywinauto/pywinauto
|
pywinauto/base_application.py
|
Python
|
bsd-3-clause
| 44,969
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
from pylib import android_commands
from pylib import constants
from pylib import pexpect
from pylib.base import base_test_result
from pylib.base import base_test_runner
def _TestSuiteRequiresMockTestServer(suite_name):
"""Returns True if the test suite requires mock test server."""
tests_require_net_test_server = ['unit_tests', 'net_unittests',
'content_unittests',
'content_browsertests']
return (suite_name in
tests_require_net_test_server)
class TestRunner(base_test_runner.BaseTestRunner):
def __init__(self, test_options, device, test_package):
"""Single test suite attached to a single device.
Args:
test_options: A GTestOptions object.
device: Device to run the tests.
test_package: An instance of TestPackage class.
"""
super(TestRunner, self).__init__(device, test_options.tool,
test_options.push_deps,
test_options.cleanup_test_files)
self.test_package = test_package
self.test_package.tool = self.tool
self._test_arguments = test_options.test_arguments
timeout = test_options.timeout
if timeout == 0:
timeout = 60
# On a VM (e.g. chromium buildbots), this timeout is way too small.
if os.environ.get('BUILDBOT_SLAVENAME'):
timeout = timeout * 2
self._timeout = timeout * self.tool.GetTimeoutScale()
#override
def InstallTestPackage(self):
self.test_package.Install(self.adb)
def GetAllTests(self):
"""Install test package and get a list of all tests."""
self.test_package.Install(self.adb)
return self.test_package.GetAllTests(self.adb)
#override
def PushDataDeps(self):
self.adb.WaitForSdCardReady(20)
self.tool.CopyFiles()
if os.path.exists(constants.ISOLATE_DEPS_DIR):
device_dir = self.adb.GetExternalStorage()
# TODO(frankf): linux_dumper_unittest_helper needs to be in the same dir
# as breakpad_unittests exe. Find a better way to do this.
if self.test_package.suite_name == 'breakpad_unittests':
device_dir = constants.TEST_EXECUTABLE_DIR
for p in os.listdir(constants.ISOLATE_DEPS_DIR):
self.adb.PushIfNeeded(
os.path.join(constants.ISOLATE_DEPS_DIR, p),
os.path.join(device_dir, p))
def _ParseTestOutput(self, p):
"""Process the test output.
Args:
p: An instance of pexpect spawn class.
Returns:
A TestRunResults object.
"""
results = base_test_result.TestRunResults()
# Test case statuses.
    re_run = re.compile(r'\[ RUN \] ?(.*)\r\n')
    re_fail = re.compile(r'\[ FAILED \] ?(.*)\r\n')
    re_ok = re.compile(r'\[ OK \] ?(.*?) .*\r\n')
    # Test run statuses.
    re_passed = re.compile(r'\[ PASSED \] ?(.*)\r\n')
    re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile(r'\[ CRASHED \](.*)\r\n')
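    # Illustrative gtest output these patterns are aimed at (marker padding in
    # real gtest logs may differ; the lines below are examples only):
    #   [ RUN      ] FooTest.DoesBar
    #   [       OK ] FooTest.DoesBar (9 ms)
    #   [  FAILED  ] FooTest.DoesBaz
    #   [  PASSED  ] 41 tests.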
log = ''
try:
while True:
full_test_name = None
found = p.expect([re_run, re_passed, re_runner_fail],
timeout=self._timeout)
if found == 1: # re_passed
break
elif found == 2: # re_runner_fail
break
else: # re_run
full_test_name = p.match.group(1).replace('\r', '')
found = p.expect([re_ok, re_fail, re_crash], timeout=self._timeout)
log = p.before.replace('\r', '')
if found == 0: # re_ok
if full_test_name == p.match.group(1).replace('\r', ''):
results.AddResult(base_test_result.BaseTestResult(
full_test_name, base_test_result.ResultType.PASS,
log=log))
elif found == 2: # re_crash
results.AddResult(base_test_result.BaseTestResult(
full_test_name, base_test_result.ResultType.CRASH,
log=log))
break
else: # re_fail
results.AddResult(base_test_result.BaseTestResult(
full_test_name, base_test_result.ResultType.FAIL, log=log))
except pexpect.EOF:
logging.error('Test terminated - EOF')
# We're here because either the device went offline, or the test harness
# crashed without outputting the CRASHED marker (crbug.com/175538).
if not self.adb.IsOnline():
raise android_commands.errors.DeviceUnresponsiveError(
'Device %s went offline.' % self.device)
if full_test_name:
results.AddResult(base_test_result.BaseTestResult(
full_test_name, base_test_result.ResultType.CRASH,
log=p.before.replace('\r', '')))
except pexpect.TIMEOUT:
logging.error('Test terminated after %d second timeout.',
self._timeout)
if full_test_name:
results.AddResult(base_test_result.BaseTestResult(
full_test_name, base_test_result.ResultType.TIMEOUT,
log=p.before.replace('\r', '')))
finally:
p.close()
ret_code = self.test_package.GetGTestReturnCode(self.adb)
if ret_code:
logging.critical(
'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
ret_code, p.before, p.after)
return results
#override
def RunTest(self, test):
test_results = base_test_result.TestRunResults()
if not test:
return test_results, None
try:
self.test_package.ClearApplicationState(self.adb)
self.test_package.CreateCommandLineFileOnDevice(
self.adb, test, self._test_arguments)
test_results = self._ParseTestOutput(
self.test_package.SpawnTestProcess(self.adb))
finally:
self.CleanupSpawningServerState()
# Calculate unknown test results.
all_tests = set(test.split(':'))
all_tests_ran = set([t.GetName() for t in test_results.GetAll()])
unknown_tests = all_tests - all_tests_ran
test_results.AddResults(
[base_test_result.BaseTestResult(t, base_test_result.ResultType.UNKNOWN)
for t in unknown_tests])
retry = ':'.join([t.GetName() for t in test_results.GetNotPass()])
return test_results, retry
#override
def SetUp(self):
"""Sets up necessary test enviroment for the test suite."""
super(TestRunner, self).SetUp()
if _TestSuiteRequiresMockTestServer(self.test_package.suite_name):
self.LaunchChromeTestServerSpawner()
self.tool.SetupEnvironment()
#override
def TearDown(self):
"""Cleans up the test enviroment for the test suite."""
self.test_package.ClearApplicationState(self.adb)
self.tool.CleanUpEnvironment()
super(TestRunner, self).TearDown()
|
mogoweb/chromium-crosswalk
|
build/android/pylib/gtest/test_runner.py
|
Python
|
bsd-3-clause
| 6,990
|
import asyncio
import configparser
import json
import logging
import os
import os.path
import py.error
import pytest
import re
import sys
from collections.abc import Mapping
from functools import wraps
from subprocess import Popen, PIPE
from time import sleep
__tracebackhide__ = True
HERE = os.path.dirname(os.path.abspath(__file__))
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def fail(msg, *args):
if args:
msg = msg % args
raise InvocationError(msg)
class InvocationError(py.error.Error):
pass
def async_test(f):
@wraps(f)
def wrapper(*args, **kwargs):
coro = asyncio.coroutine(f)
future = coro(*args, **kwargs)
loop = asyncio.get_event_loop()
loop.run_until_complete(future)
pending = asyncio.Task.all_tasks()
if pending:
loop.run_until_complete(asyncio.wait(pending))
return wrapper
def extract(src, pattern):
try:
return re.search(pattern, src).group(1)
except AttributeError:
pass
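# Example (verifiable with re.search): extract("Root Token: abc-123",
# r'Root Token: ([\w-]+)') returns 'abc-123'; returns None if no match.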
class Vault:
def __init__(self, name):
self.name = name
self._proc = None
self._data = None
@property
def config(self):
return self._data
def start(self):
if self._proc:
fail('Vault %s is already running', self.name)
env = os.environ.copy()
env.setdefault('GOMAXPROCS', '2')
clean = Popen(['killall', 'vault'],
stdout=PIPE,
stderr=PIPE,
env=env,
shell=False)
clean.communicate()
args = ['vault', 'server', '-dev']
proc = Popen(args, stdout=PIPE, stderr=PIPE, env=env, shell=False)
self._proc = proc
pid = proc.pid
logging.info('Starting %s [%s]', self.name, pid)
buf = ''
while 'Vault server started!' not in buf:
buf += proc.stdout.read(1).decode('utf-8')
logging.debug(buf)
data = {
            'addr': extract(buf, 'VAULT_ADDR=\'(.+)\''),
            'unseal_key': extract(buf, r'Unseal Key: ([\w-]+)\W'),
            'root_token': extract(buf, r'Root Token: ([\w-]+)'),
}
env.setdefault('VAULT_ADDR', data['addr'])
proc_kwargs = {'stdout': PIPE,
'stderr': PIPE,
'env': env,
'shell': False}
for i in range(60):
with Popen(['vault', 'status'], **proc_kwargs) as sub:
stdout, stderr = sub.communicate(timeout=5)
if sub.returncode in (0, 1):
buf = stdout.decode('utf-8')
break
sleep(1)
else:
fail('Unable to start %s [%s]', self.name, pid)
try:
os.kill(pid, 0)
except OSError:
fail('Failed to keep %s [%s] running', self.name, pid)
data.update({
            'sealed': extract(buf, r'Sealed: (\w+)') == 'true',
            'shares': int(extract(buf, r'Key Shares: (\d+)')),
            'threshold': int(extract(buf, r'Key Threshold: (\d+)')),
            'progress': int(extract(buf, r'Unseal Progress: (\d+)')),
            'ha': extract(buf, r'High-Availability Enabled: (\w+)') == 'true',
})
self._data = Namespace()
self._data.update(data)
logging.info('Vault %s [%s] is ready to rock %s', self.name, pid, data)
def stop(self):
if not self._proc:
fail('Vault %s is not running', self.name)
result = self._proc.terminate()
self._proc = None
return result
def __repr__(self):
return '<Vault(name=%r)>' % self.name
class VaultTLS:
def __init__(self, name, *, server_config):
self.name = name
self._proc = None
self._data = None
self.server_config = server_config
@property
def config(self):
return self._data
def start(self):
if self._proc:
fail('Vault %s is already running', self.name)
with open(self.server_config) as file:
configuration = json.load(file)['listener']['tcp']
if configuration.get('tls_disable', False):
addr = 'http://%s' % configuration['address']
else:
addr = 'https://%s' % configuration['address']
base = os.path.dirname(self.server_config)
data = {
'addr': addr,
'key': os.path.join(base, 'server.key'),
'crt': os.path.join(base, 'server.crt'),
'csr': os.path.join(base, 'server.csr'),
}
self._data = Namespace()
self._data.update(data)
env = os.environ.copy()
env.setdefault('GOMAXPROCS', '2')
# env['SSL_CERT_DIR'] = os.path.join(HERE, 'certs')
clean = Popen(['killall', 'vault'],
stdout=PIPE, stderr=PIPE, env=env, shell=False)
clean.communicate()
cwd = os.path.dirname(self.server_config)
args = ['vault', 'server', '-config', self.server_config]
proc = Popen(args,
stdout=PIPE,
stderr=PIPE,
env=env,
shell=False,
cwd=cwd)
self._proc = proc
pid = proc.pid
logging.info('Starting %s [%s]', self.name, pid)
buf = ''
while 'Vault server started!' not in buf:
buf += proc.stdout.read(1).decode('utf-8')
logging.debug(buf)
try:
os.kill(pid, 0)
except OSError:
fail('Failed to keep %s [%s] running', self.name, pid)
logging.info('Vault %s [%s] is ready to rock %s', self.name, pid, data)
def stop(self):
if not self._proc:
fail('Node %s is not running', self.name)
result = self._proc.terminate()
self._proc = None
return result
def __repr__(self):
return '<Vault(name=%r, server_config=%r)>' % (self.name,
self.server_config)
class Consul(object):
def __init__(self, name, config_file, server=False, leader=False):
self.name = name
self.config_file = config_file
self.server = server
self.leader = leader
self._proc = None
@property
def config(self):
with open(self.config_file) as file:
response = Namespace()
response.update({'address': '127.0.0.1:8500'})
response.update(json.load(file))
return response
def start(self):
if self._proc:
fail('Node %s is already running', self.name)
# reset tmp store
Popen(['rm', '-rf', self.config.data_dir]).communicate()
env = os.environ.copy()
env.setdefault('GOMAXPROCS', '2')
proc = Popen(['consul', 'agent', '-config-file=%s' % self.config_file],
stdout=PIPE, stderr=PIPE, env=env, shell=False)
self._proc = proc
pid = proc.pid
logging.info('Starting %s [%s]' % (self.name, pid))
for i in range(60):
with Popen(['consul', 'info'], stdout=PIPE, stderr=PIPE) as sub:
stdout, stderr = sub.communicate(timeout=5)
if self.leader:
if 'leader = true' in stdout.decode('utf-8'):
break
elif self.server:
if 'server = true' in stdout.decode('utf-8'):
break
elif not sub.returncode:
break
sleep(1)
else:
fail('Unable to start %s [%s]', self.name, pid)
try:
os.kill(pid, 0)
except OSError:
fail('Failed to keep %s [%s] running', self.name, pid)
logging.info('Consul %s [%s] is ready to rock', self.name, pid)
def stop(self):
if not self._proc:
fail('Node %s is not running', self.name)
result = self._proc.terminate()
self._proc = None
return result
@pytest.fixture(scope='function', autouse=False)
def consul(request):
config_file = os.path.join(HERE, 'consul-server.json')
server = Consul('leader', config_file, True, True)
server.start()
request.addfinalizer(server.stop)
return server.config
@pytest.fixture(scope='function', autouse=False)
def dev_server(request):
server = Vault('dev')
server.start()
request.addfinalizer(server.stop)
return server.config
@pytest.fixture(scope='function', autouse=False)
def server(request):
conf = os.path.join(HERE, 'certs/server.json')
server = VaultTLS('https', server_config=conf)
server.start()
request.addfinalizer(server.stop)
return server.config
@pytest.fixture(scope='session', autouse=True)
def env(request):
response = Namespace()
response.update(os.environ)
response.CERT_PATH = os.path.join(HERE, 'certs')
config = configparser.ConfigParser()
config.optionxform = str # disable case transformation
config.read(['vault-test.ini', os.path.expanduser('~/.vault-test.ini')])
if config.has_section('env'):
response.update(config.items('env'))
return response
class Namespace:
def update(self, data):
if isinstance(data, Mapping):
data = data.items()
for k, v in data:
setattr(self, k, v)
|
johnnoone/aiovault
|
tests/conftest.py
|
Python
|
bsd-3-clause
| 9,455
|
'''
Created on Feb 21, 2014
@author: rkourtz
'''
import json
import pynuodb
class sql():
def __init__(self,
dbname,
host,
username,
password,
options
):
self.dbname = dbname
self.host = host
self.username = username
self.password = password
self.options = options
self.connect()
def close(self):
self.connection.close()
def commit(self):
        cursor = self.connection.cursor()
try:
cursor.execute("COMMIT")
return True
except:
return False
def connect(self):
self.connection = pynuodb.connect(self.dbname, self.host, self.username, self.password, self.options)
def execute(self, command, autocommit = False, associative = False):
cursor = self.connection.cursor()
try:
cursor.execute(command)
        except pynuodb.session.SessionException:
            # the session may have gone away; reconnect once and retry
            self.connect()
cursor = self.connection.cursor()
cursor.execute(command)
if autocommit:
cursor.execute("COMMIT")
if str(command.split(" ")[0]).lower() != "select" or not associative:
            if cursor._result_set is not None:
results = cursor.fetchall()
else:
results = True
cursor.close()
return results
else:
returnval = []
desc = cursor.description
results = cursor.fetchall()
cursor.close()
for rownum, row in enumerate(results):
returnrow = {}
for idx, field in enumerate(row):
returnrow[desc[idx][0]] = field
returnval.append(returnrow)
return returnval
def make_insert_sql(self, table, fields = []):
p = "INSERT INTO %s " % table
d1 = []
d2 = []
if isinstance(fields, dict):
for field in fields.keys():
d1.append(field)
value = str(fields[field])
if value.isdigit():
d2.append(value)
else:
d2.append("'%s'" % value)
        elif isinstance(fields, (list, tuple)):
for field in fields:
if isinstance(field, dict):
for key in field.keys():
d1.append(key)
                        if isinstance(field[key], str):
                            d2.append("'%s'" % field[key])
                        else:
                            d2.append(str(field[key]))
elif isinstance(field, tuple) or isinstance(field, list):
d1.append(field[0])
if isinstance(field[1], str):
d2.append("'%s'" % field[1])
else:
d2.append(str(field[1]))
else:
raise Error("fields passed must be an array or list or tuples, or an array of dicts. Got %s" % json.dumps(fields))
if len(d1) == len(d2) and len(d1) > 0:
return " ".join([p, "(",", ".join(d1), ") VALUES (", ", ".join(d2), ")"])
class Error(Exception):
pass
|
nuodb/nuodbTools
|
nuodbTools/cluster/sql.py
|
Python
|
bsd-3-clause
| 3,027
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from ._base import _validate
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def gini_index(data, method='rectangles'):
r"""Calculate the Gini index.
The Gini index is defined as
.. math::
G=\frac{A}{A+B}
where :math:`A` is the area between :math:`y=x` and the Lorenz curve and
:math:`B` is the area under the Lorenz curve. Simplifies to :math:`1-2B`
since :math:`A+B=0.5`.
Parameters
----------
data : 1-D array_like
Vector of counts, abundances, proportions, etc. All entries must be
non-negative.
method : {'rectangles', 'trapezoids'}
Method for calculating the area under the Lorenz curve. If
``'rectangles'``, connects the Lorenz curve points by lines parallel to
the x axis. This is the correct method (in our opinion) though
``'trapezoids'`` might be desirable in some circumstances. If
``'trapezoids'``, connects the Lorenz curve points by linear segments
between them. Basically assumes that the given sampling is accurate and
that more features of given data would fall on linear gradients between
the values of this data.
Returns
-------
double
Gini index.
Raises
------
ValueError
If `method` isn't one of the supported methods for calculating the area
under the curve.
Notes
-----
The Gini index was introduced in [1]_. The formula for
``method='rectangles'`` is
.. math::
dx\sum_{i=1}^n h_i
The formula for ``method='trapezoids'`` is
.. math::
dx(\frac{h_0+h_n}{2}+\sum_{i=1}^{n-1} h_i)
References
----------
.. [1] Gini, C. (1912). "Variability and Mutability", C. Cuppini, Bologna,
156 pages. Reprinted in Memorie di metodologica statistica (Ed. Pizetti
E, Salvemini, T). Rome: Libreria Eredi Virgilio Veschi (1955).
"""
# Suppress cast to int because this method supports ints and floats.
data = _validate(data, suppress_cast=True)
lorenz_points = _lorenz_curve(data)
B = _lorenz_curve_integrator(lorenz_points, method)
return 1 - 2 * B
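# Quick check against the formulas above: a perfectly even vector has a Lorenz
# curve lying on y = x, so the trapezoid rule gives B = 0.5 and G = 1 - 2B = 0.
#   >>> gini_index([1, 1, 1, 1], method='trapezoids')
#   0.0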
def _lorenz_curve(data):
"""Calculate the Lorenz curve for input data.
Notes
-----
Formula available on wikipedia.
"""
sorted_data = np.sort(data)
Sn = sorted_data.sum()
n = sorted_data.shape[0]
return np.arange(1, n + 1) / n, sorted_data.cumsum() / Sn
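# e.g. _lorenz_curve([1, 1, 2]) -> (array([1/3, 2/3, 1]), array([0.25, 0.5, 1.]))
# i.e. cumulative population share vs cumulative share of the summed values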
def _lorenz_curve_integrator(lc_pts, method):
"""Calculates the area under a Lorenz curve.
Notes
-----
Could be utilized for integrating other simple, non-pathological
"functions" where width of the trapezoids is constant.
"""
x, y = lc_pts
# each point differs by 1/n
dx = 1 / x.shape[0]
if method == 'trapezoids':
# 0 percent of the population has zero percent of the goods
h_0 = 0.0
h_n = y[-1]
# the 0th entry is at x=1/n
sum_hs = y[:-1].sum()
return dx * ((h_0 + h_n) / 2 + sum_hs)
elif method == 'rectangles':
return dx * y.sum()
else:
raise ValueError("Method '%s' not implemented. Available methods: "
"'rectangles', 'trapezoids'." % method)
|
xguse/scikit-bio
|
skbio/diversity/alpha/_gini.py
|
Python
|
bsd-3-clause
| 3,692
|
import permstruct
import permstruct.dag
from permuta import Permutations
# Since we usually don't want overlays:
overlays = False
# In most of the test cases below we do not include symmetries
# R = [[2,3,1], [1,5,4,3,2]]
# perm_prop = lambda p: all( p.avoids(x) for x in R)
# perm_bound = 7
# # inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# inp_dag = permstruct.dag.N_P_X1_taylored_for_av_231_15432(perm_prop, perm_bound)
# # inp_dag = permstruct.dag.len_3_pairs(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# ignored = 1
# R = [[2,3,1], [1,5,4,2,3]]
# perm_prop = lambda p: all( p.avoids(x) for x in R)
# perm_bound = 7
# # inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# inp_dag = permstruct.dag.N_P_X1_taylored_for_av_231_15423(perm_prop, perm_bound)
# # inp_dag = permstruct.dag.len_3_pairs(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# ignored = 1
# R = [[2,3,1], [1,5,3,2,4]]
# perm_prop = lambda p: all( p.avoids(x) for x in R)
# perm_bound = 7
# # inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# inp_dag = permstruct.dag.N_P_X1_taylored_for_av_231_15324(perm_prop, perm_bound)
# # inp_dag = permstruct.dag.len_3_pairs(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# ignored = 1
# R = [[2,3,1], [1,5,2,3,4]]
# perm_prop = lambda p: all( p.avoids(x) for x in R)
# perm_bound = 7
# # inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# inp_dag = permstruct.dag.N_P_X1_taylored_for_av_231_15234(perm_prop, perm_bound)
# # inp_dag = permstruct.dag.len_3_pairs(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# ignored = 1
# R = [[2,3,1], [1,2,5,3,4]]
# perm_prop = lambda p: all( p.avoids(x) for x in R)
# perm_bound = 7
# # inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# inp_dag = permstruct.dag.N_P_X_taylored_for_av_231_12534(perm_prop, perm_bound)
# # inp_dag = permstruct.dag.len_3_pairs(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# ignored = 1
# for n in range(6):
# for perm in Permutations(n):
# if perm.avoids([2,3,1]):
# print perm,
# print ""
# print ""
#------------------------------------------------#
# Avoiding two classical patterns of length 4
# JUST THE CLASSES WITH POLYNOMIAL GENERATING FUNCTIONS TO BEGIN WITH
#-- Wilf class 1 --#
# This one is finite like Av(123, 321) so we can do it if we
# allow enough rules, and allow them to be large enough
# perm_prop = lambda p: p.avoids([4,3,2,1]) and p.avoids([1,2,3,4])
#-- Wilf class 2 --#
# No success with
# perm_bound = 6
# dag = permstruct.dag.incr_decr_nonempty(perm_prop, perm_bound)
# max_rule_size = (4, 4)
# max_non_empty = 5
# perm_prop = lambda p: p.avoids([4,3,1,2]) and p.avoids([1,2,3,4])
#-- Wilf class 5 --#
#perm_prop = lambda p: p.avoids([4,3,2,1]) and p.avoids([1,3,2,4])
#-- Separable --#
# R = [[2,4,1,3], [3,1,4,2], [2,1,4,3], [3,4,1,2]]
# perm_prop = lambda p: all( p.avoids(x) for x in R)
# perm_prop = lambda p: p.avoids([2,4,1,3]) and p.avoids([3,1,4,2])
# perm_prop = lambda p: p.avoids([2,1,4,3]) and p.avoids([3,4,1,2])
# perm_prop = lambda p: p.avoids([4,2,3,1]) and p.avoids([3,2,4,1])
# Atkinson, big example from p. 31
# No luck
# R = [[1,2,3,4], [1,2,4,3], [1,3,2,4], [2,1,3,4], [1,4,5,2,3], [3,4,1,2,5], [3,5,1,6,2,4], [3,5,6,1,2,4], [4,5,1,6,2,3], [4,5,6,1,2,3]]
# perm_prop = lambda p: all( p.avoids(x) for x in R)
# perm_bound = 7
# inp_dag = permstruct.dag.len_3_pairs(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
# Obtained by a rifle shuffle of a deck of cards. See Atkinson p. 29
# Note that we have already done Av(321,2143). See Prop 3.4. Might be
# good to illustrate how Struct can do most of the work here.
perm_prop = lambda p: p.avoids([3,2,1]) and p.avoids([2,1,4,3]) and p.avoids([2,4,1,3])
# perm_prop = lambda p: p.avoids([3,2,1]) and p.avoids([2,1,4,3]) and p.avoids([2,4,1,3]) and p.avoids([3,1,4,2])
perm_bound = 7
inp_dag = permstruct.dag.N_P_X_mon1(perm_prop, perm_bound)
max_rule_size = (3, 3)
max_non_empty = 3
max_rules = 100
ignored = 1
#------------------------------------------------#
# The following classical patterns of length 4 are broken down
# by Wilf-classes
# Can we do any of these?
#-- Wilf class 1 --#
# perm_prop = lambda p: p.avoids([1,3,4,2])
# perm_prop = lambda p: p.avoids([2,4,1,3])
#-- Wilf class 2 --#
# perm_prop = lambda p: p.avoids([1,2,3,4])
# perm_prop = lambda p: p.avoids([1,2,4,3])
# perm_prop = lambda p: p.avoids([1,4,3,2])
# perm_prop = lambda p: p.avoids([2,1,4,3])
#-- Wilf class 3 --#
# perm_prop = lambda p: p.avoids([1,3,2,4])
# Nothing non-trivial found with the settings below
# Will at least need to allow avoiders of a single pattern
# of length 3
# perm_bound = 7
# inp_dag = permstruct.dag.len_3_pairs(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
# def is_Baxter(perm):
# n = len(perm)
# if n <= 3: return True
# for i in range(n-3):
# for j in range(i+1,n-2):
# for k in range(j+2,n):
# if (perm[j+1] < perm[i] < perm[k] < perm[j]) or (perm[j] < perm[k] < perm[i] < perm[j+1]):
# return False
# return True
# perm_prop = lambda p: is_Baxter(p)
# perm_bound = 7
# # inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# inp_dag = permstruct.dag.classic_avoiders_length_3(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#------------------------------------------------#
# perm_bound = 6
# inp_dag = permstruct.dag.elementary(perm_prop, perm_bound)
# inp_dag = permstruct.dag.incr_decr(perm_prop, perm_bound)
# overlay_dag = permstruct.dag.elementary(perm_prop, perm_bound)
# overlay_dag = permstruct.dag.x_dag(perm_prop, perm_bound)
# inp_dag = permstruct.dag.incr_decr_nonempty(perm_prop, perm_bound)
# inp_dag = permstruct.dag.decr_dag(perm_prop, perm_bound)
# inp_dag = permstruct.dag.classic_avoiders_length_3(perm_prop, perm_bound)
# inp_dag = permstruct.dag.classic_avoiders_length_3_with_input_without_incrdecr(perm_prop, perm_bound)
# inp_dag = permstruct.dag.len_3_pairs(perm_prop, perm_bound)
# max_rule_size = (1, 1)
# max_non_empty = 4
# max_rules = 100
# ignored = 0
# For exhaustive_with_overlays
# overlay_dag = permstruct.dag.x_dag(perm_prop, perm_bound)
# max_overlay_cnt = 1
# max_overlay_size = (1, 3)
overlays = False
if not overlays:
permstruct.exhaustive(perm_prop,
perm_bound,
inp_dag,
max_rule_size,
max_non_empty,
max_rules,
ignore_first = ignored)
else:
permstruct.exhaustive_with_overlays(perm_prop,
perm_bound,
inp_dag,
max_rule_size,
max_non_empty,
max_rules,
overlay_dag,
max_overlay_cnt,
max_overlay_size,
min_rule_size=(1,1))
|
PermutaTriangle/PermStruct
|
scratch/scratch_Henning.py
|
Python
|
bsd-3-clause
| 7,679
|
from pygiftgrab import Codec, ColourSpace
def pytest_addoption(parser):
parser.addoption('--codec', action='store',
help='Codec (HEVC, Xvid, or VP9)')
parser.addoption('--colour-space', action='store',
help='Colour space specification (BGRA, I420, or UYVY)')
def pytest_generate_tests(metafunc):
if 'codec' in metafunc.fixturenames:
codec_str = str(metafunc.config.option.codec)
case_insensitive = codec_str.lower()
if case_insensitive == 'hevc':
codec = Codec.HEVC
elif case_insensitive == 'xvid':
codec = Codec.Xvid
elif case_insensitive == 'vp9':
codec = Codec.VP9
else:
raise RuntimeError('Could not recognise codec ' +
codec_str)
metafunc.parametrize('codec', [codec])
if 'colour_space' in metafunc.fixturenames:
colour_space_str = str(metafunc.config.option.colour_space)
case_insensitive = colour_space_str.lower()
if case_insensitive == 'bgra':
colour_space = ColourSpace.BGRA
elif case_insensitive == 'i420':
colour_space = ColourSpace.I420
elif case_insensitive == 'uyvy':
colour_space = ColourSpace.UYVY
else:
raise RuntimeError('Could not recognise colour space ' +
colour_space_str)
metafunc.parametrize('colour_space', [colour_space])
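# A minimal invocation sketch (assumed usage, not part of this file): the
# options above are supplied on the pytest command line, and any test that
# takes `codec` or `colour_space` as an argument is parametrized with the
# matching enum value, e.g.
#
#     pytest --codec HEVC --colour-space I420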
|
gift-surg/GIFT-Grab
|
src/tests/target/conftest.py
|
Python
|
bsd-3-clause
| 1,482
|
"""
A couple of auxiliary functions
"""
import numpy as np
def sidekick(w1, w2, dt, T, A=1):
"""
    This function creates the sidekick time series provided
the two mixing frequencies w1, w2, the time resolution dt
and the total time T.
    returns the expression A * (cos(w1 * t) + cos(w2 * t))
    where t goes from 0 to int(T / dt).
"""
Nt = int(T / dt)
time = np.arange(Nt) * dt
A1 = np.cos(w1 * time)
A2 = np.cos(w2 * time)
A3 = A1 + A2
return A3 * A
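# A minimal usage sketch for sidekick (hypothetical values): a two-tone
# signal with mixing frequencies 1.0 and 2.5 rad/s, sampled every 0.1 s
# over 10 s, yielding int(10 / 0.1) == 100 samples.
#
#     signal = sidekick(w1=1.0, w2=2.5, dt=0.1, T=10.0, A=0.5)
#     assert signal.shape == (100,)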
def bump(t, offset, center, Max):
"""
This function creates a bump with a quadratic
function. The center of the function is at the
center. The zeros are at center +- offset and
    finally the value of the function at center is
Max.
    Note that this function does not check that the
    offsets are inside the vector t.
"""
beta = offset ** 2
D = Max / beta
y = D * (beta - (t - center)**2)
y[ y < 0] = 0
return y
def gaussian_bump(x, mean=0, maximum=1.0, baseline=0, HWHM_a=1.0, attenuation=2):
"""
Returns a gaussian bump (something of the shape exp(-x^2)) with
a given maximum and a given baseline.
Parameters
-----------
x : the values of the argument
mean : the value at which the function is centered
maximum : the maximum value that the function attains at the mean
    baseline : the value far from the mean, the baseline (zero shifted)
    HWHM_a : This comes from the half width at half maximum terminology.
In this case it denotes the value of the argument at which the
function will have attenuated by the attenuation value (next arg)
    attenuation : the attenuation factor applied at HWHM_a.
In brief for the last two arguments:
gaussian_bumps(HWHM_a) = maximum / attenuation
"""
# First we will calculate sigma based on all the other values
arg1 = attenuation * (maximum - baseline)
    arg2 = (maximum - attenuation * baseline)
    A = np.log(arg1 / arg2)
    sigma = HWHM_a / np.sqrt(A)
argument = (x - mean) / sigma
gaussian = np.exp(-argument ** 2)
return (maximum - baseline) * gaussian + baseline
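# A minimal usage sketch for gaussian_bump (hypothetical values): a bump
# centred at 0 that peaks at 2.0, decays towards a baseline of 0.5, and has
# attenuated to maximum / attenuation == 1.0 at x = +/-1.
#
#     x = np.linspace(-5, 5, 101)
#     y = gaussian_bump(x, mean=0, maximum=2.0, baseline=0.5,
#                       HWHM_a=1.0, attenuation=2)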
def combine_gaussian_bumps(gaussian_bumps, baseline):
"""
    This function takes a list of gaussian_bumps
    and returns their sum, subtracting the baseline
    so that it is not counted more than once.
"""
sum_g = gaussian_bumps.pop()
for gb in gaussian_bumps:
sum_g += gb
sum_g -= baseline
return sum_g
|
h-mayorquin/time_series_basic
|
signals/aux_functions.py
|
Python
|
bsd-3-clause
| 2,499
|
"""
byceps.services.board.category_query_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from typing import Optional, Sequence
from ...database import db
from .dbmodels.category import Category as DbCategory
from .transfer.models import (
BoardID,
Category,
CategoryID,
CategoryWithLastUpdate,
)
def count_categories_for_board(board_id: BoardID) -> int:
"""Return the number of categories for that board."""
return db.session \
.query(DbCategory) \
.filter_by(board_id=board_id) \
.count()
def find_category_by_id(category_id: CategoryID) -> Optional[Category]:
"""Return the category with that id, or `None` if not found."""
category = db.session.query(DbCategory).get(category_id)
if category is None:
return None
return _db_entity_to_category(category)
def find_category_by_slug(board_id: BoardID, slug: str) -> Optional[Category]:
"""Return the category for that board and slug, or `None` if not found."""
category = db.session \
.query(DbCategory) \
.filter_by(board_id=board_id) \
.filter_by(slug=slug) \
.first()
if category is None:
return None
return _db_entity_to_category(category)
def get_categories(board_id: BoardID) -> Sequence[Category]:
"""Return all categories for that board, ordered by position."""
categories = db.session \
.query(DbCategory) \
.filter_by(board_id=board_id) \
.order_by(DbCategory.position) \
.all()
return [_db_entity_to_category(category) for category in categories]
def get_categories_excluding(
board_id: BoardID, category_id: CategoryID
) -> Sequence[Category]:
"""Return all categories for that board except for the specified one."""
categories = db.session \
.query(DbCategory) \
.filter_by(board_id=board_id) \
.filter(DbCategory.id != category_id) \
.order_by(DbCategory.position) \
.all()
return [_db_entity_to_category(category) for category in categories]
def get_categories_with_last_updates(
board_id: BoardID,
) -> Sequence[CategoryWithLastUpdate]:
"""Return the categories for that board.
Include the creator of the last posting in each category.
"""
categories_with_last_update = db.session \
.query(DbCategory) \
.filter_by(board_id=board_id) \
.filter_by(hidden=False) \
.options(
db.joinedload(DbCategory.last_posting_updated_by),
) \
.all()
return [
_db_entity_to_category_with_last_update(category)
for category in categories_with_last_update
]
def _db_entity_to_category(category: DbCategory) -> Category:
return Category(
category.id,
category.board_id,
category.position,
category.slug,
category.title,
category.description,
category.topic_count,
category.posting_count,
category.hidden,
)
def _db_entity_to_category_with_last_update(
category: DbCategory,
) -> CategoryWithLastUpdate:
return CategoryWithLastUpdate(
category.id,
category.board_id,
category.position,
category.slug,
category.title,
category.description,
category.topic_count,
category.posting_count,
category.hidden,
category.last_posting_updated_at,
category.last_posting_updated_by,
)
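# A minimal usage sketch (assumes a running application context with a
# configured database session; `board_id` is a hypothetical value):
#
#     categories = get_categories(board_id)
#     for category in categories:
#         print(category.position, category.title)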
|
homeworkprod/byceps
|
byceps/services/board/category_query_service.py
|
Python
|
bsd-3-clause
| 3,564
|
# Copyright (c) 2014, Guillermo López-Anglada. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.)
from subprocess import Popen
from subprocess import PIPE
from subprocess import TimeoutExpired
import threading
from . import PluginLogger
from .plat import supress_window
from .text import clean
from .text import decode
_logger = PluginLogger(__name__)
class TextFilter(object):
'''Filters text through an external program (sync).
'''
def __init__(self, args, timeout=10):
self.args = args
self.timeout = timeout
# Encoding the external program likes to receive.
self.in_encoding = 'utf-8'
# Encoding the external program will emit.
self.out_encoding = 'utf-8'
self._proc = None
def encode(self, text):
return text.encode(self.in_encoding)
def _start(self):
try:
self._proc = Popen(self.args,
stdout=PIPE,
stderr=PIPE,
stdin=PIPE,
startupinfo=supress_window())
except OSError as e:
_logger.error('while starting text filter program: %s', e)
return
def filter(self, input_text):
self._start()
try:
in_bytes = self.encode(input_text)
out_bytes, err_bytes = self._proc.communicate(in_bytes,
self.timeout)
if err_bytes:
_logger.error('while filtering text: %s',
clean(decode(err_bytes, self.out_encoding)))
return
return clean(decode(out_bytes, self.out_encoding))
except TimeoutExpired:
_logger.debug('text filter program response timed out')
return
except Exception as e:
_logger.error('while running TextFilter: %s', e)
return
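# A minimal usage sketch (assumes a POSIX `sort` binary on the PATH):
#
#     tf = TextFilter(['sort'])
#     tf.filter('pear\napple\n')  # -> 'apple\npear' (exact whitespace
#                                 #    depends on the `clean` helper)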
|
guillermooo/dart-sublime-bundle-releases
|
sublime_plugin_lib/filter.py
|
Python
|
bsd-3-clause
| 2,055
|
import wave
import sys
import struct
import time
import subprocess
import threading
import traceback
import shlex
import os
import string
import random
import datetime as dt
import numpy as np
import scipy as sp
import scipy.special
from contextlib import closing
from argparse import ArgumentParser
from pyoperant import Error
try:
import simplejson as json
except ImportError:
import json
class NumpyAwareJSONEncoder(json.JSONEncoder):
""" this json encoder converts numpy arrays to lists so that json can write them.
example usage:
>>> import numpy as np
>>> dict_to_save = {'array': np.zeros((5,))}
>>> json.dumps(dict_to_save,
cls=NumpyAwareJSONEncoder
)
'{"array": [0.0, 0.0, 0.0, 0.0, 0.0]}'
"""
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
# consider importing this from python-neo
class Event(object):
"""docstring for Event"""
def __init__(self, time=None, duration=None, label='', name=None, description=None, file_origin=None, *args, **kwargs):
super(Event, self).__init__()
self.time = time
self.duration = duration
self.label = label
self.name = name
self.description = description
self.file_origin = file_origin
self.annotations = {}
self.annotate(**kwargs)
def annotate(self,**kwargs):
self.annotations.update(kwargs)
class Stimulus(Event):
"""docstring for Stimulus"""
def __init__(self, *args, **kwargs):
super(Stimulus, self).__init__(*args, **kwargs)
if self.label=='':
self.label = 'stimulus'
class AuditoryStimulus(Stimulus):
"""docstring for AuditoryStimulus"""
def __init__(self, *args, **kwargs):
super(AuditoryStimulus, self).__init__(*args, **kwargs)
if self.label=='':
self.label = 'auditory_stimulus'
def run_state_machine(start_in='pre', error_state=None, error_callback=None, **state_functions):
"""runs a state machine defined by the keyword arguments
>>> def run_start():
>>> print "in 'run_start'"
>>> return 'next'
>>> def run_next():
>>> print "in 'run_next'"
>>> return None
>>> run_state_machine(start_in='start',
>>> start=run_start,
>>> next=run_next)
in 'run_start'
in 'run_next'
None
"""
# make sure the start state has a function to run
assert (start_in in state_functions.keys())
# make sure all of the arguments passed in are callable
for func in state_functions.values():
assert hasattr(func, '__call__')
state = start_in
while state is not None:
try:
state = state_functions[state]()
except Exception, e:
if error_callback:
error_callback(e)
raise
else:
raise
state = error_state
class Trial(Event):
"""docstring for Trial"""
def __init__(self,
index=None,
type_='normal',
class_=None,
*args, **kwargs):
super(Trial, self).__init__(*args, **kwargs)
self.label = 'trial'
self.session = None
self.index = index
self.type_ = type_
self.stimulus = None
self.class_ = class_
self.response = None
self.correct = None
self.rt = None
self.reward = False
self.punish = False
self.events = []
self.stim_event = None
class Command(object):
"""
    Enables running subprocess commands in a different thread with a TIMEOUT option.
via https://gist.github.com/kirpit/1306188
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
"""
command = None
process = None
status = None
output, error = '', ''
def __init__(self, command):
if isinstance(command, basestring):
command = shlex.split(command)
self.command = command
def run(self, timeout=None, **kwargs):
""" Run a command then return: (status, output, error). """
def target(**kwargs):
try:
self.process = subprocess.Popen(self.command, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
except:
self.error = traceback.format_exc()
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
# thread
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.status, self.output, self.error
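# A minimal usage sketch (POSIX command assumed): run a shell command with a
# 5-second timeout; `status` is the return code, or -1 if the target raised.
#
#     status, output, error = Command('ls -l').run(timeout=5)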
def parse_commandline(arg_str=sys.argv[1:]):
""" parse command line arguments
    note: optparse is deprecated w/ v2.7 in favor of argparse
"""
parser=ArgumentParser()
parser.add_argument('-B', '--box',
action='store', type=int, dest='box', required=False,
help='(int) box identifier')
parser.add_argument('-S', '--subject',
action='store', type=str, dest='subj', required=False,
help='subject ID and folder name')
parser.add_argument('-c','--config',
action='store', type=str, dest='config_file', default='config.json', required=True,
help='configuration file [default: %(default)s]')
args = parser.parse_args(arg_str)
return vars(args)
def check_cmdline_params(parameters, cmd_line):
    # if someone is using red bands they should amend the checks I perform here
allchars=string.maketrans('','')
nodigs=allchars.translate(allchars, string.digits)
if not ('box' not in cmd_line or cmd_line['box'] == int(parameters['panel_name'].encode('ascii','ignore').translate(allchars, nodigs))):
print "box number doesn't match config and command line"
return False
if not ('subj' not in cmd_line or int(cmd_line['subj'].encode('ascii','ignore').translate(allchars, nodigs)) == int(parameters['subject'].encode('ascii','ignore').translate(allchars, nodigs))):
print "subject number doesn't match config and command line"
return False
return True
def time_in_range(start, end, x):
"""Return true if x is in the range [start, end]"""
if start <= end:
return start <= x <= end
else:
return start <= x or x <= end
def is_day(latitude = '32.82', longitude = '-117.14'):
"""Is it daytime?
(lat,long) -- latitude and longitude of location to check (default is San Diego)
Returns True if it is daytime
"""
import ephem
obs = ephem.Observer()
obs.lat = latitude # San Diego, CA
obs.long = longitude
sun = ephem.Sun()
sun.compute()
next_sunrise = ephem.localtime(obs.next_rising(sun))
next_sunset = ephem.localtime(obs.next_setting(sun))
return next_sunset < next_sunrise
def check_time(schedule,fmt="%H:%M"):
""" determine whether trials should be done given the current time and the light schedule
returns Boolean if current time meets schedule
schedule='sun' will change lights according to local sunrise and sunset
schedule=[('07:00','17:00')] will have lights on between 7am and 5pm
    schedule=[('06:00','12:00'),('18:00','24:00')] will have lights on between
    6am and noon, and between 6pm and midnight
"""
if schedule == 'sun':
if is_day():
return True
else:
for epoch in schedule:
            assert len(epoch) == 2
now = dt.datetime.time(dt.datetime.now())
start = dt.datetime.time(dt.datetime.strptime(epoch[0],fmt))
end = dt.datetime.time(dt.datetime.strptime(epoch[1],fmt))
if time_in_range(start,end,now):
return True
return False
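# A minimal usage sketch (illustrative schedule): with lights on 7am-5pm,
# check_time returns True only when called inside that window.
#
#     check_time([('07:00', '17:00')])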
def wait(secs=1.0, final_countdown=0.0,waitfunc=None):
"""Smartly wait for a given time period.
secs -- total time to wait in seconds
final_countdown -- time at end of secs to wait and constantly poll the clock
    waitfunc -- optional function to run in a loop during the final countdown
    If secs=1.0 and final_countdown=0.2 then for 0.8s python's time.sleep function will be used,
    which is not especially precise, but allows the cpu to perform housekeeping. In
    the last final_countdown seconds, the more precise method of constantly polling the clock
    is used for greater precision.
"""
#initial relaxed period, using sleep (better for system resources etc)
if secs > final_countdown:
time.sleep(secs-final_countdown)
secs = final_countdown # only this much is now left
#It's the Final Countdown!!
#hog the cpu, checking time
t0 = time.time()
while (time.time()-t0) < secs:
#let's see if any events were collected in meantime
try:
waitfunc()
except:
pass
def auditory_stim_from_wav(wav):
with closing(wave.open(wav,'rb')) as wf:
(nchannels, sampwidth, framerate, nframes, comptype, compname) = wf.getparams()
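        # The duration arithmetic below implicitly assumes 16-bit samples
        # (sampwidth == 2), in which case it reduces to nframes / framerate.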
duration = float(nframes)/sampwidth
duration = duration * 2.0 / framerate
stim = AuditoryStimulus(time=0.0,
duration=duration,
name=wav,
label='wav',
description='',
file_origin=wav,
annotations={'nchannels': nchannels,
'sampwidth': sampwidth,
'framerate': framerate,
'nframes': nframes,
'comptype': comptype,
'compname': compname,
}
)
return stim
def concat_wav(input_file_list, output_filename='concat.wav'):
""" concat a set of wav files into a single wav file and return the output filename
takes in a tuple list of files and duration of pause after the file
input_file_list = [
('a.wav', 0.1),
('b.wav', 0.09),
('c.wav', 0.0),
]
    returns a tuple of the concatenated AuditoryStimulus and a list of per-file epochs
TODO: add checks for sampling rate, number of channels, etc.
"""
cursor = 0
epochs = [] # list of file epochs
audio_data = ''
with closing(wave.open(output_filename, 'wb')) as output:
for input_filename, isi in input_file_list:
# read in the wav file
with closing(wave.open(input_filename,'rb')) as wav_part:
try:
params = wav_part.getparams()
output.setparams(params)
fs = output.getframerate()
except: # TODO: what was I trying to except here? be more specific
pass
audio_frames = wav_part.readframes(wav_part.getnframes())
# append the audio data
audio_data += audio_frames
part_start = cursor
part_dur = len(audio_frames)/params[1]
epochs.append(AuditoryStimulus(time=float(part_start)/fs,
duration=float(part_dur)/fs,
name=input_filename,
file_origin=input_filename,
annotations=params,
label='motif'
))
cursor += part_dur # move cursor length of the duration
# add isi
if isi > 0.0:
isi_frames = ''.join([struct.pack('h', fr) for fr in [0]*int(fs*isi)])
audio_data += isi_frames
cursor += len(isi_frames)/params[1]
# concat all of the audio together and write to file
output.writeframes(audio_data)
description = 'concatenated on-the-fly'
concat_wav = AuditoryStimulus(time=0.0,
duration=epochs[-1].time+epochs[-1].duration,
name=output_filename,
label='wav',
description=description,
file_origin=output_filename,
annotations=output.getparams(),
)
return (concat_wav,epochs)
def get_num_open_fds():
'''
return the number of open file descriptors for current process
.. warning: will only work on UNIX-like os-es.
'''
pid = os.getpid()
procs = subprocess.check_output(
[ "lsof", '-w', '-Ff', "-p", str( pid ) ] )
nprocs = len(
filter(
lambda s: s and s[ 0 ] == 'f' and s[1: ].isdigit(),
procs.split( '\n' ) )
)
return nprocs
def rand_from_log_shape_dist(alpha=10):
"""
randomly samples from a distribution between 0 and 1 with pdf shaped like the log function
low probability of getting close to zero, increasing probability going towards 1
alpha determines how sharp the curve is, higher alpha, sharper curve.
"""
beta = (alpha + 1) * np.log(alpha + 1) - alpha
t = random.random()
ret = ((beta * t-1)/(sp.special.lambertw((beta*t-1)/np.e)) - 1) / alpha
return max(min(np.real(ret), 1), 0)
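# A minimal sanity check for rand_from_log_shape_dist: samples always lie in
# [0, 1], with mass concentrated towards 1 for larger alpha.
#
#     samples = [rand_from_log_shape_dist(alpha=10) for _ in range(1000)]
#     assert all(0 <= s <= 1 for s in samples)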
|
gentnerlab/pyoperant
|
pyoperant/utils.py
|
Python
|
bsd-3-clause
| 13,912
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['PolyTrend'] , ['Seasonal_Minute'] , ['LSTM'] );
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_PolyTrend_Seasonal_Minute_LSTM.py
|
Python
|
bsd-3-clause
| 168
|
from django.db.models.signals import post_save, m2m_changed
from django.dispatch import receiver
from django.contrib.auth.models import User, Group
from mqtt.cache_clear import mqtt_cache_clear
from .models import UserProfile, GroupProfile
# Add signal to automatically clear cache when group permissions change
@receiver(m2m_changed, sender=User.groups.through)
def user_groups_changed_handler(sender, instance, action, **kwargs):
if action == 'post_add' or action == 'post_remove':
# invalidate permissions cache
mqtt_cache_clear()
# Add signal to automatically extend group profile
@receiver(post_save, sender=Group)
def create_group_profile(sender, instance, created, **kwargs):
if created:
GroupProfile.objects.create(group=instance)
# Add signal to automatically extend user profile
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
|
EMSTrack/WebServerAndClient
|
login/signals.py
|
Python
|
bsd-3-clause
| 992
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy import integrate
from .kern import Kern
from ...core.parameterization import Param
from ...util.linalg import tdot
from ... import util
from ...util.config import config # for assesing whether to use cython
from paramz.caching import Cache_this
from paramz.transformations import Logexp
try:
from . import stationary_cython
use_stationary_cython = config.getboolean('cython', 'working')
except ImportError:
print('warning in stationary: failed to import cython module: falling back to numpy')
use_stationary_cython = False
class Stationary(Kern):
"""
Stationary kernels (covariance functions).
    Stationary covariance functions depend only on r, where r is defined as
.. math::
r(x, x') = \\sqrt{ \\sum_{q=1}^Q (x_q - x'_q)^2 }
    The covariance function k(x, x') can then be written k(r).
In this implementation, r is scaled by the lengthscales parameter(s):
.. math::
r(x, x') = \\sqrt{ \\sum_{q=1}^Q \\frac{(x_q - x'_q)^2}{\ell_q^2} }.
    By default, there's only one lengthscale: separate lengthscales for each
    dimension can be enabled by setting ARD=True.
To implement a stationary covariance function using this class, one need
    only define the covariance function k(r) and its derivative.
```
def K_of_r(self, r):
return foo
def dK_dr(self, r):
return bar
```
The lengthscale(s) and variance parameters are added to the structure automatically.
Thanks to @strongh:
In Stationary, a covariance function is defined in GPy as stationary when it depends only on the l2-norm |x_1 - x_2 |.
However this is the typical definition of isotropy, while stationarity is usually a bit more relaxed.
The more common version of stationarity is that the covariance is a function of x_1 - x_2 (See e.g. R&W first paragraph of section 4.1).
"""
def __init__(self, input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=False):
super(Stationary, self).__init__(input_dim, active_dims, name,useGPU=useGPU)
self.ARD = ARD
if not ARD:
if lengthscale is None:
lengthscale = np.ones(1)
else:
lengthscale = np.asarray(lengthscale)
assert lengthscale.size == 1, "Only 1 lengthscale needed for non-ARD kernel"
else:
if lengthscale is not None:
lengthscale = np.asarray(lengthscale)
assert lengthscale.size in [1, input_dim], "Bad number of lengthscales"
if lengthscale.size != input_dim:
lengthscale = np.ones(input_dim)*lengthscale
else:
lengthscale = np.ones(self.input_dim)
self.lengthscale = Param('lengthscale', lengthscale, Logexp())
self.variance = Param('variance', variance, Logexp())
assert self.variance.size==1
self.link_parameters(self.variance, self.lengthscale)
def _save_to_input_dict(self):
input_dict = super(Stationary, self)._save_to_input_dict()
input_dict["variance"] = self.variance.values.tolist()
input_dict["lengthscale"] = self.lengthscale.values.tolist()
input_dict["ARD"] = self.ARD
return input_dict
def K_of_r(self, r):
raise NotImplementedError("implement the covariance function as a fn of r to use this class")
def dK_dr(self, r):
raise NotImplementedError("implement derivative of the covariance function wrt r to use this class")
@Cache_this(limit=3, ignore_args=())
def dK2_drdr(self, r):
raise NotImplementedError("implement second derivative of covariance wrt r to use this method")
@Cache_this(limit=3, ignore_args=())
def dK2_drdr_diag(self):
"Second order derivative of K in r_{i,i}. The diagonal entries are always zero, so we do not give it here."
raise NotImplementedError("implement second derivative of covariance wrt r_diag to use this method")
@Cache_this(limit=3, ignore_args=())
def K(self, X, X2=None):
"""
Kernel function applied on inputs X and X2.
In the stationary case there is an inner function depending on the
distances from X to X2, called r.
K(X, X2) = K_of_r((X-X2)**2)
"""
r = self._scaled_dist(X, X2)
return self.K_of_r(r)
@Cache_this(limit=3, ignore_args=())
def dK_dr_via_X(self, X, X2):
"""
compute the derivative of K wrt X going through X
"""
#a convenience function, so we can cache dK_dr
return self.dK_dr(self._scaled_dist(X, X2))
@Cache_this(limit=3, ignore_args=())
def dK2_drdr_via_X(self, X, X2):
#a convenience function, so we can cache dK_dr
return self.dK2_drdr(self._scaled_dist(X, X2))
def _unscaled_dist(self, X, X2=None):
"""
Compute the Euclidean distance between each row of X and X2, or between
each pair of rows of X if X2 is None.
"""
#X, = self._slice_X(X)
if X2 is None:
Xsq = np.sum(np.square(X),1)
r2 = -2.*tdot(X) + (Xsq[:,None] + Xsq[None,:])
            util.diag.view(r2)[:,] = 0. # force diagonal to be zero: sometimes numerically a little negative
r2 = np.clip(r2, 0, np.inf)
return np.sqrt(r2)
else:
#X2, = self._slice_X(X2)
X1sq = np.sum(np.square(X),1)
X2sq = np.sum(np.square(X2),1)
r2 = -2.*np.dot(X, X2.T) + (X1sq[:,None] + X2sq[None,:])
r2 = np.clip(r2, 0, np.inf)
return np.sqrt(r2)
@Cache_this(limit=3, ignore_args=())
def _scaled_dist(self, X, X2=None):
"""
Efficiently compute the scaled distance, r.
..math::
r = \sqrt( \sum_{q=1}^Q (x_q - x'q)^2/l_q^2 )
        Note that if there is only one lengthscale, l comes outside the sum. In
this case we compute the unscaled distance first (in a separate
function for caching) and divide by lengthscale afterwards
"""
if self.ARD:
if X2 is not None:
X2 = X2 / self.lengthscale
return self._unscaled_dist(X/self.lengthscale, X2)
else:
return self._unscaled_dist(X, X2)/self.lengthscale
def Kdiag(self, X):
ret = np.empty(X.shape[0])
ret[:] = self.variance
return ret
def reset_gradients(self):
self.variance.gradient = 0.
if not self.ARD:
self.lengthscale.gradient = 0.
else:
self.lengthscale.gradient = np.zeros(self.input_dim)
def update_gradients_diag(self, dL_dKdiag, X):
"""
Given the derivative of the objective with respect to the diagonal of
the covariance matrix, compute the derivative wrt the parameters of
        this kernel and store in the <parameter>.gradient field.
See also update_gradients_full
"""
self.variance.gradient = np.sum(dL_dKdiag)
self.lengthscale.gradient = 0.
def update_gradients_full(self, dL_dK, X, X2=None, reset=True):
"""
Given the derivative of the objective wrt the covariance matrix
(dL_dK), compute the gradient wrt the parameters of this kernel,
and store in the parameters object as e.g. self.variance.gradient
"""
self.variance.gradient = np.sum(self.K(X, X2)* dL_dK)/self.variance
#now the lengthscale gradient(s)
dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
if self.ARD:
tmp = dL_dr*self._inv_dist(X, X2)
if X2 is None: X2 = X
if use_stationary_cython:
self.lengthscale.gradient = self._lengthscale_grads_cython(tmp, X, X2)
else:
self.lengthscale.gradient = self._lengthscale_grads_pure(tmp, X, X2)
else:
r = self._scaled_dist(X, X2)
self.lengthscale.gradient = -np.sum(dL_dr*r)/self.lengthscale
def update_gradients_direct(self, dL_dVar, dL_dLen):
"""
Specially intended for the Grid regression case.
        Given the computed log likelihood derivatives, update the corresponding
kernel and likelihood gradients.
Useful for when gradients have been computed a priori.
"""
self.variance.gradient = dL_dVar
self.lengthscale.gradient = dL_dLen
def _inv_dist(self, X, X2=None):
"""
        Compute the elementwise inverse of the distance matrix, except on the
        diagonal, where we return zero (the distance on the diagonal is zero).
        This term appears in derivatives.
"""
dist = self._scaled_dist(X, X2).copy()
return 1./np.where(dist != 0., dist, np.inf)
def _lengthscale_grads_pure(self, tmp, X, X2):
return -np.array([np.sum(tmp * np.square(X[:,q:q+1] - X2[:,q:q+1].T)) for q in range(self.input_dim)])/self.lengthscale**3
def _lengthscale_grads_cython(self, tmp, X, X2):
N,M = tmp.shape
Q = self.input_dim
X, X2 = np.ascontiguousarray(X), np.ascontiguousarray(X2)
grads = np.zeros(self.input_dim)
stationary_cython.lengthscale_grads(N, M, Q, tmp, X, X2, grads)
return -grads/self.lengthscale**3
def gradients_X(self, dL_dK, X, X2=None):
"""
Given the derivative of the objective wrt K (dL_dK), compute the derivative wrt X
"""
if use_stationary_cython:
return self._gradients_X_cython(dL_dK, X, X2)
else:
return self._gradients_X_pure(dL_dK, X, X2)
def gradients_XX(self, dL_dK, X, X2=None):
"""
Given the derivative of the objective K(dL_dK), compute the second derivative of K wrt X and X2:
        returns the full covariance matrix [QxQ] of the input dimension for each pair of vectors, thus
the returned array is of shape [NxNxQxQ].
..math:
\frac{\partial^2 K}{\partial X2 ^2} = - \frac{\partial^2 K}{\partial X\partial X2}
..returns:
dL2_dXdX2: [NxMxQxQ] in the cov=True case, or [NxMxQ] in the cov=False case,
for X [NxQ] and X2[MxQ] (X2 is X if, X2 is None)
Thus, we return the second derivative in X2.
"""
# According to multivariable chain rule, we can chain the second derivative through r:
# d2K_dXdX2 = dK_dr*d2r_dXdX2 + d2K_drdr * dr_dX * dr_dX2:
invdist = self._inv_dist(X, X2)
invdist2 = invdist**2
dL_dr = self.dK_dr_via_X(X, X2) #* dL_dK # we perform this product later
tmp1 = dL_dr * invdist
        dL_drdr = self.dK2_drdr_via_X(X, X2) #* dL_dK # we perform this product later
tmp2 = dL_drdr*invdist2
l2 = np.ones(X.shape[1])*self.lengthscale**2 #np.multiply(np.ones(X.shape[1]) ,self.lengthscale**2)
if X2 is None:
X2 = X
tmp1 -= np.eye(X.shape[0])*self.variance
else:
tmp1[invdist2==0.] -= self.variance
#grad = np.empty((X.shape[0], X2.shape[0], X2.shape[1], X.shape[1]), dtype=np.float64)
dist = X[:,None,:] - X2[None,:,:]
dist = (dist[:,:,:,None]*dist[:,:,None,:])
I = np.ones((X.shape[0], X2.shape[0], X2.shape[1], X.shape[1]))*np.eye((X2.shape[1]))
grad = (((dL_dK*(tmp1*invdist2 - tmp2))[:,:,None,None] * dist)/l2[None,None,:,None]
- (dL_dK*tmp1)[:,:,None,None] * I)/l2[None,None,None,:]
return grad
def gradients_XX_diag(self, dL_dK_diag, X):
"""
Given the derivative of the objective dL_dK, compute the second derivative of K wrt X:
..math:
\frac{\partial^2 K}{\partial X\partial X}
..returns:
dL2_dXdX: [NxQxQ]
"""
dL_dK_diag = dL_dK_diag.copy().reshape(-1, 1, 1)
assert (dL_dK_diag.size == X.shape[0]) or (dL_dK_diag.size == 1), "dL_dK_diag has to be given as row [N] or column vector [Nx1]"
l4 = np.ones(X.shape[1])*self.lengthscale**2
return dL_dK_diag * (np.eye(X.shape[1]) * -self.dK2_drdr_diag()/(l4))[None, :,:]# np.zeros(X.shape+(X.shape[1],))
#return np.ones(X.shape) * d2L_dK * self.variance/self.lengthscale**2 # np.zeros(X.shape)
def dgradients_dX(self, X, X2, dimX):
g1 = self.dK2_dvariancedX(X, X2, dimX)
g2 = self.dK2_dlengthscaledX(X, X2, dimX)
return [g1, g2]
def dgradients_dX2(self, X, X2, dimX2):
g1 = self.dK2_dvariancedX2(X, X2, dimX2)
g2 = self.dK2_dlengthscaledX2(X, X2, dimX2)
return [g1, g2]
def dgradients2_dXdX2(self, X, X2, dimX, dimX2):
g1 = self.dK3_dvariancedXdX2(X, X2, dimX, dimX2)
g2 = self.dK3_dlengthscaledXdX2(X, X2, dimX, dimX2)
return [g1, g2]
def _gradients_X_pure(self, dL_dK, X, X2=None):
invdist = self._inv_dist(X, X2)
dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
tmp = invdist*dL_dr
if X2 is None:
tmp = tmp + tmp.T
X2 = X
#The high-memory numpy way:
#d = X[:, None, :] - X2[None, :, :]
#grad = np.sum(tmp[:,:,None]*d,1)/self.lengthscale**2
#the lower memory way with a loop
grad = np.empty(X.shape, dtype=np.float64)
for q in range(self.input_dim):
np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), axis=1, out=grad[:,q])
return grad/self.lengthscale**2
def _gradients_X_cython(self, dL_dK, X, X2=None):
invdist = self._inv_dist(X, X2)
dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
tmp = invdist*dL_dr
if X2 is None:
tmp = tmp + tmp.T
X2 = X
X, X2 = np.ascontiguousarray(X), np.ascontiguousarray(X2)
grad = np.zeros(X.shape)
stationary_cython.grad_X(X.shape[0], X.shape[1], X2.shape[0], X, X2, tmp, grad)
return grad/self.lengthscale**2
def gradients_X_diag(self, dL_dKdiag, X):
return np.zeros(X.shape)
def input_sensitivity(self, summarize=True):
return self.variance*np.ones(self.input_dim)/self.lengthscale**2
def get_one_dimensional_kernel(self, dimensions):
"""
Specially intended for the grid regression case
For a given covariance kernel, this method returns the corresponding kernel for
a single dimension. The resulting values can then be used in the algorithm for
reconstructing the full covariance matrix.
"""
raise NotImplementedError("implement one dimensional variation of kernel")
class Exponential(Stationary):
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Exponential'):
super(Exponential, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def K_of_r(self, r):
return self.variance * np.exp(-r)
def dK_dr(self, r):
return -self.K_of_r(r)
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(Exponential, self)._save_to_input_dict()
input_dict["class"] = "GPy.kern.Exponential"
return input_dict
@staticmethod
def _build_from_input_dict(kernel_class, input_dict):
useGPU = input_dict.pop('useGPU', None)
return Exponential(**input_dict)
# def sde(self):
# """
# Return the state space representation of the covariance.
# """
# F = np.array([[-1/self.lengthscale]])
# L = np.array([[1]])
# Qc = np.array([[2*self.variance/self.lengthscale]])
# H = np.array([[1]])
# Pinf = np.array([[self.variance]])
# # TODO: return the derivatives as well
#
# return (F, L, Qc, H, Pinf)
class OU(Stationary):
"""
OU kernel:
.. math::
k(r) = \\sigma^2 \exp(- r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^{\text{input_dim}} \\frac{(x_i-y_i)^2}{\ell_i^2} }
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='OU'):
super(OU, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(OU, self)._save_to_input_dict()
input_dict["class"] = "GPy.kern.OU"
return input_dict
@staticmethod
def _build_from_input_dict(kernel_class, input_dict):
useGPU = input_dict.pop('useGPU', None)
return OU(**input_dict)
def K_of_r(self, r):
return self.variance * np.exp(-r)
def dK_dr(self,r):
return -1.*self.variance*np.exp(-r)
class Matern32(Stationary):
"""
Matern 3/2 kernel:
.. math::
k(r) = \\sigma^2 (1 + \\sqrt{3} r) \exp(- \sqrt{3} r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^{\\text{input_dim}} \\frac{(x_i-y_i)^2}{\ell_i^2} }
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Mat32'):
super(Matern32, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(Matern32, self)._save_to_input_dict()
input_dict["class"] = "GPy.kern.Matern32"
return input_dict
@staticmethod
def _build_from_input_dict(kernel_class, input_dict):
useGPU = input_dict.pop('useGPU', None)
return Matern32(**input_dict)
def K_of_r(self, r):
return self.variance * (1. + np.sqrt(3.) * r) * np.exp(-np.sqrt(3.) * r)
def dK_dr(self,r):
return -3.*self.variance*r*np.exp(-np.sqrt(3.)*r)
def Gram_matrix(self, F, F1, F2, lower, upper):
"""
Return the Gram matrix of the vector of functions F with respect to the
RKHS norm. The use of this function is limited to input_dim=1.
:param F: vector of functions
:type F: np.array
:param F1: vector of derivatives of F
:type F1: np.array
:param F2: vector of second derivatives of F
:type F2: np.array
:param lower,upper: boundaries of the input domain
:type lower,upper: floats
"""
assert self.input_dim == 1
def L(x, i):
return(3. / self.lengthscale ** 2 * F[i](x) + 2 * np.sqrt(3) / self.lengthscale * F1[i](x) + F2[i](x))
n = F.shape[0]
G = np.zeros((n, n))
for i in range(n):
for j in range(i, n):
G[i, j] = G[j, i] = integrate.quad(lambda x : L(x, i) * L(x, j), lower, upper)[0]
Flower = np.array([f(lower) for f in F])[:, None]
F1lower = np.array([f(lower) for f in F1])[:, None]
return(self.lengthscale ** 3 / (12.*np.sqrt(3) * self.variance) * G + 1. / self.variance * np.dot(Flower, Flower.T) + self.lengthscale ** 2 / (3.*self.variance) * np.dot(F1lower, F1lower.T))
def sde(self):
"""
Return the state space representation of the covariance.
"""
variance = float(self.variance.values)
lengthscale = float(self.lengthscale.values)
foo = np.sqrt(3.)/lengthscale
F = np.array([[0, 1], [-foo**2, -2*foo]])
L = np.array([[0], [1]])
Qc = np.array([[12.*np.sqrt(3) / lengthscale**3 * variance]])
H = np.array([[1, 0]])
Pinf = np.array([[variance, 0],
[0, 3.*variance/(lengthscale**2)]])
# Allocate space for the derivatives
dF = np.empty([F.shape[0],F.shape[1],2])
dQc = np.empty([Qc.shape[0],Qc.shape[1],2])
dPinf = np.empty([Pinf.shape[0],Pinf.shape[1],2])
# The partial derivatives
dFvariance = np.zeros([2,2])
dFlengthscale = np.array([[0,0],
[6./lengthscale**3,2*np.sqrt(3)/lengthscale**2]])
dQcvariance = np.array([12.*np.sqrt(3)/lengthscale**3])
dQclengthscale = np.array([-3*12*np.sqrt(3)/lengthscale**4*variance])
dPinfvariance = np.array([[1,0],[0,3./lengthscale**2]])
dPinflengthscale = np.array([[0,0],
[0,-6*variance/lengthscale**3]])
# Combine the derivatives
dF[:,:,0] = dFvariance
dF[:,:,1] = dFlengthscale
dQc[:,:,0] = dQcvariance
dQc[:,:,1] = dQclengthscale
dPinf[:,:,0] = dPinfvariance
dPinf[:,:,1] = dPinflengthscale
return (F, L, Qc, H, Pinf, dF, dQc, dPinf)
class Matern52(Stationary):
"""
Matern 5/2 kernel:
.. math::
k(r) = \sigma^2 (1 + \sqrt{5} r + \\frac53 r^2) \exp(- \sqrt{5} r)
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Mat52'):
super(Matern52, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(Matern52, self)._save_to_input_dict()
input_dict["class"] = "GPy.kern.Matern52"
return input_dict
@staticmethod
def _build_from_input_dict(kernel_class, input_dict):
useGPU = input_dict.pop('useGPU', None)
return Matern52(**input_dict)
def K_of_r(self, r):
return self.variance*(1+np.sqrt(5.)*r+5./3*r**2)*np.exp(-np.sqrt(5.)*r)
def dK_dr(self, r):
return self.variance*(10./3*r -5.*r -5.*np.sqrt(5.)/3*r**2)*np.exp(-np.sqrt(5.)*r)
def Gram_matrix(self, F, F1, F2, F3, lower, upper):
"""
Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1.
:param F: vector of functions
:type F: np.array
:param F1: vector of derivatives of F
:type F1: np.array
:param F2: vector of second derivatives of F
:type F2: np.array
:param F3: vector of third derivatives of F
:type F3: np.array
:param lower,upper: boundaries of the input domain
:type lower,upper: floats
"""
assert self.input_dim == 1
def L(x,i):
return(5*np.sqrt(5)/self.lengthscale**3*F[i](x) + 15./self.lengthscale**2*F1[i](x)+ 3*np.sqrt(5)/self.lengthscale*F2[i](x) + F3[i](x))
n = F.shape[0]
G = np.zeros((n,n))
for i in range(n):
for j in range(i,n):
G[i,j] = G[j,i] = integrate.quad(lambda x : L(x,i)*L(x,j),lower,upper)[0]
G_coef = 3.*self.lengthscale**5/(400*np.sqrt(5))
Flower = np.array([f(lower) for f in F])[:,None]
F1lower = np.array([f(lower) for f in F1])[:,None]
F2lower = np.array([f(lower) for f in F2])[:,None]
orig = 9./8*np.dot(Flower,Flower.T) + 9.*self.lengthscale**4/200*np.dot(F2lower,F2lower.T)
orig2 = 3./5*self.lengthscale**2 * ( np.dot(F1lower,F1lower.T) + 1./8*np.dot(Flower,F2lower.T) + 1./8*np.dot(F2lower,Flower.T))
return(1./self.variance* (G_coef*G + orig + orig2))
class ExpQuad(Stationary):
"""
The Exponentiated quadratic covariance function.
.. math::
k(r) = \sigma^2 \exp(- 0.5 r^2)
notes::
- This is exactly the same as the RBF covariance function, but the
RBF implementation also has some features for doing variational kernels
(the psi-statistics).
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='ExpQuad'):
super(ExpQuad, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(ExpQuad, self)._save_to_input_dict()
input_dict["class"] = "GPy.kern.ExpQuad"
return input_dict
@staticmethod
def _build_from_input_dict(kernel_class, input_dict):
useGPU = input_dict.pop('useGPU', None)
return ExpQuad(**input_dict)
def K_of_r(self, r):
return self.variance * np.exp(-0.5 * r**2)
def dK_dr(self, r):
return -r*self.K_of_r(r)
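# A minimal usage sketch (hypothetical data): evaluate an ExpQuad kernel on
# random inputs; K is an (N, N) positive semi-definite covariance matrix and
# Kdiag its diagonal, a constant vector equal to the variance.
#
#     import numpy as np
#     k = ExpQuad(input_dim=2, variance=1., lengthscale=0.5)
#     X = np.random.randn(10, 2)
#     K = k.K(X)
#     Kdiag = k.Kdiag(X)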
class Cosine(Stationary):
"""
Cosine Covariance function
.. math::
k(r) = \sigma^2 \cos(r)
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Cosine'):
super(Cosine, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def K_of_r(self, r):
return self.variance * np.cos(r)
def dK_dr(self, r):
return -self.variance * np.sin(r)
class ExpQuadCosine(Stationary):
"""
Exponentiated quadratic multiplied by cosine covariance function (spectral mixture kernel).
.. math::
k(r) = \sigma^2 \exp(-2\pi^2r^2)\cos(2\pi r/T)
"""
def __init__(self, input_dim, variance=1., lengthscale=None, period=1., ARD=False, active_dims=None, name='ExpQuadCosine'):
super(ExpQuadCosine, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
self.period = Param('period', period, Logexp())
self.link_parameters(self.period)
def K_of_r(self, r):
return self.variance * np.exp(-2*np.pi**2*r**2)*np.cos(2*np.pi*r/self.period)
def dK_dr(self, r):
return -4*np.pi**2*r*self.K_of_r(r) - self.variance * 2*np.pi/self.period*np.exp(-2*np.pi**2*r**2)*np.sin(2*np.pi*r/self.period)
def update_gradients_full(self, dL_dK, X, X2=None):
super(ExpQuadCosine, self).update_gradients_full(dL_dK, X, X2)
r = self._scaled_dist(X, X2)
r2 = np.square(r)
dK_dperiod = self.variance * 2*np.pi*r/self.period**2*np.exp(-2*np.pi**2*r**2)*np.sin(2*np.pi*r/self.period)
grad = np.sum(dL_dK*dK_dperiod)
self.period.gradient = grad
def update_gradients_diag(self, dL_dKdiag, X):
super(ExpQuadCosine, self).update_gradients_diag(dL_dKdiag, X)
self.period.gradient = 0.
class Sinc(Stationary):
"""
Sinc Covariance function
.. math::
        k(r) = \sigma^2 \mathrm{sinc}(2r) = \sigma^2 \frac{\sin(2\pi r)}{2\pi r}
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Sinc'):
super(Sinc, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def K_of_r(self, r):
return self.variance * np.sinc(2*r)
def dK_dr(self, r):
# small angle approximation to avoid divide by zero errors.
return np.where(r<1e-5, -self.variance*4/3*np.pi*np.pi*r, self.variance/r * (np.cos(2*np.pi*r)-np.sinc(2*r)))
class RatQuad(Stationary):
"""
Rational Quadratic Kernel
.. math::
k(r) = \sigma^2 \\bigg( 1 + \\frac{r^2}{2} \\bigg)^{- \\alpha}
"""
def __init__(self, input_dim, variance=1., lengthscale=None, power=2., ARD=False, active_dims=None, name='RatQuad'):
super(RatQuad, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
self.power = Param('power', power, Logexp())
self.link_parameters(self.power)
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(RatQuad, self)._save_to_input_dict()
input_dict["class"] = "GPy.kern.RatQuad"
input_dict["power"] = self.power.values.tolist()
return input_dict
@staticmethod
def _build_from_input_dict(kernel_class, input_dict):
useGPU = input_dict.pop('useGPU', None)
return RatQuad(**input_dict)
def K_of_r(self, r):
r2 = np.square(r)
# return self.variance*np.power(1. + r2/2., -self.power)
return self.variance*np.exp(-self.power*np.log1p(r2/2.))
def dK_dr(self, r):
r2 = np.square(r)
# return -self.variance*self.power*r*np.power(1. + r2/2., - self.power - 1.)
return -self.variance*self.power*r*np.exp(-(self.power+1)*np.log1p(r2/2.))
def update_gradients_full(self, dL_dK, X, X2=None):
super(RatQuad, self).update_gradients_full(dL_dK, X, X2)
r = self._scaled_dist(X, X2)
r2 = np.square(r)
# dK_dpow = -self.variance * np.power(2., self.power) * np.power(r2 + 2., -self.power) * np.log(0.5*(r2+2.))
dK_dpow = -self.variance * np.exp(self.power*(np.log(2.)-np.log1p(r2+1)))*np.log1p(r2/2.)
grad = np.sum(dL_dK*dK_dpow)
self.power.gradient = grad
def update_gradients_diag(self, dL_dKdiag, X):
super(RatQuad, self).update_gradients_diag(dL_dKdiag, X)
self.power.gradient = 0.
|
SheffieldML/GPy
|
GPy/kern/src/stationary.py
|
Python
|
bsd-3-clause
| 29,873
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..model import GLMFit
def test_GLMFit_inputs():
input_map = dict(allow_ill_cond=dict(argstr='--illcond',
),
allow_repeated_subjects=dict(argstr='--allowsubjrep',
),
args=dict(argstr='%s',
),
calc_AR1=dict(argstr='--tar1',
),
check_opts=dict(argstr='--checkopts',
),
compute_log_y=dict(argstr='--logy',
),
contrast=dict(argstr='--C %s...',
),
cortex=dict(argstr='--cortex',
xor=['label_file'],
),
debug=dict(argstr='--debug',
),
design=dict(argstr='--X %s',
xor=('fsgd', 'design', 'one_sample'),
),
diag=dict(),
diag_cluster=dict(argstr='--diag-cluster',
),
environ=dict(nohash=True,
usedefault=True,
),
fixed_fx_dof=dict(argstr='--ffxdof %d',
xor=['fixed_fx_dof_file'],
),
fixed_fx_dof_file=dict(argstr='--ffxdofdat %d',
xor=['fixed_fx_dof'],
),
fixed_fx_var=dict(argstr='--yffxvar %s',
),
force_perm=dict(argstr='--perm-force',
),
fsgd=dict(argstr='--fsgd %s %s',
xor=('fsgd', 'design', 'one_sample'),
),
fwhm=dict(argstr='--fwhm %f',
),
glm_dir=dict(argstr='--glmdir %s',
genfile=True,
),
hemi=dict(),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='--y %s',
copyfile=False,
mandatory=True,
),
invert_mask=dict(argstr='--mask-inv',
),
label_file=dict(argstr='--label %s',
xor=['cortex'],
),
mask_file=dict(argstr='--mask %s',
),
no_contrast_ok=dict(argstr='--no-contrasts-ok',
),
no_est_fwhm=dict(argstr='--no-est-fwhm',
),
no_mask_smooth=dict(argstr='--no-mask-smooth',
),
no_prune=dict(argstr='--no-prune',
xor=['prunethresh'],
),
one_sample=dict(argstr='--osgm',
xor=('one_sample', 'fsgd', 'design', 'contrast'),
),
pca=dict(argstr='--pca',
),
per_voxel_reg=dict(argstr='--pvr %s...',
),
profile=dict(argstr='--profile %d',
),
prune=dict(argstr='--prune',
),
prune_thresh=dict(argstr='--prune_thr %f',
xor=['noprune'],
),
resynth_test=dict(argstr='--resynthtest %d',
),
save_cond=dict(argstr='--save-cond',
),
save_estimate=dict(argstr='--yhat-save',
),
save_res_corr_mtx=dict(argstr='--eres-scm',
),
save_residual=dict(argstr='--eres-save',
),
seed=dict(argstr='--seed %d',
),
self_reg=dict(argstr='--selfreg %d %d %d',
),
sim_done_file=dict(argstr='--sim-done %s',
),
sim_sign=dict(argstr='--sim-sign %s',
),
simulation=dict(argstr='--sim %s %d %f %s',
),
subject_id=dict(),
subjects_dir=dict(),
surf=dict(argstr='--surf %s %s %s',
requires=['subject_id', 'hemi'],
),
surf_geo=dict(usedefault=True,
),
synth=dict(argstr='--synth',
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
uniform=dict(argstr='--uniform %f %f',
),
var_fwhm=dict(argstr='--var-fwhm %f',
),
vox_dump=dict(argstr='--voxdump %d %d %d',
),
weight_file=dict(xor=['weighted_ls'],
),
weight_inv=dict(argstr='--w-inv',
xor=['weighted_ls'],
),
weight_sqrt=dict(argstr='--w-sqrt',
xor=['weighted_ls'],
),
weighted_ls=dict(argstr='--wls %s',
xor=('weight_file', 'weight_inv', 'weight_sqrt'),
),
)
inputs = GLMFit.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_GLMFit_outputs():
output_map = dict(beta_file=dict(),
dof_file=dict(),
error_file=dict(),
error_stddev_file=dict(),
error_var_file=dict(),
estimate_file=dict(),
frame_eigenvectors=dict(),
ftest_file=dict(),
fwhm_file=dict(),
gamma_file=dict(),
gamma_var_file=dict(),
glm_dir=dict(),
mask_file=dict(),
sig_file=dict(),
singular_values=dict(),
spatial_eigenvectors=dict(),
svd_stats_file=dict(),
)
outputs = GLMFit.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
mick-d/nipype
|
nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py
|
Python
|
bsd-3-clause
| 4,335
|
from django.http import Http404
from django.http import HttpResponse
from common.templates import render_template
from blog.models import Post
def index(request):
post_list = Post.objects.all().order_by('-publishedDate')[:10]
return HttpResponse(render_template('blog/index.tpl', request, {'post_list': post_list}))
def fullpost(request, url):
try:
post = Post.objects.get(url=url)
except Post.DoesNotExist:
raise Http404
return HttpResponse(render_template('blog/fullpost.tpl', request, {'post': post}))
|
ProgVal/ProgVal.42
|
blog/views.py
|
Python
|
bsd-3-clause
| 544
|
"""
Functions for generating sigma algebras on finite sets.
Chetan Jhurani
http://users.ices.utexas.edu/~chetan/Publications.html
http://users.ices.utexas.edu/~chetan/reports/2009-03-ices-set_algebra_algorithms.pdf
"""
from collections import defaultdict
import numpy as np
from dit.utils import powerset
__all__ = ['is_sigma_algebra', 'sigma_algebra', 'atom_set']
def sets2matrix(C, X=None):
"""Returns the sets in C as binary strings representing elements in X.
    Parameters
    ----------
C : set of frozensets
The set of subsets of X.
X : frozenset, None
The underlying set. If None, then X is taken to be the union of the
sets in C.
Returns
-------
Cmatrix : NumPy array, shape ( len(C), len(X) )
The 0-1 matrix whose rows represent sets in C. The columns tell us
if the corresponding element in X is present in the subset of C.
Xset : frozenset
The underlying set that was used to construct Cmatrix.
"""
# make sure C consists of frozensets and that X is frozen
C = set([frozenset(c) for c in C])
if X is None:
Xset = frozenset().union(*C)
else:
Xset = frozenset(X)
for cet in C:
if not Xset.issuperset(cet):
msg = "Set {0} is not a subset of {1}".format(cet, Xset)
raise Exception(msg)
# Each element of C will be represented as a binary string of 0s and 1s.
# Note, X is frozen, so its iterating order is fixed.
Cmatrix = [[1 if x in cet else 0 for x in Xset] for cet in C]
Cmatrix = np.array(Cmatrix, dtype=int)
return Cmatrix, Xset
def unique_columns(Cmatrix):
"""Returns a dictionary mapping columns to identical column indexes.
Parameters
----------
Cmatrix : NumPy array
A 0-1 matrix whose rows represent subsets of an underlying set. The
columns express membership of the underlying set's elements in
each of the subsets.
Returns
-------
unique_cols : defaultdict(set)
A dictionary mapping columns in Cmatrix to sets of column indexes.
All indexes that mapped from the same set represent identical columns.
"""
unique_cols = defaultdict(set)
for idx, col in enumerate(Cmatrix.transpose()):
unique_cols[tuple(col)].add(idx)
return unique_cols
def sigma_algebra(C, X=None):
"""Returns the sigma algebra generated by the subsets in C.
Let X be a set and C be a collection of subsets of X. The sigma algebra
generated by the subsets in C is the smallest sigma-algebra which contains
every subset in C.
Parameters
----------
C : set of frozensets
The set of subsets of X.
X : frozenset, None
The underlying set. If None, then X is taken to be the union of the
sets in C.
Returns
-------
sC : frozenset of frozensets
The sigma-algebra generated by C.
Notes
-----
The algorithm run time is generally exponential in |X|, the size of X.
"""
from itertools import product
Cmatrix, X = sets2matrix(C, X)
unique_cols = unique_columns(Cmatrix)
# Create a lookup from column IDs representing identical columns to the
# index of a unique representative in the list of unique representatives.
# This will be used to repopulate the larger binary string representation.
lookups = {}
for i, indexes in enumerate(unique_cols.values()):
for index in indexes:
lookups[index] = i
# The total number of elements is given by the powerset on all unique
# indexes. That is, we just generate all binary strings. Then, for each
# binary string, we construct the subset in the sigma algebra.
sC = set([])
for word in product([0, 1], repeat=len(unique_cols)):
subset = [x for i, x in enumerate(X) if word[lookups[i]] == 1]
sC.add(frozenset(subset))
sC = frozenset(sC)
return sC
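# A minimal usage sketch: the sigma algebra generated by the single subset
# {1} of X = {1, 2, 3} contains the empty set, {1}, its complement {2, 3},
# and X itself.
#
#     sa = sigma_algebra({frozenset([1])}, X=frozenset([1, 2, 3]))
#     # frozenset({frozenset(), frozenset({1}), frozenset({2, 3}),
#     #            frozenset({1, 2, 3})})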
def is_sigma_algebra(F, X=None):
"""Returns True if F is a sigma algebra on X.
Parameters
----------
F : set of frozensets
The candidate sigma algebra.
X : frozenset, None
The universal set. If None, then X is taken to be the union of the
sets in F.
Returns
-------
issa : bool
True if F is a sigma algebra and False if not.
Notes
-----
The time complexity of this algorithm is O ( len(F) * len(X) ).
"""
# The idea is to construct the matrix representing F. Then count the number
# of redundant columns. Denote this number by q. If F is a sigma algebra
# on a finite set X, then we must have:
    #     m + 2 == 2**(len(X) - q)
    # where m is the number of elements in F not equal to the empty set
    # or the universal set X.
Fmatrix, X = sets2matrix(F, X)
unique_cols = unique_columns(Fmatrix)
m = len(F)
emptyset = frozenset([])
if frozenset([emptyset, X]) <= F and m == 2**len(unique_cols):
return True
else:
return False
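# Illustrative usage sketch (not part of the original module): the power set
# of {1, 2} is a sigma algebra, while removing a set breaks closure under
# complements, which the counting test detects.
#
#     >>> F = frozenset(map(frozenset, [[], [1], [2], [1, 2]]))
#     >>> is_sigma_algebra(F)
#     True
#     >>> is_sigma_algebra(F - {frozenset([1])})
#     False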
def is_sigma_algebra__brute(F, X=None):
"""Returns True if F is a sigma algebra on X.
Parameters
----------
F : set of frozensets
The candidate sigma algebra.
X : frozenset, None
The universal set. If None, then X is taken to be the union of the
sets in F.
Returns
-------
issa : bool
True if F is a sigma algebra and False if not.
Notes
-----
This is a brute force check against the definition of a sigma algebra
on a finite set. Its time complexity is O( len(F)**2 ).
"""
    # This works because it's not necessary to test countable unions if the
# base set X is finite. One need only consider pairwise unions.
if X is None:
X = frozenset().union(*F)
else:
X = frozenset(X)
for subset1 in F:
if X.difference(subset1) not in F:
return False
for subset2 in F:
if subset1.union(subset2) not in F:
return False
    return True
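# Illustrative usage sketch (not part of the original module): the brute-force
# check agrees with the counting-based check above.
#
#     >>> F = sigma_algebra({frozenset([1])}, X={1, 2})
#     >>> is_sigma_algebra(F) == is_sigma_algebra__brute(F)
#     True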
def atom_set(F, X=None, method=2):
"""
Returns the atoms of the sigma-algebra F.
Parameters
----------
F : set of frozensets
The candidate sigma algebra.
X : frozenset, None
The universal set. If None, then X is taken to be the union of the
sets in F.
Returns
-------
atoms : frozenset
A frozenset of frozensets, representing the atoms of the sigma algebra.
"""
if not isinstance(next(iter(F)), frozenset):
raise Exception('Input to `atom_set` must contain frozensets.')
def method1():
"""
# of ops = len(F) * 2**len(largest element in F)
"""
atoms = []
for cet in F:
if not cet:
# An atom must be nonempty.
continue
# Now look at all nonempty, proper subsets of cet.
#
# If you have a sample space with 64 elements, and then consider
# the trivial sigma algebra, then one element of F will be the
# empty set, while the other will have 64 elements. Taking the
# powerset of this set will require going through a list of 2^64
# elements...in addition to taking forever, we can't even store
# that in memory.
#
            # Sort by size so that the empty set and cet itself land at the
            # two ends, then slice both off to keep nonempty, proper subsets.
            subsets = sorted(powerset(cet), key=len)[1:-1]
for subset in subsets:
if frozenset(subset) in F:
break
else:
# Then `cet` has no nonempty proper subset that is also in F.
atoms.append(frozenset(cet))
return atoms
def method2():
"""
# of ops = len(F) * len(F)
"""
atoms = []
for cet in F:
if len(cet) == 0:
# An atom must be nonempty.
continue
# We just go through the elements of F. If another nonempty
            # element is a strict subset of cet, then cet is not an atom.
#
for other_cet in F:
# We need to find an other_cet which is a non-empty proper subset
# of cet. Then, cet cannot be an atom.
L = len(other_cet)
if L == 0 or L == len(cet):
continue
elif other_cet.issubset(cet):
break
else:
atoms.append(frozenset(cet))
return atoms
atoms = {1: method1, 2: method2}[method]()
return frozenset(atoms)
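# Illustrative usage sketch (not part of the original module): the atoms of
# the power set of {1, 2} are the singletons.
#
#     >>> F = sigma_algebra({frozenset([1]), frozenset([2])})
#     >>> sorted(map(sorted, atom_set(F)))
#     [[1], [2]]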
|
Autoplectic/dit
|
dit/math/sigmaalgebra.py
|
Python
|
bsd-3-clause
| 8,590
|
#
# PgHelp.py -- web application threading help routines.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import tornado.web
import tornado.websocket
import tornado.template
from tornado.ioloop import IOLoop
import random
import json
import os, time
import datetime
import binascii
from collections import namedtuple
from ginga.misc import Bunch
default_interval = 10
ConfigEvent = namedtuple("ConfigEvent", ["type", "id", "width", "height"])
InputEvent = namedtuple("InputEvent", ["type", "id", "x", "y", "button",
"delta", "alt_key", "ctrl_key",
"meta_key", "shift_key", "key_code"])
GestureEvent = namedtuple("GestureEvent", ["type", "id", "x", "y", "dx", "dy",
"distance",
"theta", "direction", "vx", "vy",
"scale", "rotation", "isfirst",
"isfinal"])
WidgetEvent = namedtuple("WidgetEvent", ["type", "id", "value"])
TimerEvent = namedtuple("TimerEvent", ["type", "id", "value"])
class ApplicationHandler(tornado.websocket.WebSocketHandler):
def initialize(self, name, app):
self.name = name
self.app = app
self.app.add_ws_handler(self)
self.event_callbacks = {
"activate": WidgetEvent,
"setbounds": ConfigEvent,
"mousedown": InputEvent,
"mouseup": InputEvent,
"mousemove": InputEvent,
"mouseout": InputEvent,
"mouseover": InputEvent,
"mousewheel": InputEvent,
"wheel": InputEvent,
"click": InputEvent,
"dblclick": InputEvent,
"keydown": InputEvent,
"keyup": InputEvent,
"keypress": InputEvent,
"resize": InputEvent,
"focus": InputEvent,
"focusout": InputEvent,
"blur": InputEvent,
"drop": InputEvent,
#"paste": InputEvent,
# These are all Hammer.js events
"pinch": GestureEvent,
"pinchstart": GestureEvent,
"pinchend": GestureEvent,
"rotate": GestureEvent,
"rotatestart": GestureEvent,
"rotateend": GestureEvent,
"pan": GestureEvent,
"panstart": GestureEvent,
"panend": GestureEvent,
"tap": GestureEvent,
"swipe": GestureEvent,
}
#self.interval = 10
interval = self.settings.get("timer_interval", default_interval)
if self.name in self.settings:
interval = self.settings[self.name].get("timer_interval", interval)
self.interval = interval
# randomize the first timeout so we don't get every timer
# expiring at the same time
interval = random.randint(1, self.interval)
delta = datetime.timedelta(milliseconds=interval)
self.timeout = IOLoop.current().add_timeout(delta, self.timer_tick)
def add_event_type(self, msg_type, event_class):
self.event_callbacks[msg_type] = event_class
    def open(self, *args, **kwdargs):
        # tornado invokes open() (not on_open) once the websocket handshake
        # completes; disable Nagle's algorithm for low-latency messaging.
        self.set_nodelay(True)
def on_close(self):
IOLoop.current().remove_timeout(self.timeout)
def on_message(self, raw_message):
message = json.loads(raw_message)
event_type = message.get("type")
try:
event_class = self.event_callbacks[event_type]
except KeyError:
print("I don't know how to process '%s' events!" % (
event_type))
return
event = event_class(**message)
self.app.widget_event(event)
def do_operation(self, operation, **kwargs):
message = dict(kwargs, operation=operation)
raw_message = json.dumps(message)
self.write_message(raw_message)
def timer_tick(self):
event = TimerEvent(type="timer", id=0, value=time.time())
        # TODO: should exceptions thrown from this be caught and ignored?
        self.app.widget_event(event)
        delta = datetime.timedelta(milliseconds=self.interval)
self.timeout = IOLoop.current().add_timeout(delta, self.timer_tick)
class WindowHandler(tornado.web.RequestHandler):
def initialize(self, name, url, app):
self.app = app
self.logger = app.logger
self.logger.info("windowhandler initialize")
self.name = name
self.url = url
def make_index(self, wids):
template = '''
<!doctype html>
<html>
<head>
<title>%(title)s</title>
</head>
<body>
%(content)s
</body>
</html>'''
content = ["<ul>"]
for wid in wids:
content.append('''<li><a href="%s?id=%s">Window %s</a></li>''' % (
self.url, wid, wid))
content.append("</ul>")
return template % dict(title="Window index", content=''.join(content))
def get(self):
self.logger.info("windowhandler get")
# Collect arguments
wid = self.get_argument('id', None)
# Get window with this id
wids = self.app.get_wids()
if wid in wids:
window = self.app.get_window(wid)
output = window.render()
else:
output = self.make_index(wids)
self.write(output)
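# Illustrative wiring sketch (not part of the original module). `MyApp` is a
# hypothetical object providing the add_ws_handler()/widget_event()/get_wids()
# interface that these handlers expect:
#
#     app = MyApp()
#     tornado_app = tornado.web.Application([
#         (r"/app/socket", ApplicationHandler, dict(name="ginga", app=app)),
#     ])
#     tornado_app.listen(9909)
#     IOLoop.current().start()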
|
eteq/ginga
|
ginga/web/pgw/PgHelp.py
|
Python
|
bsd-3-clause
| 5,510
|
from .mean import mean
from .decimalize import decimalize
from .standard_deviation import standard_deviation
def z_scores(data):
"""
Standardizing a variable or set of data is transforming the data such that it
has a mean of 0 and standard deviation of 1.
Each converted value equals how many standard deviations the value is above or below the mean.
These converted values are known as "z scores".
Equation:
.. math::
z_i = \\frac{X_i - \\bar{X}}{s_X}
In English:
    - Subtract the mean from the value.
- Divide the result by the standard deviation.
Args:
data: A list of numerical objects.
Returns:
A list of float objects.
Examples:
        >>> z_scores([-2, -1, 0, 1, 2])
        [-1.2649110640673518, -0.6324555320336759, 0.0, 0.6324555320336759, 1.2649110640673518]
        >>> z_scores([1, 2])
        [-0.7071067811865475, 0.7071067811865475]
>>> z_scores([90]) # a z score for one value is not defined
>>> z_scores(4) # a z score for one value is not defined
"""
# You can't get z scores for one number
if type(data) is int:
return(None)
elif type(data) is list:
# You can't get z scores for one number
if len(data) < 2:
return(None)
        mean_of_data = decimalize(mean(data))
        sd_of_data = decimalize(standard_deviation(data))
        # Standardize: subtract the mean from each value, then divide by
        # the standard deviation, matching the equation in the docstring.
        z_scores = [float((ii - mean_of_data) / sd_of_data) for ii in data]
return(z_scores)
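# Illustrative property sketch (not part of the original module): z scores are
# unit-free, so rescaling the data by a constant factor leaves them unchanged.
#
#     >>> z_scores([1, 2, 3]) == z_scores([10, 20, 30])
#     True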
|
jhowardanderson/simplestatistics
|
simplestatistics/statistics/z_scores.py
|
Python
|
bsd-3-clause
| 1,514
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-18 12:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('jobs', '0010_auto_20170316_1739'),
]
operations = [
migrations.CreateModel(
name='Freelancer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('title', models.CharField(max_length=255)),
('description', models.TextField()),
('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='post',
name='is_freelancer',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='post',
name='freelancer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='jobs.Freelancer'),
),
]
|
ScorpionResponse/freelancefinder
|
freelancefinder/jobs/migrations/0011_auto_20170318_1237.py
|
Python
|
bsd-3-clause
| 1,734
|
"""
Django settings for encounterdeck project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
DEBUG = os.getenv('DJANGO_DEBUG') != 'FALSE'
if DEBUG:
SECRET_KEY = "SuperSecretKey:D"
else:
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY')
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'encounterdeck.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'encounterdeck.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
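# Illustrative sketch (an assumption, not part of the original settings): with,
# e.g., DATABASE_URL=postgres://user:pass@host:5432/app in the environment,
# dj_database_url.config() returns a settings dict along the lines of
#
# {'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'app',
#  'USER': 'user', 'PASSWORD': 'pass', 'HOST': 'host', 'PORT': 5432,
#  'CONN_MAX_AGE': 500}
#
# which the update() call above merges over the sqlite3 default. With no
# DATABASE_URL set, config() returns {} and the sqlite3 default is kept.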
|
patjouk/EncounterDeck
|
encounterdeck/settings.py
|
Python
|
bsd-3-clause
| 3,584
|
""" Python library for interacting with Project Vote Smart API.
Project Vote Smart's API (http://www.votesmart.org/services_api.php)
provides rich biographical data, including data on votes, committee
assignments, and much more.
"""
__author__ = "James Turk (jturk@sunlightfoundation.com)"
__version__ = "0.2.1"
__copyright__ = "Copyright (c) 2009 Sunlight Labs"
__license__ = "BSD"
import urllib, urllib2
try:
import json
except ImportError:
import simplejson as json
class VotesmartApiError(Exception):
""" Exception for Sunlight API errors """
class VotesmartApiObject(object):
def __init__(self, d):
self.__dict__ = d
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.__dict__)
class Address(object):
def __init__(self, d):
self.__dict__.update(d['address'])
self.__dict__.update(d['phone'])
self.__dict__.update(d['notes'])
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.__dict__)
class WebAddress(VotesmartApiObject):
def __str__(self):
return self.webAddress
class Bio(object):
def __init__(self, d):
#self.__dict__.update(d['election'])
#self.__dict__.update(d['office'])
self.__dict__.update(d['candidate'])
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.__dict__)
class AddlBio(VotesmartApiObject):
def __str__(self):
return ': '.join((self.name, self.data))
class Candidate(VotesmartApiObject):
def __str__(self):
return ' '.join((self.firstName, self.lastName))
class CommitteeType(VotesmartApiObject):
def __str__(self):
return self.name
class Committee(VotesmartApiObject):
def __str__(self):
return self.name
class CommitteeDetail(VotesmartApiObject):
def __str__(self):
return self.name
class CommitteeMember(VotesmartApiObject):
def __str__(self):
return ' '.join((self.title, self.firstName, self.lastName))
class District(VotesmartApiObject):
def __str__(self):
return self.name
class Election(VotesmartApiObject):
def __init__(self, d):
stages = [ElectionStage(s) for s in d.pop('stage')]
self.__dict__ = d
self.stages = stages
def __str__(self):
return self.name
class ElectionStage(VotesmartApiObject):
def __str__(self):
return '%s (%s)' % (self.name, self.electionDate)
class Official(VotesmartApiObject):
def __str__(self):
return ' '.join((self.title, self.firstName, self.lastName))
class LeadershipPosition(VotesmartApiObject):
def __str__(self):
return self.name
class Locality(VotesmartApiObject):
def __str__(self):
return self.name
class Measure(VotesmartApiObject):
def __str__(self):
return self.title
class MeasureDetail(VotesmartApiObject):
def __str__(self):
return self.title
class OfficeType(VotesmartApiObject):
def __str__(self):
return ': '.join((self.officeTypeId, self.name))
class OfficeBranch(VotesmartApiObject):
def __str__(self):
return ': '.join((self.officeBranchId, self.name))
class OfficeLevel(VotesmartApiObject):
def __str__(self):
return ': '.join((self.officeLevelId, self.name))
class Office(VotesmartApiObject):
def __str__(self):
return self.name
class Category(VotesmartApiObject):
def __str__(self):
return ': '.join((self.categoryId, self.name))
class Sig(VotesmartApiObject):
def __str__(self):
return ': '.join((self.sigId, self.name))
class SigDetail(VotesmartApiObject):
def __str__(self):
return self.name
class Rating(VotesmartApiObject):
def __str__(self):
return self.ratingText
class State(VotesmartApiObject):
def __str__(self):
return ' '.join((self.stateId, self.name))
class StateDetail(VotesmartApiObject):
def __str__(self):
return ' '.join((self.stateId, self.name))
class BillSponsor(VotesmartApiObject):
def __str__(self):
return self.name
class BillAction(VotesmartApiObject):
def __str__(self):
return ' - '.join((self.statusDate, self.stage))
class BillAmendment(VotesmartApiObject):
def __str__(self):
return self.title
class BillDetail(VotesmartApiObject):
def __init__(self, d):
sponsors = d.pop('sponsors')
actions = d.pop('actions')
amendments = d.pop('ammendments') # ammendments -- sic
self.sponsors = _result_to_obj(BillSponsor, sponsors['sponsor'])
self.actions = _result_to_obj(BillAction, actions['action'])
if amendments:
self.amendments = _result_to_obj(BillAmendment, amendments['ammendment'])
self.__dict__.update(d)
class BillActionDetail(VotesmartApiObject):
def __str__(self):
return self.officialTitle
class Bill(VotesmartApiObject):
def __str__(self):
return ' '.join((self.billNumber, self.title))
class Vote(VotesmartApiObject):
def __str__(self):
return ': '.join((self.candidateName, self.action))
class Veto(VotesmartApiObject):
def __str__(self):
return ' '.join((self.billNumber, self.billTitle))
def _result_to_obj(cls, result):
if isinstance(result, dict):
return [cls(result)]
else:
return [cls(o) for o in result]
class votesmart(object):
apikey = None
@staticmethod
def _apicall(func, params):
if votesmart.apikey is None:
raise VotesmartApiError('Missing Project Vote Smart apikey')
params = dict([(k,v) for (k,v) in params.iteritems() if v])
url = 'http://api.votesmart.org/%s?o=JSON&key=%s&%s' % (func,
votesmart.apikey, urllib.urlencode(params))
try:
response = urllib2.urlopen(url).read()
obj = json.loads(response)
if 'error' in obj:
raise VotesmartApiError(obj['error']['errorMessage'])
else:
return obj
except urllib2.HTTPError, e:
raise VotesmartApiError(e.read())
except ValueError, e:
raise VotesmartApiError('Invalid Response')
class address(object):
@staticmethod
def getCampaign(candidateId):
params = {'candidateId': candidateId}
result = votesmart._apicall('Address.getCampaign', params)
return _result_to_obj(Address, result['address']['office'])
@staticmethod
def getCampaignWebAddress(candidateId):
params = {'candidateId': candidateId}
result = votesmart._apicall('Address.getCampaignWebAddress', params)
return _result_to_obj(WebAddress, result['webaddress']['address'])
@staticmethod
def getCampaignByElection(electionId):
params = {'electionId': electionId}
result = votesmart._apicall('Address.getCampaignByElection', params)
return _result_to_obj(Address, result['address']['office'])
@staticmethod
def getOffice(candidateId):
params = {'candidateId': candidateId}
result = votesmart._apicall('Address.getOffice', params)
return _result_to_obj(Address, result['address']['office'])
@staticmethod
def getOfficeWebAddress(candidateId):
params = {'candidateId': candidateId}
result = votesmart._apicall('Address.getOfficeWebAddress', params)
return _result_to_obj(WebAddress, result['webaddress']['address'])
#@staticmethod
#def getOfficeByOfficeState(officeId, stateId=None):
# params = {'officeId': officeId, 'stateId': stateId}
# result = votesmart._apicall('Address.getOfficeByOfficeState', params)
# return _result_to_obj(Address, result['address']['office'])
class candidatebio(object):
@staticmethod
def getBio(candidateId):
params = {'candidateId': candidateId}
result = votesmart._apicall('CandidateBio.getBio', params)
return Bio(result['bio'])
@staticmethod
def getAddlBio(candidateId):
params = {'candidateId': candidateId}
result = votesmart._apicall('CandidateBio.getAddlBio', params)
return _result_to_obj(AddlBio,
result['addlbio']['additional']['item'])
class candidates(object):
@staticmethod
def getByOfficeState(officeId, stateId=None, electionYear=None):
params = {'officeId': officeId, 'stateId':stateId, 'electionYear': electionYear}
result = votesmart._apicall('Candidates.getByOfficeState', params)
return _result_to_obj(Candidate, result['candidateList']['candidate'])
@staticmethod
def getByLastname(lastName, electionYear=None):
params = {'lastName': lastName, 'electionYear':electionYear}
result = votesmart._apicall('Candidates.getByLastname', params)
return _result_to_obj(Candidate, result['candidateList']['candidate'])
@staticmethod
def getByLevenstein(lastName, electionYear=None):
params = {'lastName': lastName, 'electionYear':electionYear}
result = votesmart._apicall('Candidates.getByLevenstein', params)
return _result_to_obj(Candidate, result['candidateList']['candidate'])
@staticmethod
def getByElection(electionId):
params = {'electionId': electionId}
result = votesmart._apicall('Candidates.getByElection', params)
return _result_to_obj(Candidate, result['candidateList']['candidate'])
@staticmethod
def getByDistrict(districtId, electionYear=None):
params = {'districtId': districtId, 'electionYear':electionYear}
result = votesmart._apicall('Candidates.getByDistrict', params)
return _result_to_obj(Candidate, result['candidateList']['candidate'])
@staticmethod
def getByZip(zip5, zip4=None):
params = {'zip4': zip4, 'zip5': zip5}
result = votesmart._apicall('Candidates.getByZip', params)
return _result_to_obj(Candidate, result['candidateList']['candidate'])
class committee(object):
@staticmethod
def getTypes():
result = votesmart._apicall('Committee.getTypes', {})
return _result_to_obj(CommitteeType, result['committeeTypes']['type'])
@staticmethod
def getCommitteesByTypeState(typeId=None, stateId=None):
params = {'typeId':typeId, 'stateId':stateId}
result = votesmart._apicall('Committee.getCommitteesByTypeState', params)
return _result_to_obj(Committee, result['committees']['committee'])
@staticmethod
def getCommittee(committeeId):
params = {'committeeId' : committeeId}
result = votesmart._apicall('Committee.getCommittee', params)
return CommitteeDetail(result['committee'])
@staticmethod
def getCommitteeMembers(committeeId):
params = {'committeeId' : committeeId}
result = votesmart._apicall('Committee.getCommitteeMembers', params)
return _result_to_obj(CommitteeMember, result['committeeMembers']['member'])
class district(object):
@staticmethod
def getByOfficeState(officeId, stateId, districtName=None):
params = {'officeId':officeId, 'stateId': stateId, 'districtName': districtName}
result = votesmart._apicall('District.getByOfficeState', params)
return _result_to_obj(District, result['districtList']['district'])
@staticmethod
def getByZip(zip5, zip4=None):
params = {'zip5': zip5, 'zip4': zip4}
result = votesmart._apicall('District.getByZip', params)
return _result_to_obj(District, result['districtList']['district'])
class election(object):
@staticmethod
def getElection(electionId):
params = {'electionId':electionId}
result = votesmart._apicall('Election.getElection', params)
return Election(result['elections']['election'])
@staticmethod
def getElectionByYearState(year, stateId=None):
params = {'year':year, 'stateId':stateId}
result = votesmart._apicall('Election.getElectionByYearState', params)
return _result_to_obj(Election, result['elections']['election'])
@staticmethod
def getElectionByZip(zip5, zip4=None, year=None):
params = {'zip5': zip5, 'zip4': zip4, 'year': year}
result = votesmart._apicall('Election.getElectionByZip', params)
return _result_to_obj(Election, result['elections']['election'])
#@staticmethod
#def getStageCandidates(electionId, stageId,
# party=None, districtId=None, stateId=None):
# params = {'electionId':electionId, 'stageId':stageId, 'party':party,
# 'districtId':districtId, 'stateId':stateId}
# result = votesmart._apicall('Election.getElectionByYearState', params)
# ['stageCandidates']['candidate']
class leadership(object):
@staticmethod
def getPositions(stateId=None, officeId=None):
params = {'stateId':stateId, 'officeId':officeId}
result = votesmart._apicall('Leadership.getPositions', params)
return _result_to_obj(LeadershipPosition, result['leadership']['position'])
#@staticmethod
#def getCandidates(leadershipId, stateId=None):
# params = {'leadershipId':leadershipId, 'stateId':stateId}
# result = votesmart._apicall('Leadership.getCandidates', params)
# return result['leaders']['leader']
class local(object):
@staticmethod
def getCounties(stateId):
params = {'stateId': stateId}
result = votesmart._apicall('Local.getCounties', params)
return _result_to_obj(Locality, result['counties']['county'])
@staticmethod
def getCities(stateId):
params = {'stateId': stateId}
result = votesmart._apicall('Local.getCities', params)
return _result_to_obj(Locality, result['cities']['city'])
@staticmethod
def getOfficials(localId):
params = {'localId': localId}
result = votesmart._apicall('Local.getOfficials', params)
return _result_to_obj(Official, result['candidateList']['candidate'])
class measure(object):
@staticmethod
def getMeasuresByYearState(year, stateId):
params = {'year':year, 'stateId':stateId}
result = votesmart._apicall('Measure.getMeasuresByYearState', params)
return _result_to_obj(Measure, result['measures']['measure'])
@staticmethod
def getMeasure(measureId):
params = {'measureId':measureId}
result = votesmart._apicall('Measure.getMeasure', params)
return MeasureDetail(result['measure'])
class npat(object):
@staticmethod
def getNpat(candidateId):
params = {'candidateId':candidateId}
result = votesmart._apicall('Npat.getNpat', params)
return result['npat']
class office(object):
@staticmethod
def getTypes():
result = votesmart._apicall('Office.getTypes', {})
return _result_to_obj(OfficeType, result['officeTypes']['type'])
@staticmethod
def getBranches():
result = votesmart._apicall('Office.getBranches', {})
return _result_to_obj(OfficeBranch, result['branches']['branch'])
@staticmethod
def getLevels():
result = votesmart._apicall('Office.getLevels', {})
return _result_to_obj(OfficeLevel, result['levels']['level'])
@staticmethod
def getOfficesByType(typeId):
params = {'typeId':typeId}
result = votesmart._apicall('Office.getOfficesByType', params)
return _result_to_obj(Office, result['offices']['office'])
@staticmethod
def getOfficesByLevel(levelId):
params = {'levelId':levelId}
result = votesmart._apicall('Office.getOfficesByLevel', params)
return _result_to_obj(Office, result['offices']['office'])
@staticmethod
def getOfficesByTypeLevel(typeId, levelId):
params = {'typeId':typeId, 'levelId':levelId}
result = votesmart._apicall('Office.getOfficesByTypeLevel', params)
return _result_to_obj(Office, result['offices']['office'])
@staticmethod
def getOfficesByBranchLevel(branchId, levelId):
params = {'branchId':branchId, 'levelId':levelId}
result = votesmart._apicall('Office.getOfficesByBranchLevel', params)
return _result_to_obj(Office, result['offices']['office'])
class officials(object):
@staticmethod
def getStatewide(stateId=None):
params = {'stateId': stateId}
result = votesmart._apicall('Officials.getStatewide', params)
return _result_to_obj(Official, result['candidateList']['candidate'])
@staticmethod
def getByOfficeState(officeId, stateId=None):
params = {'officeId':officeId, 'stateId': stateId}
result = votesmart._apicall('Officials.getByOfficeState', params)
return _result_to_obj(Official, result['candidateList']['candidate'])
@staticmethod
def getByLastname(lastName):
params = {'lastName':lastName}
result = votesmart._apicall('Officials.getByLastname', params)
return _result_to_obj(Official, result['candidateList']['candidate'])
@staticmethod
def getByLevenstein(lastName):
params = {'lastName':lastName}
result = votesmart._apicall('Officials.getByLevenstein', params)
return _result_to_obj(Official, result['candidateList']['candidate'])
@staticmethod
def getByElection(electionId):
params = {'electionId':electionId}
result = votesmart._apicall('Officials.getByElection', params)
return _result_to_obj(Official, result['candidateList']['candidate'])
@staticmethod
def getByDistrict(districtId):
params = {'districtId':districtId}
result = votesmart._apicall('Officials.getByDistrict', params)
return _result_to_obj(Official, result['candidateList']['candidate'])
@staticmethod
def getByZip(zip5, zip4=None):
params = {'zip4': zip4, 'zip5': zip5}
result = votesmart._apicall('Officials.getByZip', params)
return _result_to_obj(Official, result['candidateList']['candidate'])
class rating(object):
@staticmethod
def getCategories(stateId=None):
params = {'stateId':stateId}
result = votesmart._apicall('Rating.getCategories', params)
return _result_to_obj(Category, result['categories']['category'])
@staticmethod
def getSigList(categoryId, stateId=None):
params = {'categoryId':categoryId, 'stateId':stateId}
result = votesmart._apicall('Rating.getSigList', params)
return _result_to_obj(Sig, result['sigs']['sig'])
@staticmethod
def getSig(sigId):
params = {'sigId':sigId}
result = votesmart._apicall('Rating.getSig', params)
return SigDetail(result['sig'])
@staticmethod
def getCandidateRating(candidateId, sigId):
params = {'candidateId':candidateId, 'sigId':sigId}
result = votesmart._apicall('Rating.getCandidateRating', params)
return _result_to_obj(Rating, result['candidateRating']['rating'])
class state(object):
@staticmethod
def getStateIDs():
result = votesmart._apicall('State.getStateIDs', {})
return _result_to_obj(State, result['stateList']['list']['state'])
@staticmethod
def getState(stateId):
params = {'stateId':stateId}
result = votesmart._apicall('State.getState', params)
return StateDetail(result['state']['details'])
class votes(object):
@staticmethod
def getCategories(year, stateId=None):
params = {'year':year, 'stateId':stateId}
result = votesmart._apicall('Votes.getCategories', params)
return _result_to_obj(Category, result['categories']['category'])
@staticmethod
def getBill(billId):
params = {'billId':billId}
result = votesmart._apicall('Votes.getBill', params)
return BillDetail(result['bill'])
@staticmethod
def getBillAction(actionId):
params = {'actionId':actionId}
result = votesmart._apicall('Votes.getBillAction', params)
return BillActionDetail(result['action'])
@staticmethod
def getBillActionVotes(actionId):
params = {'actionId':actionId}
result = votesmart._apicall('Votes.getBillActionVotes', params)
return _result_to_obj(Vote, result['votes']['vote'])
@staticmethod
def getBillActionVoteByOfficial(actionId, candidateId):
params = {'actionId':actionId, 'candidateId':candidateId}
result = votesmart._apicall('Votes.getBillActionVoteByOfficial', params)
return Vote(result['votes']['vote'])
@staticmethod
def getByBillNumber(billNumber):
params = {'billNumber': billNumber}
result = votesmart._apicall('Votes.getByBillNumber', params)
return _result_to_obj(Bill, result['bills']['bill'])
@staticmethod
def getBillsByCategoryYearState(categoryId, year, stateId=None):
params = {'categoryId':categoryId, 'year':year, 'stateId':stateId}
result = votesmart._apicall('Votes.getBillsByCategoryYearState', params)
return _result_to_obj(Bill, result['bills']['bill'])
@staticmethod
def getBillsByYearState(year, stateId=None):
params = {'year':year, 'stateId':stateId}
result = votesmart._apicall('Votes.getBillsByYearState', params)
return _result_to_obj(Bill, result['bills']['bill'])
@staticmethod
def getBillsByOfficialYearOffice(candidateId, year, officeId=None):
params = {'candidateId':candidateId, 'year':year, 'officeId':officeId}
result = votesmart._apicall('Votes.getBillsByOfficialYearOffice', params)
return _result_to_obj(Bill, result['bills']['bill'])
@staticmethod
def getBillsByCandidateCategoryOffice(candidateId, categoryId, officeId=None):
params = {'candidateId':candidateId, 'categoryId':categoryId, 'officeId':officeId}
result = votesmart._apicall('Votes.getBillsByCandidateCategoryOffice', params)
return _result_to_obj(Bill, result['bills']['bill'])
@staticmethod
def getBillsBySponsorYear(candidateId, year):
params = {'candidateId':candidateId, 'year':year}
result = votesmart._apicall('Votes.getBillsBySponsorYear', params)
return _result_to_obj(Bill, result['bills']['bill'])
@staticmethod
def getBillsBySponsorCategory(candidateId, categoryId):
params = {'candidateId':candidateId, 'categoryId':categoryId}
result = votesmart._apicall('Votes.getBillsBySponsorCategory', params)
return _result_to_obj(Bill, result['bills']['bill'])
@staticmethod
def getBillsByStateRecent(stateId=None, amount=None):
params = {'stateId':stateId, 'amount':amount}
result = votesmart._apicall('Votes.getBillsByStateRecent', params)
return _result_to_obj(Bill, result['bills']['bill'])
@staticmethod
def getVetoes(candidateId):
params = {'candidateId': candidateId}
result = votesmart._apicall('Votes.getVetoes', params)
return _result_to_obj(Veto, result['vetoes']['veto'])
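# Illustrative usage sketch (not part of the original module). The key below is
# a placeholder; a real Project Vote Smart API key is required:
#
#     votesmart.apikey = 'your-api-key-here'
#     for official in votesmart.officials.getByLastname('Smith'):
#         print official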
|
mikejs/python-votesmart
|
votesmart.py
|
Python
|
bsd-3-clause
| 25,110
|
from django.conf.urls.defaults import *
from gitology.config import settings as gsettings
from gitology.d import urls as gitology_urls
urlpatterns = patterns('',
# some url not managed by gitology.
# gitology will add to this conf file for the rest of the urls.
(
'files/(?P<path>.*)$', 'django.views.static.serve',
{ 'document_root': gsettings.LOCAL_REPO_PATH.joinpath("files") },
),
(
'static/(?P<path>.*)$', 'django.views.static.serve',
{ 'document_root': gsettings.LOCAL_REPO_PATH.joinpath("static") },
),
)
urlpatterns += gitology_urls.urlpatterns
|
amitu/gitology
|
amitucom/urls.py
|
Python
|
bsd-3-clause
| 619
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myhpom', '0003_user_details'),
]
operations = [
migrations.DeleteModel(
name='StateAdvanceDirective',
),
migrations.AlterModelOptions(
name='state',
options={},
),
migrations.RemoveField(
model_name='state',
name='supported',
),
migrations.AddField(
model_name='state',
name='advance_directive_template',
field=models.FileField(help_text=b'AD instructions associated with this State', upload_to=b'myhpom/advance_directive_templates', blank=True),
),
migrations.AlterField(
model_name='state',
name='name',
field=models.CharField(help_text=b'Two-letter state abbreviation', unique=True, max_length=2),
),
migrations.AlterField(
model_name='state',
name='title',
field=models.CharField(help_text=b'The full (common) name of the state (e.g. Rhode Island)', max_length=1024),
),
]
|
ResearchSoftwareInstitute/MyHPOM
|
myhpom/migrations/0004_auto_20180708_0954.py
|
Python
|
bsd-3-clause
| 1,233
|
#!/usr/bin/env python
"""
This script tests the python class interface
"""
from __future__ import absolute_import, division, print_function
# standard imports:
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
import pyshtools
sys.path.append(os.path.join(os.path.dirname(__file__), "../Common"))
from FigStyle import style_shtools
# set shtools plot style:
mpl.rcParams.update(style_shtools)
# ==== MAIN FUNCTION ====
def main():
example1()
example2()
# ==== EXAMPLES ====
def example1():
# generate cap window
lmax = 20
nwin = 20
theta = 25.
cap = pyshtools.SHWindow.from_cap(theta, lmax, nwin=nwin)
cap.info()
cap.plot_windows(20, show=False, fname='cap_tapers.png')
cap.plot_coupling_matrix(30, nwin=5, show=False, fname='cap_coupling.png')
# ==== EXAMPLES ====
def example2():
# generate cap window
lmax = 15
nwins = 15
coeffs = pyshtools.SHCoeffs.from_file('../../ExampleDataFiles/srtmp300.msl')
topo = coeffs.expand(grid='DH2')
dh_mask = topo.data > 0.
print(dh_mask.shape)
region = pyshtools.SHWindow.from_mask(dh_mask, lmax, nwins)
region.info()
region.plot_windows(nwins, show=False, fname='continent_tapers.png')
region.plot_coupling_matrix(30, 5, show=False,
fname='continent_coupling.png')
# ==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
|
ioshchepkov/SHTOOLS
|
examples/python/ClassInterface/WindowExample.py
|
Python
|
bsd-3-clause
| 1,515
|
from display_exceptions import NotFound, PermissionDenied
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.forms import modelformset_factory
from django.shortcuts import redirect
from base.views import render_cms_special
from svfinance.forms import PeriodForm, PeriodAccessForm
from svfinance.models import Period, PeriodAccess, Account, TransactionLine
from django.utils.translation import ugettext_lazy as _
from svfinance.utils import today
def get_period(func):
"""
View decorator to instantiate the period object.
"""
#todo: caching
def func_with_period(request, period=None, *args, **kwargs):
if period is None:
period_obj = None
else:
try:
period_obj = Period.objects.get(name=period)
except Period.DoesNotExist:
raise NotFound(_('No booking period with id "{0:s}" could be found.').format(period))
return func(request, period_obj, *args, **kwargs)
return func_with_period
@login_required
def auto_select_period(request):
"""
From among periods you have access to, find the most-recently started.
"""
periods = dict()
for team in request.user.teams:
for access in team.periods_accesses.prefetch_related():
periods[access.period.pk] = access.period
most_recent_period = None
today_ = today()
	for period in periods.values():
		if most_recent_period is None:
			most_recent_period = period
		elif period.start <= today_ and most_recent_period.start < period.start:
			# Prefer the period with the latest start date that has already begun.
			most_recent_period = period
if most_recent_period is None:
raise PermissionDenied('You do not have access to any bookkeeping periods. Continue to create one.', next=reverse('create_period'))
return redirect(to=reverse('list_accounts', kwargs=dict(period=most_recent_period.slug)))
@login_required
@get_period
def edit_period(request, period=None):
#todo: permission check
AccessFormSet = modelformset_factory(PeriodAccess, form=PeriodAccessForm, extra=2, can_delete=True)
if period is None:
period_form = PeriodForm(data=request.POST or None)
access_forms = AccessFormSet(data=request.POST or None)
else:
period_form = PeriodForm(data=request.POST or None, instance=period)
access_forms = AccessFormSet(data=request.POST or None, queryset=period.accesses.all())
period_valid = period_form.is_valid()
access_valid = access_forms.is_valid()
if period_valid and access_valid:
saved_period = period_form.save()
access_forms.save()
return redirect(to=reverse('list_accounts', kwargs=dict(period=saved_period.slug)))
#todo: set access instance periods?
#todo: at least one access?
return render_cms_special(request, 'edit_period.html', {
'period': period,
'period_form': period_form,
'access_forms': access_forms,
})
@login_required
@get_period
def list_accounts(request, period):
#todo: permission check
#todo: tree structure plugin (treebeard?)
assert period is not None
expense_acc = Account.objects.filter(period=period, type=Account.EXPENSE, parent=None)
asset_acc = Account.objects.filter(period=period, type=Account.ASSET, parent=None)
liability_acc = Account.objects.filter(period=period, type=Account.LIABILITY, parent=None)
user_acc = Account.objects.filter(period=period, type=Account.DEBTCRED, parent=None)
debtors, creditors, debtor_total, creditor_total = [], [], 0, 0
for acc in user_acc:
tot = acc.total()
if tot > 0:
debtors.append(acc)
debtor_total += tot
if tot < 0:
creditors.append(acc)
creditor_total += tot
return render_cms_special(request, 'accounts.html', {
'period': period,
'expense_acc': expense_acc,
'asset_acc': asset_acc,
'liability_acc': liability_acc,
'creditors': creditors,
'creditor_total': creditor_total,
'debtors': debtors,
'debtor_total': debtor_total,
})
@login_required
def list_accounts_redirect(request, period):
return redirect(reverse('list_accounts', kwargs=dict(period=period)), permanent=True)
@login_required
@get_period
def list_account_transactions(request, period, account):
#todo: permission check
raise NotImplementedError('laterzz')
@login_required
@get_period
def budget_all(request, period):
#todo: permission check
#todo: pagination
debtcred_other_lastaccs, debtcred_other_lines, prev_acc_chain = {}, {}, {}
debtcred_user_lastaccs, debtcred_user_lines = {}, {user.pk: (user, []) for user in get_user_model().objects.all()}
all_lines = {acc: [] for acc in Account.objects.filter(type=Account.DEBTCRED, period=period).order_by('pk')}
for line in TransactionLine.objects.filter(account__type=Account.DEBTCRED).prefetch_related():
all_lines[line.account].append(line)
for acc, lines in all_lines.items():
if acc.user:
debtcred_user_lines[acc.user.pk][1].extend(lines)
debtcred_user_lastaccs[acc.user.pk] = acc
elif acc.prev_period_account is None:
			debtcred_other_lines[acc.pk] = list(lines)
debtcred_other_lastaccs[acc.pk] = acc
else:
prev_acc_chain[acc.prev_period_account.pk] = (acc.pk, lines, acc)
for pk, lines_li in debtcred_other_lines.items():
child_data = prev_acc_chain.pop(pk, None)
while child_data:
lines_li.extend(child_data[1])
prev_acc_chain.pop(child_data[0], None)
debtcred_other_lastaccs[child_data[0]] = child_data[2]
debtcred_user = tuple((debtcred_user_lastaccs[user.pk], user, sum([line.amount for line in lines], 0), lines)
for user, lines in debtcred_user_lines.values())
debtcred_other = tuple((acc, sum([line.amount for line in lines], 0), lines)
for acc, lines in zip(debtcred_other_lastaccs.values(), debtcred_other_lines.values()))
return render_cms_special(request, 'budget_all.html', {
'period': period,
'debtcred_user': debtcred_user,
'debtcred_other': debtcred_other,
})
@login_required
@get_period
def budget_user(request, user=None):
#todo: permission check
raise NotImplementedError('bye')
|
mverleg/svsite
|
source/svfinance/views.py
|
Python
|
bsd-3-clause
| 5,911
|
from django.test import TestCase
from django.contrib.auth.models import Group
from hs_access_control.models import PrivilegeCodes
from hs_core import hydroshare
from hs_core.models import GenericResource
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set, \
assertUserResourceState, assertResourceUserState
class T03CreateResource(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(T03CreateResource, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.admin = hydroshare.create_account(
'admin@gmail.com',
username='admin',
first_name='administrator',
last_name='couch',
superuser=True,
groups=[]
)
self.cat = hydroshare.create_account(
'cat@gmail.com',
username='cat',
first_name='not a dog',
last_name='last_name_cat',
superuser=False,
groups=[]
)
self.dog = hydroshare.create_account(
'dog@gmail.com',
username='dog',
first_name='a little arfer',
last_name='last_name_dog',
superuser=False,
groups=[]
)
def test_01_create(self):
"""Resource creator has appropriate access"""
cat = self.cat
# check that user cat owns and holds nothing
assertUserResourceState(self, cat, [], [], [])
# create a resource
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
assertUserResourceState(self, cat, [holes], [], [])
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# unsharing with cat would violate owner constraint
self.assertTrue(
is_equal_to_as_set(
[], cat.uaccess.get_resource_unshare_users(holes)))
self.assertFalse(
cat.uaccess.can_unshare_resource_with_user(
holes, cat))
def test_02_isolate(self):
"""A user who didn't create a resource cannot access it"""
cat = self.cat
dog = self.dog
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
# check that resource was created
assertUserResourceState(self, cat, [holes], [], [])
# check that resource is not accessible to others
assertUserResourceState(self, dog, [], [], [])
# metadata should be the same as before
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for non-owner
self.assertFalse(dog.uaccess.owns_resource(holes))
self.assertFalse(dog.uaccess.can_change_resource(holes))
self.assertFalse(dog.uaccess.can_view_resource(holes))
# composite django state for non-owner
self.assertFalse(dog.uaccess.can_change_resource_flags(holes))
self.assertFalse(dog.uaccess.can_delete_resource(holes))
self.assertFalse(
dog.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertFalse(
dog.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertFalse(
dog.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# test list access functions for unshare targets
# these return empty because allowing this would violate the last owner
# rule
self.assertTrue(
is_equal_to_as_set(
[], cat.uaccess.get_resource_unshare_users(holes)))
self.assertTrue(
is_equal_to_as_set(
[], dog.uaccess.get_resource_unshare_users(holes)))
def test_06_check_flag_immutable(self):
"""Resource owner can set and reset immutable flag"""
cat = self.cat
dog = self.dog
# create a resource
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
assertUserResourceState(self, cat, [holes], [], [])
assertResourceUserState(self, holes, [cat], [], [])
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# make it immutable: what changes?
holes.raccess.immutable = True
holes.raccess.save()
# metadata state
self.assertTrue(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
assertUserResourceState(self, cat, [holes], [], [])
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertFalse(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# django admin access
self.assertFalse(self.admin.uaccess.owns_resource(holes))
self.assertTrue(self.admin.uaccess.can_change_resource(holes))
self.assertTrue(self.admin.uaccess.can_view_resource(holes))
self.assertTrue(self.admin.uaccess.can_change_resource_flags(holes))
self.assertTrue(self.admin.uaccess.can_delete_resource(holes))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
        # sharing with CHANGE while the resource is immutable squashes it to VIEW
self.cat.uaccess.share_resource_with_user(
holes, dog, PrivilegeCodes.CHANGE)
# CHANGE squashed to VIEW
assertUserResourceState(self, dog, [], [], [holes])
# now no longer immutable
holes.raccess.immutable = False
holes.raccess.save()
assertUserResourceState(self, dog, [], [holes], [])
self.cat.uaccess.unshare_resource_with_user(holes, dog)
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
def test_07_check_flag_discoverable(self):
"""Resource owner can set and reset discoverable flag"""
cat = self.cat
# create a resource
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# is it listed as discoverable?
self.assertTrue(
is_equal_to_as_set(
[], GenericResource.discoverable_resources.all()))
self.assertTrue(
is_equal_to_as_set(
[], GenericResource.public_resources.all()))
# make it discoverable
holes.raccess.discoverable = True
holes.raccess.save()
# is it listed as discoverable?
self.assertTrue(
is_equal_to_as_set(
[holes],
GenericResource.discoverable_resources.all()))
self.assertTrue(
is_equal_to_as_set(
[], GenericResource.public_resources.all()))
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertTrue(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# make it not discoverable
holes.raccess.discoverable = False
holes.raccess.save()
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# django admin should have full access to any not discoverable
# resource
self.assertTrue(self.admin.uaccess.can_change_resource_flags(holes))
self.assertTrue(self.admin.uaccess.can_delete_resource(holes))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# TODO: test get_discoverable_resources and get_public_resources
def test_08_check_flag_published(self):
"""Resource owner can set and reset published flag"""
cat = self.cat
# create a resource
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# make it published
holes.raccess.published = True
holes.raccess.save()
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertTrue(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertFalse(cat.uaccess.can_change_resource_flags(holes))
self.assertFalse(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# django admin access for published resource
self.assertFalse(self.admin.uaccess.owns_resource(holes))
self.assertTrue(self.admin.uaccess.can_change_resource(holes))
self.assertTrue(self.admin.uaccess.can_view_resource(holes))
self.assertTrue(self.admin.uaccess.can_change_resource_flags(holes))
# admin even can delete a published resource
self.assertTrue(self.admin.uaccess.can_delete_resource(holes))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# make it not published
holes.raccess.published = False
holes.raccess.save()
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
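    # A minimal sketch of the invariant exercised above (hypothetical helper,
    # not part of this suite): publishing freezes flag changes and deletion
    # for regular owners, while django admins retain both.
    #
    #   def assert_publish_lock(self, user, res):
    #       locked = res.raccess.published and not user.is_superuser
    #       self.assertEqual(user.uaccess.can_change_resource_flags(res), not locked)
    #       self.assertEqual(user.uaccess.can_delete_resource(res), not locked)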
def test_09_check_flag_public(self):
"""Resource owner can set and reset public flag"""
cat = self.cat
# create a resource
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# is it listed as discoverable?
self.assertTrue(
is_equal_to_as_set(
[], GenericResource.discoverable_resources.all()))
self.assertTrue(
is_equal_to_as_set(
[], GenericResource.public_resources.all()))
# make it public
holes.raccess.public = True
holes.raccess.save()
# is it listed as discoverable?
self.assertTrue(
is_equal_to_as_set(
[holes],
GenericResource.discoverable_resources.all()))
self.assertTrue(
is_equal_to_as_set(
[holes],
GenericResource.public_resources.all()))
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertTrue(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# make it not public
holes.raccess.public = False
holes.raccess.save()
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# django admin should have full access to any private resource
self.assertFalse(self.admin.uaccess.owns_resource(holes))
self.assertTrue(self.admin.uaccess.can_change_resource_flags(holes))
self.assertTrue(self.admin.uaccess.can_delete_resource(holes))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
|
FescueFungiShare/hydroshare
|
hs_access_control/tests/test_create_resource.py
|
Python
|
bsd-3-clause
| 24,563
|
import re
import mock
from nose.tools import eq_
from pyquery import PyQuery as pq
from django.core.files import temp
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.amo.urlresolvers import reverse
from olympia.amo.tests import formset, initial
from olympia.addons.models import Addon, AddonUser
from olympia.applications.models import AppVersion
from olympia.devhub.models import ActivityLog
from olympia.files.models import File
from olympia.users.models import UserProfile
from olympia.versions.models import ApplicationsVersions, Version
class TestVersion(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestVersion, self).setUp()
self.client.login(username='del@icio.us', password='password')
self.user = UserProfile.objects.get(email='del@icio.us')
self.addon = self.get_addon()
self.version = Version.objects.get(id=81551)
self.url = self.addon.get_dev_url('versions')
self.disable_url = self.addon.get_dev_url('disable')
self.enable_url = self.addon.get_dev_url('enable')
self.unlist_url = self.addon.get_dev_url('unlist')
self.delete_url = reverse('devhub.versions.delete', args=['a3615'])
self.delete_data = {'addon_id': self.addon.pk,
'version_id': self.version.pk}
def get_addon(self):
return Addon.objects.get(id=3615)
def get_doc(self):
res = self.client.get(self.url)
eq_(res.status_code, 200)
return pq(res.content)
def test_version_status_public(self):
doc = self.get_doc()
assert doc('.addon-status')
self.addon.update(status=amo.STATUS_DISABLED, disabled_by_user=True)
doc = self.get_doc()
assert doc('.addon-status .status-admin-disabled')
eq_(doc('.addon-status .status-admin-disabled').text(),
'Disabled by Mozilla')
self.addon.update(disabled_by_user=False)
doc = self.get_doc()
eq_(doc('.addon-status .status-admin-disabled').text(),
'Disabled by Mozilla')
self.addon.update(status=amo.STATUS_PUBLIC, disabled_by_user=True)
doc = self.get_doc()
eq_(doc('.addon-status .status-disabled').text(),
'You have disabled this add-on')
def test_no_validation_results(self):
doc = self.get_doc()
v = doc('td.file-validation').text()
eq_(re.sub(r'\s+', ' ', v),
'All Platforms Not validated. Validate now.')
eq_(doc('td.file-validation a').attr('href'),
reverse('devhub.file_validation',
args=[self.addon.slug, self.version.all_files[0].id]))
def test_upload_link_label_in_edit_nav(self):
url = reverse('devhub.versions.edit',
args=(self.addon.slug, self.version.pk))
r = self.client.get(url)
doc = pq(r.content)
eq_(doc('.addon-status>.addon-upload>strong>a').text(),
'Upload a new file')
def test_delete_message(self):
"""Make sure we warn our users of the pain they will feel."""
r = self.client.get(self.url)
doc = pq(r.content)
eq_(doc('#modal-delete p').eq(0).text(),
'Deleting your add-on will permanently remove it from the site '
'and prevent its GUID from being submitted again by others.')
def test_delete_message_if_bits_are_messy(self):
"""Make sure we warn krupas of the pain they will feel."""
self.addon.highest_status = amo.STATUS_NULL
self.addon.status = amo.STATUS_UNREVIEWED
self.addon.save()
r = self.client.get(self.url)
doc = pq(r.content)
eq_(doc('#modal-delete p').eq(0).text(),
'Deleting your add-on will permanently remove it from the site '
'and prevent its GUID from being submitted again by others.')
def test_delete_message_incomplete(self):
"""
        If an addon has highest_status = 0, its developer shouldn't be bothered
        with a blacklisting threat on delete.
"""
self.addon.highest_status = amo.STATUS_NULL
self.addon.status = amo.STATUS_NULL
self.addon.save()
r = self.client.get(self.url)
doc = pq(r.content)
# Normally 2 paragraphs, one is the warning which we should take out.
eq_(doc('#modal-delete p.warning').length, 0)
def test_delete_version(self):
self.client.post(self.delete_url, self.delete_data)
assert not Version.objects.filter(pk=81551).exists()
eq_(ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id)
.count(), 1)
def test_delete_version_then_detail(self):
version, file = self._extra_version_and_file(amo.STATUS_LITE)
self.client.post(self.delete_url, self.delete_data)
res = self.client.get(reverse('addons.detail', args=[self.addon.slug]))
eq_(res.status_code, 200)
def test_cant_delete_version(self):
self.client.logout()
res = self.client.post(self.delete_url, self.delete_data)
eq_(res.status_code, 302)
assert Version.objects.filter(pk=81551).exists()
def test_version_delete_status_null(self):
res = self.client.post(self.delete_url, self.delete_data)
eq_(res.status_code, 302)
eq_(self.addon.versions.count(), 0)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_NULL)
def _extra_version_and_file(self, status):
version = Version.objects.get(id=81551)
version_two = Version(addon=self.addon,
license=version.license,
version='1.2.3')
version_two.save()
file_two = File(status=status, version=version_two)
file_two.save()
return version_two, file_two
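    # Context: the helper above adds a second version whose file carries the
    # given status, so the delete tests below can observe how the addon's
    # overall status is recomputed once the original version is removed.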
def test_version_delete_status(self):
self._extra_version_and_file(amo.STATUS_PUBLIC)
res = self.client.post(self.delete_url, self.delete_data)
eq_(res.status_code, 302)
eq_(self.addon.versions.count(), 1)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_PUBLIC)
    def test_version_delete_status_unreviewed(self):
self._extra_version_and_file(amo.STATUS_BETA)
res = self.client.post(self.delete_url, self.delete_data)
eq_(res.status_code, 302)
eq_(self.addon.versions.count(), 1)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_UNREVIEWED)
@mock.patch('olympia.files.models.File.hide_disabled_file')
def test_user_can_disable_addon(self, hide_mock):
self.addon.update(status=amo.STATUS_PUBLIC,
disabled_by_user=False)
res = self.client.post(self.disable_url)
eq_(res.status_code, 302)
addon = Addon.objects.get(id=3615)
eq_(addon.disabled_by_user, True)
eq_(addon.status, amo.STATUS_PUBLIC)
assert hide_mock.called
entry = ActivityLog.objects.get()
eq_(entry.action, amo.LOG.USER_DISABLE.id)
msg = entry.to_string()
assert self.addon.name.__unicode__() in msg, ("Unexpected: %r" % msg)
@mock.patch('olympia.devhub.views.unindex_addons')
def test_user_can_unlist_addon(self, unindex):
self.addon.update(status=amo.STATUS_PUBLIC, disabled_by_user=False,
is_listed=True)
res = self.client.post(self.unlist_url)
assert res.status_code == 302
addon = Addon.with_unlisted.get(id=3615)
assert addon.status == amo.STATUS_PUBLIC
assert not addon.is_listed
# Make sure we remove the addon from the search index.
assert unindex.delay.called
entry = ActivityLog.objects.get()
assert entry.action == amo.LOG.ADDON_UNLISTED.id
msg = entry.to_string()
assert self.addon.name.__unicode__() in msg
@mock.patch('olympia.devhub.views.unindex_addons')
def test_user_can_unlist_hidden_addon(self, unindex):
self.addon.update(status=amo.STATUS_PUBLIC, disabled_by_user=True,
is_listed=True)
res = self.client.post(self.unlist_url)
assert res.status_code == 302
addon = Addon.with_unlisted.get(id=3615)
assert addon.status == amo.STATUS_PUBLIC
assert not addon.is_listed
assert not addon.disabled_by_user
# Make sure we remove the addon from the search index.
assert unindex.delay.called
entry = ActivityLog.objects.get()
assert entry.action == amo.LOG.ADDON_UNLISTED.id
msg = entry.to_string()
assert self.addon.name.__unicode__() in msg
def test_user_get(self):
eq_(self.client.get(self.enable_url).status_code, 405)
def test_user_can_enable_addon(self):
self.addon.update(status=amo.STATUS_PUBLIC, disabled_by_user=True)
res = self.client.post(self.enable_url)
self.assert3xx(res, self.url, 302)
addon = self.get_addon()
eq_(addon.disabled_by_user, False)
eq_(addon.status, amo.STATUS_PUBLIC)
entry = ActivityLog.objects.get()
eq_(entry.action, amo.LOG.USER_ENABLE.id)
msg = entry.to_string()
assert unicode(self.addon.name) in msg, ("Unexpected: %r" % msg)
def test_unprivileged_user_cant_disable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
res = self.client.post(self.disable_url)
eq_(res.status_code, 302)
eq_(Addon.objects.get(id=3615).disabled_by_user, False)
def test_non_owner_cant_disable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
assert self.client.login(username='regular@mozilla.com',
password='password')
res = self.client.post(self.disable_url)
eq_(res.status_code, 403)
eq_(Addon.objects.get(id=3615).disabled_by_user, False)
def test_non_owner_cant_enable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
assert self.client.login(username='regular@mozilla.com',
password='password')
res = self.client.get(self.enable_url)
eq_(res.status_code, 403)
eq_(Addon.objects.get(id=3615).disabled_by_user, False)
def test_non_owner_cant_change_status(self):
"""A non-owner can't use the radio buttons."""
self.addon.update(disabled_by_user=False)
addon_user = AddonUser.objects.get(addon=self.addon)
addon_user.role = amo.AUTHOR_ROLE_VIEWER
addon_user.save()
res = self.client.get(self.url)
doc = pq(res.content)
assert doc('.enable-addon').attr('checked') == 'checked'
assert doc('.enable-addon').attr('disabled') == 'disabled'
assert not doc('.disable-addon').attr('checked')
assert doc('.disable-addon').attr('disabled') == 'disabled'
assert not doc('.unlist-addon').attr('checked')
assert doc('.unlist-addon').attr('disabled') == 'disabled'
def test_published_addon_radio(self):
"""Published (listed) addon is selected: can hide or publish."""
self.addon.update(disabled_by_user=False)
res = self.client.get(self.url)
doc = pq(res.content)
assert doc('.enable-addon').attr('checked') == 'checked'
enable_url = self.addon.get_dev_url('enable')
assert doc('.enable-addon').attr('data-url') == enable_url
assert not doc('.enable-addon').attr('disabled')
assert doc('#modal-disable')
assert doc('#modal-unlist')
assert not doc('.disable-addon').attr('checked')
assert not doc('.disable-addon').attr('disabled')
assert not doc('.unlist-addon').attr('checked')
assert not doc('.unlist-addon').attr('disabled')
def test_hidden_addon_radio(self):
"""Hidden (disabled) addon is selected: can hide or publish."""
self.addon.update(disabled_by_user=True)
res = self.client.get(self.url)
doc = pq(res.content)
assert not doc('.enable-addon').attr('checked')
assert not doc('.enable-addon').attr('disabled')
assert doc('.disable-addon').attr('checked') == 'checked'
assert not doc('.disable-addon').attr('disabled')
assert not doc('.unlist-addon').attr('checked')
assert not doc('.unlist-addon').attr('disabled')
assert not doc('#modal-disable')
assert doc('#modal-unlist')
def test_status_disabled_addon_radio(self):
"""Disabled by Mozilla addon: hidden selected, can't change status."""
self.addon.update(status=amo.STATUS_DISABLED, disabled_by_user=False)
res = self.client.get(self.url)
doc = pq(res.content)
assert not doc('.enable-addon').attr('checked')
assert doc('.enable-addon').attr('disabled') == 'disabled'
assert doc('.disable-addon').attr('checked') == 'checked'
assert doc('.disable-addon').attr('disabled') == 'disabled'
assert not doc('.unlist-addon').attr('checked')
assert doc('.unlist-addon').attr('disabled') == 'disabled'
def test_unlisted_addon_cant_change_status(self):
"""Unlisted addon: can't change its status."""
self.addon.update(disabled_by_user=False, is_listed=False)
res = self.client.get(self.url)
doc = pq(res.content)
assert not doc('.enable-addon').attr('checked')
assert doc('.enable-addon').attr('disabled') == 'disabled'
assert not doc('.disable-addon').attr('checked')
assert doc('.disable-addon').attr('disabled') == 'disabled'
assert doc('.unlist-addon').attr('checked') == 'checked'
assert not doc('.unlist-addon').attr('disabled')
assert doc('#modal-disable')
assert not doc('#modal-unlist')
def test_cancel_get(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
eq_(self.client.get(cancel_url).status_code, 405)
def test_cancel_wrong_status(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
for status in Addon.STATUS_CHOICES:
if status in amo.UNDER_REVIEW_STATUSES + (amo.STATUS_DELETED,):
continue
self.addon.update(status=status)
self.client.post(cancel_url)
eq_(Addon.objects.get(id=3615).status, status)
def test_cancel(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
self.addon.update(status=amo.STATUS_LITE_AND_NOMINATED)
self.client.post(cancel_url)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_LITE)
for status in (amo.STATUS_UNREVIEWED, amo.STATUS_NOMINATED):
self.addon.update(status=status)
self.client.post(cancel_url)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_NULL)
def test_not_cancel(self):
self.client.logout()
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
eq_(self.addon.status, amo.STATUS_PUBLIC)
res = self.client.post(cancel_url)
eq_(res.status_code, 302)
eq_(Addon.objects.get(id=3615).status, amo.STATUS_PUBLIC)
def test_cancel_button(self):
for status in Addon.STATUS_CHOICES:
if status not in amo.UNDER_REVIEW_STATUSES:
continue
self.addon.update(status=status)
res = self.client.get(self.url)
doc = pq(res.content)
assert doc('#cancel-review')
assert doc('#modal-cancel')
def test_not_cancel_button(self):
for status in Addon.STATUS_CHOICES:
if status in amo.UNDER_REVIEW_STATUSES:
continue
self.addon.update(status=status)
res = self.client.get(self.url)
doc = pq(res.content)
assert not doc('#cancel-review')
assert not doc('#modal-cancel')
def test_purgatory_request_review(self):
self.addon.update(status=amo.STATUS_PURGATORY)
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button').text()
eq_(buttons, 'Request Preliminary Review Request Full Review')
def test_incomplete_request_review(self):
self.addon.update(status=amo.STATUS_NULL)
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button').text()
eq_(buttons, 'Request Preliminary Review Request Full Review')
def test_rejected_request_review(self):
self.addon.update(status=amo.STATUS_NULL)
self.addon.latest_version.files.update(status=amo.STATUS_DISABLED)
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button').text()
eq_(buttons, None)
def test_add_version_modal(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
# Make sure checkboxes are visible:
eq_(doc('.supported-platforms input.platform').length, 5)
eq_(set([i.attrib['type'] for i in doc('input.platform')]),
set(['checkbox']))
class TestVersionEditMixin(object):
def get_addon(self):
return Addon.objects.no_cache().get(id=3615)
def get_version(self):
return self.get_addon().current_version
def formset(self, *args, **kw):
defaults = {'approvalnotes': 'xxx'}
defaults.update(kw)
return formset(*args, **defaults)
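    # For context: `formset` (from olympia.amo.tests) assembles the POST data
    # for a Django formset -- the per-form field dicts plus the management-form
    # keys. Roughly (illustrative shape, not the exact helper output):
    #
    #   {'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 0,
    #    'form-0-approvalnotes': 'xxx', ...}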
class TestVersionEditBase(TestVersionEditMixin, TestCase):
fixtures = ['base/users', 'base/addon_3615', 'base/thunderbird']
def setUp(self):
super(TestVersionEditBase, self).setUp()
self.client.login(username='del@icio.us', password='password')
self.addon = self.get_addon()
self.version = self.get_version()
self.url = reverse('devhub.versions.edit',
args=['a3615', self.version.id])
self.v1, _created = AppVersion.objects.get_or_create(
application=amo.FIREFOX.id, version='1.0')
self.v5, _created = AppVersion.objects.get_or_create(
application=amo.FIREFOX.id, version='5.0')
class TestVersionEditMobile(TestVersionEditBase):
def setUp(self):
super(TestVersionEditMobile, self).setUp()
self.version.apps.all().delete()
app_vr = AppVersion.objects.create(application=amo.ANDROID.id,
version='1.0')
ApplicationsVersions.objects.create(version=self.version,
application=amo.ANDROID.id,
min=app_vr, max=app_vr)
self.version.files.update(platform=amo.PLATFORM_ANDROID.id)
def test_mobile_platform_options(self):
ctx = self.client.get(self.url).context
fld = ctx['file_form'].forms[0]['platform'].field
eq_(sorted(amo.PLATFORMS[p[0]].shortname for p in fld.choices),
['android'])
class TestVersionEditDetails(TestVersionEditBase):
def setUp(self):
super(TestVersionEditDetails, self).setUp()
ctx = self.client.get(self.url).context
compat = initial(ctx['compat_form'].forms[0])
files = initial(ctx['file_form'].forms[0])
self.initial = formset(compat, **formset(files, prefix='files'))
def formset(self, *args, **kw):
defaults = dict(self.initial)
defaults.update(kw)
return super(TestVersionEditDetails, self).formset(*args, **defaults)
def test_edit_notes(self):
d = self.formset(releasenotes='xx', approvalnotes='yy')
r = self.client.post(self.url, d)
eq_(r.status_code, 302)
version = self.get_version()
eq_(unicode(version.releasenotes), 'xx')
eq_(unicode(version.approvalnotes), 'yy')
def test_version_number_redirect(self):
url = self.url.replace(str(self.version.id), self.version.version)
r = self.client.get(url, follow=True)
self.assert3xx(r, self.url)
def test_supported_platforms(self):
res = self.client.get(self.url)
choices = res.context['new_file_form'].fields['platform'].choices
taken = [f.platform for f in self.version.files.all()]
platforms = set(self.version.compatible_platforms()) - set(taken)
eq_(len(choices), len(platforms))
def test_can_upload(self):
self.version.files.all().delete()
r = self.client.get(self.url)
doc = pq(r.content)
assert doc('a.add-file')
def test_not_upload(self):
res = self.client.get(self.url)
doc = pq(res.content)
assert not doc('a.add-file')
def test_add(self):
res = self.client.get(self.url)
doc = pq(res.content)
assert res.context['compat_form'].extra_forms
assert doc('p.add-app')[0].attrib['class'] == 'add-app'
def test_add_not(self):
for id in [18, 52, 59, 60, 61]:
av = AppVersion(application=id, version='1')
av.save()
ApplicationsVersions(application=id, min=av, max=av,
version=self.version).save()
res = self.client.get(self.url)
doc = pq(res.content)
assert not res.context['compat_form'].extra_forms
assert doc('p.add-app')[0].attrib['class'] == 'add-app hide'
def test_should_accept_zip_source_file(self):
tdir = temp.gettempdir()
tmp_file = temp.NamedTemporaryFile
with tmp_file(suffix=".zip", dir=tdir) as source_file:
source_file.write('a' * (2 ** 21))
source_file.seek(0)
data = self.formset(source=source_file)
response = self.client.post(self.url, data)
eq_(response.status_code, 302)
version = Version.objects.get(pk=self.version.pk)
assert version.source
assert version.addon.admin_review
# Check that the corresponding automatic activity log has been created.
log = ActivityLog.objects.get(action=amo.LOG.REQUEST_SUPER_REVIEW.id)
assert log.details['comments'] == (
u'This version has been automatically flagged as admin review, as '
u'it had some source files attached when submitted.')
def test_should_not_accept_exe_source_file(self):
tdir = temp.gettempdir()
tmp_file = temp.NamedTemporaryFile
with tmp_file(suffix=".exe", dir=tdir) as source_file:
source_file.write('a' * (2 ** 21))
source_file.seek(0)
data = self.formset(source=source_file)
response = self.client.post(self.url, data)
eq_(response.status_code, 200)
assert not Version.objects.get(pk=self.version.pk).source
def test_dont_reset_admin_review_flag_if_no_new_source(self):
tdir = temp.gettempdir()
tmp_file = temp.NamedTemporaryFile
with tmp_file(suffix=".zip", dir=tdir) as source_file:
source_file.write('a' * (2 ** 21))
source_file.seek(0)
data = self.formset(source=source_file)
response = self.client.post(self.url, data)
eq_(response.status_code, 302)
version = Version.objects.get(pk=self.version.pk)
assert version.source
assert version.addon.admin_review
# Unset the "admin review" flag, and re save the version. It shouldn't
# reset the flag, as the source hasn't changed.
version.addon.update(admin_review=False)
data = self.formset(name='some other name')
response = self.client.post(self.url, data)
eq_(response.status_code, 302)
version = Version.objects.get(pk=self.version.pk)
assert version.source
assert not version.addon.admin_review
class TestVersionEditSearchEngine(TestVersionEditMixin,
amo.tests.BaseTestCase):
# https://bugzilla.mozilla.org/show_bug.cgi?id=605941
fixtures = ['base/users', 'base/thunderbird', 'base/addon_4594_a9.json']
def setUp(self):
super(TestVersionEditSearchEngine, self).setUp()
self.client.login(username='admin@mozilla.com', password='password')
self.url = reverse('devhub.versions.edit',
args=['a4594', 42352])
def test_search_engine_edit(self):
dd = self.formset(prefix="files", releasenotes='xx',
approvalnotes='yy')
r = self.client.post(self.url, dd)
eq_(r.status_code, 302)
version = Addon.objects.no_cache().get(id=4594).current_version
eq_(unicode(version.releasenotes), 'xx')
eq_(unicode(version.approvalnotes), 'yy')
def test_no_compat(self):
r = self.client.get(self.url)
doc = pq(r.content)
assert not doc("#id_form-TOTAL_FORMS")
def test_no_upload(self):
r = self.client.get(self.url)
doc = pq(r.content)
assert not doc('a.add-file')
@mock.patch('olympia.versions.models.Version.is_allowed_upload')
def test_can_upload(self, allowed):
allowed.return_value = True
res = self.client.get(self.url)
doc = pq(res.content)
assert doc('a.add-file')
class TestVersionEditFiles(TestVersionEditBase):
def setUp(self):
super(TestVersionEditFiles, self).setUp()
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
self.compat = initial(f)
def formset(self, *args, **kw):
compat = formset(self.compat, initial_count=1)
compat.update(kw)
return super(TestVersionEditFiles, self).formset(*args, **compat)
def test_delete_file(self):
version = self.addon.current_version
version.files.all()[0].update(status=amo.STATUS_UNREVIEWED)
eq_(self.version.files.count(), 1)
forms = map(initial,
self.client.get(self.url).context['file_form'].forms)
forms[0]['DELETE'] = True
eq_(ActivityLog.objects.count(), 0)
r = self.client.post(self.url, self.formset(*forms, prefix='files'))
eq_(ActivityLog.objects.count(), 2)
log = ActivityLog.objects.order_by('created')[1]
eq_(log.to_string(), u'File delicious_bookmarks-2.1.072-fx.xpi deleted'
' from <a href="/en-US/firefox/addon/a3615'
'/versions/2.1.072">Version 2.1.072</a> of <a '
'href="/en-US/firefox/addon/a3615/">Delicious '
'Bookmarks</a>.')
eq_(r.status_code, 302)
eq_(self.version.files.count(), 0)
r = self.client.get(self.url)
eq_(r.status_code, 200)
def test_unique_platforms(self):
# Move the existing file to Linux.
f = self.version.files.get()
f.update(platform=amo.PLATFORM_LINUX.id)
# And make a new file for Mac.
File.objects.create(version=self.version,
platform=amo.PLATFORM_MAC.id)
forms = map(initial,
self.client.get(self.url).context['file_form'].forms)
forms[1]['platform'] = forms[0]['platform']
r = self.client.post(self.url, self.formset(*forms, prefix='files'))
doc = pq(r.content)
assert doc('#id_files-0-platform')
eq_(r.status_code, 200)
eq_(r.context['file_form'].non_form_errors(),
['A platform can only be chosen once.'])
def test_all_platforms(self):
version = self.addon.current_version
version.files.all()[0].update(status=amo.STATUS_UNREVIEWED)
File.objects.create(version=self.version,
platform=amo.PLATFORM_MAC.id)
forms = self.client.get(self.url).context['file_form'].forms
forms = map(initial, forms)
res = self.client.post(self.url, self.formset(*forms, prefix='files'))
eq_(res.context['file_form'].non_form_errors()[0],
'The platform All cannot be combined with specific platforms.')
def test_all_platforms_and_delete(self):
version = self.addon.current_version
version.files.all()[0].update(status=amo.STATUS_UNREVIEWED)
File.objects.create(
version=self.version, platform=amo.PLATFORM_MAC.id)
forms = self.client.get(self.url).context['file_form'].forms
forms = map(initial, forms)
        # Check that we don't validate the platform for deleted files.
forms[1]['DELETE'] = 1
self.client.post(self.url, self.formset(*forms, prefix='files'))
eq_(self.version.files.count(), 1)
def add_in_bsd(self):
f = self.version.files.get()
# The default file is All, which prevents the addition of more files.
f.update(platform=amo.PLATFORM_MAC.id)
return File.objects.create(version=self.version,
platform=amo.PLATFORM_BSD.id)
def get_platforms(self, form):
return [amo.PLATFORMS[i[0]].shortname
for i in form.fields['platform'].choices]
# The unsupported platform tests are for legacy addons. We don't
# want new addons uploaded with unsupported platforms but the old files can
# still be edited.
def test_all_unsupported_platforms(self):
self.add_in_bsd()
forms = self.client.get(self.url).context['file_form'].forms
choices = self.get_platforms(forms[1])
assert 'bsd' in choices, (
'After adding a BSD file, expected its platform to be '
'available in: %r' % choices)
def test_all_unsupported_platforms_unchange(self):
bsd = self.add_in_bsd()
forms = self.client.get(self.url).context['file_form'].forms
forms = map(initial, forms)
self.client.post(self.url, self.formset(*forms, prefix='files'))
eq_(File.objects.no_cache().get(pk=bsd.pk).platform,
amo.PLATFORM_BSD.id)
def test_all_unsupported_platforms_change(self):
bsd = self.add_in_bsd()
forms = self.client.get(self.url).context['file_form'].forms
forms = map(initial, forms)
# Update the file platform to Linux:
forms[1]['platform'] = amo.PLATFORM_LINUX.id
self.client.post(self.url, self.formset(*forms, prefix='files'))
eq_(File.objects.no_cache().get(pk=bsd.pk).platform,
amo.PLATFORM_LINUX.id)
forms = self.client.get(self.url).context['file_form'].forms
choices = self.get_platforms(forms[1])
assert 'bsd' not in choices, (
'After changing BSD file to Linux, BSD should no longer be a '
'platform choice in: %r' % choices)
def test_add_file_modal(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
# Make sure radio buttons are visible:
eq_(doc('.platform ul label').text(), 'Linux Mac OS X Windows')
eq_(set([i.attrib['type'] for i in doc('input.platform')]),
set(['radio']))
def test_mobile_addon_supports_only_mobile_platforms(self):
for a in self.version.apps.all():
a.application = amo.ANDROID.id
a.save()
self.version.files.all().update(platform=amo.PLATFORM_ANDROID.id)
forms = self.client.get(self.url).context['file_form'].forms
choices = self.get_platforms(forms[0])
eq_(sorted(choices),
sorted([p.shortname for p in amo.MOBILE_PLATFORMS.values()]))
class TestPlatformSearch(TestVersionEditMixin, amo.tests.BaseTestCase):
fixtures = ['base/users', 'base/thunderbird', 'base/addon_4594_a9.json']
def setUp(self):
super(TestPlatformSearch, self).setUp()
self.client.login(username='admin@mozilla.com', password='password')
self.url = reverse('devhub.versions.edit',
args=['a4594', 42352])
self.version = Version.objects.get(id=42352)
self.file = self.version.files.all()[0]
def test_no_platform_search_engine(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('#id_files-0-platform')
def test_changing_platform_search_engine(self):
dd = self.formset({'id': int(self.file.pk),
'platform': amo.PLATFORM_LINUX.id},
prefix='files', releasenotes='xx',
approvalnotes='yy')
response = self.client.post(self.url, dd)
eq_(response.status_code, 302)
file_ = Version.objects.no_cache().get(id=42352).files.all()[0]
eq_(amo.PLATFORM_ALL.id, file_.platform)
class TestVersionEditCompat(TestVersionEditBase):
def get_form(self, url=None):
if not url:
url = self.url
av = self.version.apps.get()
eq_(av.min.version, '2.0')
eq_(av.max.version, '4.0')
f = self.client.get(url).context['compat_form'].initial_forms[0]
return initial(f)
def formset(self, *args, **kw):
defaults = formset(prefix='files')
defaults.update(kw)
return super(TestVersionEditCompat, self).formset(*args, **defaults)
def test_add_appversion(self):
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
d = self.formset(initial(f), dict(application=18, min=288, max=298),
initial_count=1)
r = self.client.post(self.url, d)
eq_(r.status_code, 302)
apps = self.get_version().compatible_apps.keys()
eq_(sorted(apps), sorted([amo.FIREFOX, amo.THUNDERBIRD]))
eq_(list(ActivityLog.objects.all().values_list('action')),
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)])
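    # Note: application=18 is Thunderbird's application id in these fixtures
    # (the assertion above expects amo.THUNDERBIRD); 288 and 298 are presumed
    # AppVersion pks supplied by the base/thunderbird fixture.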
def test_update_appversion(self):
d = self.get_form()
d.update(min=self.v1.id, max=self.v5.id)
r = self.client.post(self.url,
self.formset(d, initial_count=1))
eq_(r.status_code, 302)
av = self.version.apps.get()
eq_(av.min.version, '1.0')
eq_(av.max.version, '5.0')
eq_(list(ActivityLog.objects.all().values_list('action')),
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)])
def test_ajax_update_appversion(self):
url = reverse('devhub.ajax.compat.update',
args=['a3615', self.version.id])
d = self.get_form(url)
d.update(min=self.v1.id, max=self.v5.id)
r = self.client.post(url, self.formset(d, initial_count=1))
eq_(r.status_code, 200)
av = self.version.apps.get()
eq_(av.min.version, '1.0')
eq_(av.max.version, '5.0')
eq_(list(ActivityLog.objects.all().values_list('action')),
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)])
def test_delete_appversion(self):
# Add thunderbird compat so we can delete firefox.
self.test_add_appversion()
f = self.client.get(self.url).context['compat_form']
d = map(initial, f.initial_forms)
d[0]['DELETE'] = True
r = self.client.post(self.url, self.formset(*d, initial_count=2))
eq_(r.status_code, 302)
apps = self.get_version().compatible_apps.keys()
eq_(apps, [amo.THUNDERBIRD])
eq_(list(ActivityLog.objects.all().values_list('action')),
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)])
def test_unique_apps(self):
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
dupe = initial(f)
del dupe['id']
d = self.formset(initial(f), dupe, initial_count=1)
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
# Because of how formsets work, the second form is expected to be a
# tbird version range. We got an error, so we're good.
def test_require_appversion(self):
old_av = self.version.apps.get()
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
d = initial(f)
d['DELETE'] = True
r = self.client.post(self.url, self.formset(d, initial_count=1))
eq_(r.status_code, 200)
eq_(r.context['compat_form'].non_form_errors(),
['Need at least one compatible application.'])
eq_(self.version.apps.get(), old_av)
def test_proper_min_max(self):
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
d = initial(f)
d['min'], d['max'] = d['max'], d['min']
r = self.client.post(self.url, self.formset(d, initial_count=1))
eq_(r.status_code, 200)
eq_(r.context['compat_form'].forms[0].non_field_errors(),
['Invalid version range.'])
def test_same_min_max(self):
f = self.client.get(self.url).context['compat_form'].initial_forms[0]
d = initial(f)
d['min'] = d['max']
r = self.client.post(self.url, self.formset(d, initial_count=1))
eq_(r.status_code, 302)
av = self.version.apps.all()[0]
eq_(av.min, av.max)
|
jpetto/olympia
|
src/olympia/devhub/tests/test_views_versions.py
|
Python
|
bsd-3-clause
| 37,068
|
# Copyright 2020 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Tests for bsst.evaluation."""
from absl.testing import absltest
import numpy as np
import xarray as xr
from bsst import evaluation
def day(n):
"""October n, 2020."""
return np.datetime64(f'2020-10-{n:02}')
class EvaluationTest(absltest.TestCase):
def test_success_day(self):
time = np.arange(day(1), day(9), np.timedelta64(1, 'D'))
cum_events = np.array([[1, 4, 9, 16, 25, 36, 49, 64],
[1, 2, 3, 4, 5, 6, 7, 8],
[2, 4, 8, 16, 32, 64, 128, 256]])
cum_events = xr.DataArray(
cum_events, dims=('scenario', 'time'), coords=([0, 1, 2], time))
control_arm_events = cum_events - cum_events.shift(time=1, fill_value=0)
# Check basic functionality when needed events is a singleton.
needed_events = 3
success_day = evaluation.success_day(needed_events, control_arm_events)
expected = np.array([day(2), day(3), day(2)])
np.testing.assert_equal(success_day, expected)
# Check basic functionality when needed events is a list.
needed_events = [10, 50]
success_day = evaluation.success_day(needed_events, control_arm_events)
expected = np.array([[day(4), day(9), day(4)], [day(8), day(9), day(6)]])
np.testing.assert_equal(success_day, expected.T)
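    # Sanity check of the first expectation: scenario 0 accumulates 1, 4, 9, ...
    # events, so 3 control-arm events are first reached on October 2, matching
    # expected[0] == day(2).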
if __name__ == '__main__':
absltest.main()
|
verilylifesciences/site-selection-tool
|
bsst/evaluation_test.py
|
Python
|
bsd-3-clause
| 1,526
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['MovingAverage'] , ['NoCycle'] , ['MLP'] );
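# For readers of this generated test: the four lists passed to build_model
# appear to select, in order, the signal transformation, trend, cycle, and
# autoregressive model to enable -- here BoxCox + MovingAverage + NoCycle + MLP.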
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_MovingAverage_NoCycle_MLP.py
|
Python
|
bsd-3-clause
| 151
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
from ..x3 import XArm, Studio
class XArmAPI(object):
def __init__(self, port=None, is_radian=False, do_not_open=False, **kwargs):
"""
The API wrapper of xArm
Note: Orientation of attitude angle
roll: rotate around the X axis
pitch: rotate around the Y axis
yaw: rotate around the Z axis
        :param port: ip-address (such as '192.168.1.185')
Note: this parameter is required if parameter do_not_open is False
:param is_radian: set the default unit is radians or not, default is False
Note: (aim of design)
1. Default value for unified interface parameters
                2. Unification of the external unit system
3. For compatibility with previous interfaces
Note: the conversion of degree (°) and radians (rad)
* 1 rad == 57.29577951308232 °
* 1 ° == 0.017453292519943295 rad
* 1 rad/s == 57.29577951308232 °/s
* 1 °/s == 0.017453292519943295 rad/s
* 1 rad/s^2 == 57.29577951308232 °/s^2
* 1 °/s^2 == 0.017453292519943295 rad/s^2
* 1 rad/s^3 == 57.29577951308232 °/s^3
* 1 °/s^3 == 0.017453292519943295 rad/s^3
Note: This parameter determines the value of the property self.default_is_radian
Note: This parameter determines the default value of the interface with the is_radian/input_is_radian/return_is_radian parameter
The list of affected interfaces is as follows:
1. method: get_position
2. method: set_position
3. method: get_servo_angle
4. method: set_servo_angle
5. method: set_servo_angle_j
6. method: move_gohome
7. method: reset
8. method: set_tcp_offset
9. method: set_joint_jerk
10. method: set_joint_maxacc
11. method: get_inverse_kinematics
12. method: get_forward_kinematics
13. method: is_tcp_limit
14. method: is_joint_limit
15. method: get_params
                16. method: move_arc_lines
                17. method: move_circle
                18. method: set_servo_cartesian
            Note: This parameter also determines the default return unit for some interfaces (such as angle-related position, velocity, and acceleration values, which may be returned in radians).
The affected attributes are as follows:
1. property: position
2. property: last_used_position
3. property: angles
4. property: last_used_angles
5. property: last_used_joint_speed
6. property: last_used_joint_acc
7. property: tcp_offset
:param do_not_open: do not open, default is False, if true, you need to manually call the connect interface.
:param kwargs: keyword parameters, generally do not need to set
axis: number of axes, required only when using a serial port connection, default is 7
baudrate: serial baudrate, invalid, reserved.
timeout: serial timeout, invalid, reserved.
filters: serial port filters, invalid, reserved.
check_tcp_limit: check the tcp param value out of limit or not, default is False
Note: only check the param roll/pitch/yaw of the interface `set_position`/`move_arc_lines`
check_joint_limit: check the joint param value out of limit or not, default is True
Note: only check the param angle of the interface `set_servo_angle` and the param angles of the interface `set_servo_angle_j`
check_cmdnum_limit: check the cmdnum out of limit or not, default is True
max_cmdnum: max cmdnum, default is 256
Note: only available in the param `check_cmdnum_limit` is True
check_is_ready: check if the arm is in motion, default is True
"""
self._arm = XArm(port=port,
is_radian=is_radian,
do_not_open=do_not_open,
instance=self,
**kwargs)
self._studio = Studio(port, True)
self.__attr_alias_map = {
'get_ik': self.get_inverse_kinematics,
'get_fk': self.get_forward_kinematics,
'set_sleep_time': self.set_pause_time,
'register_maable_mtbrake_changed_callback': self.register_mtable_mtbrake_changed_callback,
'release_maable_mtbrake_changed_callback': self.release_mtable_mtbrake_changed_callback,
'position_offset': self.tcp_offset,
'get_gpio_digital': self.get_tgpio_digital,
'set_gpio_digital': self.set_tgpio_digital,
'get_gpio_analog': self.get_tgpio_analog,
'set_fense_mode': self.set_fence_mode,
'get_suction_cup': self.get_vacuum_gripper,
'set_suction_cup': self.set_vacuum_gripper,
}
def __getattr__(self, item):
if item in self.__attr_alias_map.keys():
return self.__attr_alias_map[item]
        raise AttributeError('\'{}\' has no attribute \'{}\''.format(self.__class__.__name__, item))
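    # A minimal usage sketch (kept as comments so this class body stays
    # import-safe; the IP address is the example one from the docstring):
    #
    #   arm = XArmAPI('192.168.1.185', is_radian=False)
    #   code, pos = arm.get_position()   # roll/pitch/yaw in degrees here
    #   arm.set_position(x=300, y=0, z=200, roll=180, pitch=0, yaw=0, wait=True)
    #   arm.disconnect()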
@property
def arm(self):
return self._arm
@property
def core(self):
"""
Core layer API, set only for advanced developers, please do not use
Ex:
self.core.move_line(...)
self.core.move_lineb(...)
self.core.move_joint(...)
...
"""
return self._arm.arm_cmd
@property
def count(self):
"""
        Counter value
"""
return self._arm.count
@property
def realtime_tcp_speed(self):
"""
The real time speed of tcp motion, only available if version > 1.2.11
:return: real time speed (mm/s)
"""
return self._arm.realtime_tcp_speed
@property
def realtime_joint_speeds(self):
"""
The real time speed of joint motion, only available if version > 1.2.11
:return: [joint-1-speed(°/s or rad/s), ...., joint-7-speed(°/s or rad/s)]
"""
return self._arm.realtime_joint_speeds
@property
def gpio_reset_config(self):
"""
The gpio reset enable config
:return: [cgpio_reset_enable, tgpio_reset_enable]
"""
return self._arm.gpio_reset_config
@property
def version_number(self):
"""
        Firmware version number
:return: (major_version_number, minor_version_number, revision_version_number)
"""
return self._arm.version_number
@property
def connected(self):
"""
Connection status
"""
return self._arm.connected
@property
def default_is_radian(self):
"""
The default unit is radians or not
"""
return self._arm.default_is_radian
@property
def version(self):
"""
xArm version
"""
return self._arm.version
@property
def sn(self):
"""
xArm sn
"""
return self._arm.sn
@property
def control_box_sn(self):
"""
Control box sn
"""
return self._arm.control_box_sn
@property
def position(self):
"""
        Cartesian position
        Note:
            1. If self.default_is_radian is True, the returned value (only roll/pitch/yaw) is in radians
        :return: [x(mm), y(mm), z(mm), roll(° or rad), pitch(° or rad), yaw(° or rad)]
"""
return self._arm.position
@property
def position_aa(self):
"""
The pose represented by the axis angle pose
Note:
1. If self.default_is_radian is True, the returned value (only roll/pitch/yaw) is in radians
:return: [x(mm), y(mm), z(mm), rx(° or rad), ry(° or rad), rz(° or rad)]
"""
return self._arm.position_aa
@property
def last_used_position(self):
"""
        The last used cartesian position, default value of parameter x/y/z/roll/pitch/yaw of interface set_position
        Note:
            1. If self.default_is_radian is True, the returned value (only roll/pitch/yaw) is in radians
            2. self.set_position(x=300) <==> self.set_position(x=300, *last_used_position[1:])
            3. self.set_position(roll=-180) <==> self.set_position(*self.last_used_position[:3], roll=-180, pitch=self.last_used_position[4], yaw=self.last_used_position[5])
:return: [x(mm), y(mm), z(mm), roll(° or rad), pitch(° or rad), yaw(° or rad)]
"""
return self._arm.last_used_position
@property
def tcp_jerk(self):
"""
Tcp jerk
:return: jerk (mm/s^3)
"""
return self._arm.tcp_jerk
@property
def tcp_speed_limit(self):
"""
Tcp speed limit, only available in socket way and enable_report is True and report_type is 'rich'
:return: [min_tcp_speed(mm/s), max_tcp_speed(mm/s)]
"""
return self._arm.tcp_speed_limit
@property
def tcp_acc_limit(self):
"""
Tcp acceleration limit, only available in socket way and enable_report is True and report_type is 'rich'
:return: [min_tcp_acc(mm/s^2), max_tcp_acc(mm/s^2)]
"""
return self._arm.tcp_acc_limit
@property
def last_used_tcp_speed(self):
"""
        The last used cartesian speed, default value of parameter speed of interface set_position/move_circle
:return: speed (mm/s)
"""
return self._arm.last_used_tcp_speed
@property
def last_used_tcp_acc(self):
"""
        The last used cartesian acceleration, default value of parameter mvacc of interface set_position/move_circle
:return: acceleration (mm/s^2)
"""
return self._arm.last_used_tcp_acc
@property
def angles(self):
"""
Servo angles
Note:
1. If self.default_is_radian is True, the returned value is in radians
        :return: [angle1(° or rad), angle2(° or rad), ..., angle7(° or rad)]
"""
return self._arm.angles
@property
def joint_jerk(self):
"""
Joint jerk
Note:
1. If self.default_is_radian is True, the returned value is in radians
:return: jerk (°/s^3 or rad/s^3)
"""
return self._arm.joint_jerk
@property
def joint_speed_limit(self):
"""
Joint speed limit, only available in socket way and enable_report is True and report_type is 'rich'
Note:
1. If self.default_is_radian is True, the returned value is in radians
:return: [min_joint_speed(°/s or rad/s), max_joint_speed(°/s or rad/s)]
"""
return self._arm.joint_speed_limit
@property
def joint_acc_limit(self):
"""
Joint acceleration limit, only available in socket way and enable_report is True and report_type is 'rich'
Note:
1. If self.default_is_radian is True, the returned value is in radians
:return: [min_joint_acc(°/s^2 or rad/s^2), max_joint_acc(°/s^2 or rad/s^2)]
"""
return self._arm.joint_acc_limit
@property
def last_used_angles(self):
"""
The last used servo angles, default value of parameter angle of interface set_servo_angle
Note:
1. If self.default_is_radian is True, the returned value is in radians
2. self.set_servo_angle(servo_id=1, angle=75) <==> self.set_servo_angle(angle=[75] + self.last_used_angles[1:])
3. self.set_servo_angle(servo_id=5, angle=30) <==> self.set_servo_angle(angle=self.last_used_angles[:4] + [30] + self.last_used_angles[5:])
:return: [angle1(° or rad), angle2(° or rad), ..., angle7(° or rad)]
"""
return self._arm.last_used_angles
@property
def last_used_joint_speed(self):
"""
The last used joint speed, default value of parameter speed of interface set_servo_angle
Note:
1. If self.default_is_radian is True, the returned value is in radians
:return: speed (°/s or rad/s)
"""
return self._arm.last_used_joint_speed
@property
def last_used_joint_acc(self):
"""
The last used joint acceleration, default value of parameter mvacc of interface set_servo_angle
Note:
1. If self.default_is_radian is True, the returned value is in radians
:return: acceleration (°/s^2 or rad/s^2)
"""
return self._arm.last_used_joint_acc
@property
def tcp_offset(self):
"""
        Cartesian position offset, only available in socket way and enable_report is True
Note:
1. If self.default_is_radian is True, the returned value(roll_offset/pitch_offset/yaw_offset) is in radians
:return: [x_offset(mm), y_offset(mm), z_offset(mm), roll_offset(° or rad), pitch_offset(° or rad), yaw_offset(° or rad)]
"""
return self._arm.position_offset
@property
def world_offset(self):
"""
Base coordinate offset, only available if version > 1.2.11
Note:
1. If self.default_is_radian is True, the returned value(roll_offset/pitch_offset/yaw_offset) is in radians
:return: [x_offset(mm), y_offset(mm), z_offset(mm), roll_offset(° or rad), pitch_offset(° or rad), yaw_offset(° or rad)]
"""
return self._arm.world_offset
@property
def state(self):
"""
xArm state
:return:
1: in motion
2: sleeping
3: suspended
4: stopping
"""
return self._arm.state
@property
def mode(self):
"""
xArm mode,only available in socket way and enable_report is True
:return:
0: position control mode
1: servo motion mode
2: joint teaching mode
3: cartesian teaching mode (invalid)
4: joint velocity control mode
5: cartesian velocity control mode
"""
return self._arm.mode
@property
def is_simulation_robot(self):
"""
        Is simulation robot or not
"""
return self._arm.is_simulation_robot
@property
def joints_torque(self):
"""
Joints torque, only available in socket way and enable_report is True and report_type is 'rich'
:return: [joint-1, ....]
"""
return self._arm.joints_torque
@property
def tcp_load(self):
"""
xArm tcp load, only available in socket way and enable_report is True and report_type is 'rich'
:return: [weight, center of gravity]
such as: [weight(kg), [x(mm), y(mm), z(mm)]]
"""
return self._arm.tcp_load
@property
def collision_sensitivity(self):
"""
The sensitivity value of collision, only available in socket way and enable_report is True and report_type is 'rich'
:return: 0~5
"""
return self._arm.collision_sensitivity
@property
def teach_sensitivity(self):
"""
The sensitivity value of drag and teach, only available in socket way and enable_report is True and report_type is 'rich'
:return: 1~5
"""
return self._arm.teach_sensitivity
@property
def motor_brake_states(self):
"""
Motor brake state list, only available in socket way and enable_report is True and report_type is 'rich'
Note:
For a robot with a number of axes n, only the first n states are valid, and the latter are reserved.
:return: [motor-1-brake-state, ..., motor-7-brake-state, reserved]
motor-{i}-brake-state:
0: enable
1: disable
"""
return self._arm.motor_brake_states
@property
def motor_enable_states(self):
"""
Motor enable state list, only available in socket way and enable_report is True and report_type is 'rich'
Note:
For a robot with a number of axes n, only the first n states are valid, and the latter are reserved.
:return: [motor-1-enable-state, ..., motor-7-enable-state, reserved]
motor-{i}-enable-state:
0: disable
1: enable
"""
return self._arm.motor_enable_states
@property
def temperatures(self):
"""
Motor temperature, only available if version > 1.2.11
:return: [motor-1-temperature, ..., motor-7-temperature]
"""
return self._arm.temperatures
@property
def has_err_warn(self):
"""
        Controller has an error or warning or not
:return: True/False
"""
return self._arm.has_err_warn
@property
def has_error(self):
"""
        Controller has an error or not
"""
return self._arm.has_error
@property
def has_warn(self):
"""
        Controller has a warning or not
"""
return self._arm.has_warn
@property
def error_code(self):
"""
Controller error code. See Chapter 7 of the xArm User Manual for details.
"""
return self._arm.error_code
@property
def warn_code(self):
"""
Controller warn code. See Chapter 7 of the xArm User Manual for details.
"""
return self._arm.warn_code
@property
def cmd_num(self):
"""
Number of command caches in the controller
"""
return self._arm.cmd_num
@property
def device_type(self):
"""
Device type, only available in socket way and enable_report is True and report_type is 'rich'
"""
return self._arm.device_type
@property
def axis(self):
"""
Axis number, only available in socket way and enable_report is True and report_type is 'rich'
"""
return self._arm.axis
@property
def master_id(self):
"""
Master id, only available in socket way and enable_report is True and report_type is 'rich'
"""
return self._arm.master_id
@property
def slave_id(self):
"""
Slave id, only available in socket way and enable_report is True and report_type is 'rich'
"""
return self._arm.slave_id
@property
def gravity_direction(self):
"""
gravity direction, only available in socket way and enable_report is True and report_type is 'rich'
:return:
"""
return self._arm.gravity_direction
@property
def servo_codes(self):
"""
Servos status and error_code
:return: [
[servo-1-status, servo-1-code],
...,
[servo-7-status, servo-7-code],
[tool-gpio-status, tool-gpio-code]
]
"""
return self._arm.servo_codes
@property
def voltages(self):
"""
Servos voltage
:return: [servo-1-voltage, ..., servo-7-voltage]
"""
return self._arm.voltages
@property
def currents(self):
"""
Servos electric current
:return: [servo-1-current, ..., servo-7-current]
"""
return self._arm.currents
@property
def cgpio_states(self):
"""
Controller gpio state
:return: states
            states[0]: controller gpio module state
                states[0] == 0: normal
                states[0] == 1: wrong
                states[0] == 6: communication failure
            states[1]: controller gpio module error code
                states[1] == 0: normal
                states[1] != 0: error code
states[2]: digital input functional gpio state
Note: digital-i-input functional gpio state = states[2] >> i & 0x01
states[3]: digital input configuring gpio state
Note: digital-i-input configuring gpio state = states[3] >> i & 0x01
states[4]: digital output functional gpio state
Note: digital-i-output functional gpio state = states[4] >> i & 0x01
states[5]: digital output configuring gpio state
Note: digital-i-output configuring gpio state = states[5] >> i & 0x01
states[6]: analog-0 input value
states[7]: analog-1 input value
states[8]: analog-0 output value
states[9]: analog-1 output value
states[10]: digital input functional info, [digital-0-input-functional-mode, ... digital-7-input-functional-mode]
states[11]: digital output functional info, [digital-0-output-functional-mode, ... digital-7-output-functional-mode]
"""
return self._arm.cgpio_states
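    # Example of decoding the packed state words documented above (illustrative,
    # using the formula given in the docstring):
    #
    #   states = arm.cgpio_states
    #   di3 = states[2] >> 3 & 0x01   # digital input 3, functional gpio state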
@property
def self_collision_params(self):
"""
Self collision params
:return: params
params[0]: self collision detection or not
params[1]: self collision tool type
params[2]: self collision model params
"""
return self._arm.self_collision_params
@property
def ft_ext_force(self):
return self._arm.ft_ext_force
@property
def ft_raw_force(self):
return self._arm.ft_raw_force
def connect(self, port=None, baudrate=None, timeout=None, axis=None, **kwargs):
"""
Connect to xArm
:param port: port name or the ip address, default is the value when initializing an instance
:param baudrate: baudrate, only available in serial way, default is the value when initializing an instance
:param timeout: timeout, only available in serial way, default is the value when initializing an instance
:param axis: number of axes, required only when using a serial port connection, default is 7
"""
self._arm.connect(port=port, baudrate=baudrate, timeout=timeout, axis=axis, **kwargs)
def disconnect(self):
"""
Disconnect
"""
self._arm.disconnect()
def send_cmd_sync(self, command=None):
"""
        Send cmd and wait (only waits for the cmd response, not for the motion to finish)
Note:
1. Some command depends on self.default_is_radian
:param command:
'G1': 'set_position(MoveLine): G1 X{x} Y{y} Z{z} A{roll} B{pitch} C{yaw} F{speed} Q{acc} T{mvtime}'
'G2': 'move_circle: G2 X{x1} Y{y1} Z{z1} A{roll1} B{pitch1} C{yaw1} I{x2} J{y2} K{z2} L{roll2} M{pitch2} N{yaw2} F{speed} Q{acc} T{mvtime}'
'G4': 'set_pause_time: G4 T{second}'
'G7': 'set_servo_angle: G7 I{servo_1} J{servo_2} K{servo_3} L{servo_4} M{servo_5} N{servo_6} O{servo_7} F{speed} Q{acc} T{mvtime}'
'G8': 'move_gohome: G8 F{speed} Q{acc} T{mvtime}'
'G9': 'set_position(MoveArcLine): G9 X{x} Y{y} Z{z} A{roll} B{pitch} C{yaw} R{radius} F{speed} Q{acc} T{mvtime}'
'G11': 'set_servo_angle_j: G11 I{servo_1} J{servo_2} K{servo_3} L{servo_4} M{servo_5} N{servo_6} O{servo_7} F{speed} Q{acc} T{mvtime}'
'H1': 'get_version: H1'
'H11': 'motion_enable: H11 I{servo_id} V{enable}'
'H12': 'set_state: H12 V{state}'
'H13': 'get_state: H13'
'H14': 'get_cmdnum: H14'
'H15': 'get_err_warn_code: H15'
'H16': 'clean_error: H16'
'H17': 'clean_warn: H17'
'H18': 'set_servo_attach/set_servo_detach: H18 I{servo_id} V{1: enable(detach), 0: disable(attach)}'
'H19': 'set_mode: H19 V{mode}'
'H31': 'set_tcp_jerk: H31 V{jerk(mm/s^3)}'
'H32': 'set_tcp_maxacc: H32 V{maxacc(mm/s^2)}'
'H33': 'set_joint_jerk: H33 V{jerk(°/s^3 or rad/s^3)}'
            'H34': 'set_joint_maxacc: H34 V{maxacc(°/s^2 or rad/s^2)}'
'H35': 'set_tcp_offset: H35 X{x} Y{y} Z{z} A{roll} B{pitch} C{yaw}'
'H36': 'set_tcp_load: H36 I{weight} J{center_x} K{center_y} L{center_z}'
'H37': 'set_collision_sensitivity: H37 V{sensitivity}'
'H38': 'set_teach_sensitivity: H38 V{sensitivity}'
'H39': 'clean_conf: H39'
'H40': 'save_conf: H40'
'H41': 'get_position: H41'
'H42': 'get_servo_angle: H42'
'H43': 'get_inverse_kinematics: H43 X{x} Y{y} Z{z} A{roll} B{pitch} C{yaw}'
'H44': 'get_forward_kinematics: H44 I{servo_1} J{servo_2} K{servo_3} L{servo_4} M{servo_5} N{servo_6} O{servo_7}'
'H45': 'is_joint_limit: H45 I{servo_1} J{servo_2} K{servo_3} L{servo_4} M{servo_5} N{servo_6} O{servo_7}'
'H46': 'is_tcp_limit: H46 X{x} Y{y} Z{z} A{roll} B{pitch} C{yaw}'
'H51': 'set_gravity_direction: H51 X{x} Y{y} Z{z}'
'H106': 'get_servo_debug_msg: H106'
'M116': 'set_gripper_enable: M116 V{enable}'
'M117': 'set_gripper_mode: M117 V{mode}'
'M119': 'get_gripper_position: M119'
'M120': 'set_gripper_position: M120 V{pos}'
            'M121': 'set_gripper_speed: M121 V{speed}'
'M125': 'get_gripper_err_code: M125'
'M126': 'clean_gripper_error: M126'
'M131': 'get_tgpio_digital: M131'
'M132': 'set_tgpio_digital: M132 I{ionum} V{value}'
'M133': 'get_tgpio_analog, default ionum=0: M133 I{ionum=0}'
'M134': 'get_tgpio_analog, default ionum=1: M134 I{ionum=1}'
'C131': 'get_cgpio_digital: C131'
'C132': 'get_cgpio_analog, default ionum=0: C132 I{ionum=0}'
'C133': 'get_cgpio_analog, default ionum=1: C133 I{ionum=1}'
'C134': 'set_cgpio_digital: C134 I{ionum} V{value}'
'C135': 'set_cgpio_analog, default ionum=0: C135 I{ionum=0} V{value}'
'C136': 'set_cgpio_analog, default ionum=1: C136 I{ionum=1} V{value}'
'C137': 'set_cgpio_digital_input_function: C137 I{ionum} V{fun}'
'C138': 'set_cgpio_digital_output_function: C138 I{ionum} V{fun}'
'C139': 'get_cgpio_state: C139'
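ex (an illustrative sketch; the pose and speed values are assumptions, the grammar follows the table above):
code = arm.send_cmd_sync('G1 X300 Y0 Z200 A180 B0 C0 F100')  # MoveLine to an assumed pose at 100 mm/s
code = arm.send_cmd_sync('H43 X300 Y0 Z200 A180 B0 C0')      # query inverse kinematics for the same pose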
:return: code or tuple((code, ...))
code: See the API code documentation for details.
"""
return self._arm.send_cmd_sync(command=command)
def get_position(self, is_radian=None):
"""
Get the cartesian position
Note:
1. If you want the returned value (roll/pitch/yaw) to be in radians, set the parameter is_radian to True
ex: code, pos = arm.get_position(is_radian=True)
:param is_radian: the returned value (only roll/pitch/yaw) is in radians or not, default is self.default_is_radian
:return: tuple((code, [x, y, z, roll, pitch, yaw])), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_position(is_radian=is_radian)
def set_position(self, x=None, y=None, z=None, roll=None, pitch=None, yaw=None, radius=None,
speed=None, mvacc=None, mvtime=None, relative=False, is_radian=None,
wait=False, timeout=None, **kwargs):
"""
Set the cartesian position, the API will modify self.last_used_position value
Note:
1. If it is xArm5, ensure that the current robotic arm has a roll value of 180° (or π rad) and a pitch value of 0 before calling this interface.
2. If it is xArm5, roll must be set to 180° (or π rad) and pitch must be set to 0
3. If the parameter (roll/pitch/yaw) you are passing is in radians, be sure to set the parameter is_radian to True.
ex: code = arm.set_position(x=300, y=0, z=200, roll=-3.14, pitch=0, yaw=0, is_radian=True)
4. If you want to wait for the robot to complete this action and then return, please set the parameter wait to True.
ex: code = arm.set_position(x=300, y=0, z=200, roll=180, pitch=0, yaw=0, is_radian=False, wait=True)
5. This interface is only used in the base coordinate system.
:param x: cartesian position x, (unit: mm), default is self.last_used_position[0]
:param y: cartesian position y, (unit: mm), default is self.last_used_position[1]
:param z: cartesian position z, (unit: mm), default is self.last_used_position[2]
:param roll: rotate around the X axis, (unit: rad if is_radian is True else °), default is self.last_used_position[3]
:param pitch: rotate around the Y axis, (unit: rad if is_radian is True else °), default is self.last_used_position[4]
:param yaw: rotate around the Z axis, (unit: rad if is_radian is True else °), default is self.last_used_position[5]
:param radius: move radius; if radius is None or less than 0, MoveLine is used, otherwise MoveArcLine
MoveLine: Linear motion
ex: code = arm.set_position(..., radius=None)
MoveArcLine: Linear arc motion with interpolation
ex: code = arm.set_position(..., radius=0)
Note: Need to set radius>=0
:param speed: move speed (mm/s, rad/s), default is self.last_used_tcp_speed
:param mvacc: move acceleration (mm/s^2, rad/s^2), default is self.last_used_tcp_acc
:param mvtime: 0, reserved
:param relative: relative move or not
:param is_radian: the roll/pitch/yaw in radians or not, default is self.default_is_radian
:param wait: whether to wait for the arm to complete, default is False
:param timeout: maximum waiting time(unit: second), default is None(no timeout), only valid if wait is True
:param kwargs: reserved
:return: code
code: See the API code documentation for details.
code < 0: the last_used_position/last_used_tcp_speed/last_used_tcp_acc will not be modified
code >= 0: the last_used_position/last_used_tcp_speed/last_used_tcp_acc will be modified
"""
return self._arm.set_position(x=x, y=y, z=z, roll=roll, pitch=pitch, yaw=yaw, radius=radius,
speed=speed, mvacc=mvacc, mvtime=mvtime, relative=relative,
is_radian=is_radian, wait=wait, timeout=timeout, **kwargs)
def set_tool_position(self, x=0, y=0, z=0, roll=0, pitch=0, yaw=0,
speed=None, mvacc=None, mvtime=None, is_radian=None,
wait=False, timeout=None, **kwargs):
"""
Movement relative to the tool coordinate system
Note:
1. This interface is moving relative to the current tool coordinate system
2. The tool coordinate system is not fixed and varies with position.
3. This interface is only used in the tool coordinate system.
:param x: the x coordinate relative to the current tool coordinate system, (unit: mm), default is 0
:param y: the y coordinate relative to the current tool coordinate system, (unit: mm), default is 0
:param z: the z coordinate relative to the current tool coordinate system, (unit: mm), default is 0
:param roll: the rotate around the X axis relative to the current tool coordinate system, (unit: rad if is_radian is True else °), default is 0
:param pitch: the rotate around the Y axis relative to the current tool coordinate system, (unit: rad if is_radian is True else °), default is 0
:param yaw: the rotate around the Z axis relative to the current tool coordinate system, (unit: rad if is_radian is True else °), default is 0
:param speed: move speed (mm/s, rad/s), default is self.last_used_tcp_speed
:param mvacc: move acceleration (mm/s^2, rad/s^2), default is self.last_used_tcp_acc
:param mvtime: 0, reserved
:param is_radian: the roll/pitch/yaw in radians or not, default is self.default_is_radian
:param wait: whether to wait for the arm to complete, default is False
:param timeout: maximum waiting time(unit: second), default is None(no timeout), only valid if wait is True
:param kwargs: reserved
:return: code
code: See the API code documentation for details.
code < 0: the last_used_tcp_speed/last_used_tcp_acc will not be modified
code >= 0: the last_used_tcp_speed/last_used_tcp_acc will be modified
"""
return self._arm.set_tool_position(x=x, y=y, z=z, roll=roll, pitch=pitch, yaw=yaw,
speed=speed, mvacc=mvacc, mvtime=mvtime,
is_radian=is_radian, wait=wait, timeout=timeout, **kwargs)
def get_servo_angle(self, servo_id=None, is_radian=None):
"""
Get the servo angle
Note:
1. If you want the returned value to be in radians, set the parameter is_radian to True
ex: code, angles = arm.get_servo_angle(is_radian=True)
2. If you want to return only the angle of a single joint, please set the parameter servo_id
ex: code, angle = arm.get_servo_angle(servo_id=2)
3. This interface is only used in the base coordinate system.
:param servo_id: 1-(Number of axes), None(8), default is None
:param is_radian: the returned value is in radians or not, default is self.default_is_radian
:return: tuple((code, angle list if servo_id is None or 8 else angle)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_servo_angle(servo_id=servo_id, is_radian=is_radian)
def set_servo_angle(self, servo_id=None, angle=None, speed=None, mvacc=None, mvtime=None,
relative=False, is_radian=None, wait=False, timeout=None, radius=None, **kwargs):
"""
Set the servo angle, the API will modify self.last_used_angles value
Note:
1. If the parameter angle you are passing is in radians, be sure to set the parameter is_radian to True.
ex: code = arm.set_servo_angle(servo_id=1, angle=1.57, is_radian=True)
2. If you want to wait for the robot to complete this action and then return, please set the parameter wait to True.
ex: code = arm.set_servo_angle(servo_id=1, angle=45, is_radian=False, wait=True)
3. This interface is only used in the base coordinate system.
:param servo_id: 1-(Number of axes), None(8)
1. 1-(Number of axes) indicates the corresponding joint, the parameter angle should be a numeric value
ex: code = arm.set_servo_angle(servo_id=1, angle=45, is_radian=False)
2. None(8) means all joints, default is None, the parameter angle should be a list of values whose length is the number of joints
ex: code = arm.set_servo_angle(angle=[30, -45, 0, 0, 0, 0, 0], is_radian=False)
:param angle: angle or angle list, (unit: rad if is_radian is True else °)
1. If servo_id is 1-(Number of axes), angle should be a numeric value
ex: code = arm.set_servo_angle(servo_id=1, angle=45, is_radian=False)
2. If servo_id is None or 8, angle should be a list of values whose length is the number of joints
like [axis-1, axis-2, axis-3, axis-4, axis-5, axis-6, axis-7]
ex: code = arm.set_servo_angle(angle=[30, -45, 0, 0, 0, 0, 0], is_radian=False)
:param speed: move speed (unit: rad/s if is_radian is True else °/s), default is self.last_used_joint_speed
:param mvacc: move acceleration (unit: rad/s^2 if is_radian is True else °/s^2), default is self.last_used_joint_acc
:param mvtime: 0, reserved
:param relative: relative move or not
:param is_radian: the angle in radians or not, default is self.default_is_radian
:param wait: whether to wait for the arm to complete, default is False
:param timeout: maximum waiting time(unit: second), default is None(no timeout), only valid if wait is True
:param radius: move radius; if radius is None or less than 0, MoveJoint is used, otherwise MoveArcJoint
Note: Only available if version > 1.5.20
Note: The blending radius cannot be greater than the track length.
MoveJoint: joint motion
ex: code = arm.set_servo_angle(..., radius=None)
MoveArcJoint: joint motion with blending (interpolation)
ex: code = arm.set_servo_angle(..., radius=0)
Note: Need to set radius>=0
:param kwargs: reserved
:return: code
code: See the API code documentation for details.
code < 0: the last_used_angles/last_used_joint_speed/last_used_joint_acc will not be modified
code >= 0: the last_used_angles/last_used_joint_speed/last_used_joint_acc will be modified
"""
return self._arm.set_servo_angle(servo_id=servo_id, angle=angle, speed=speed, mvacc=mvacc, mvtime=mvtime,
relative=relative, is_radian=is_radian, wait=wait, timeout=timeout, radius=radius, **kwargs)
def set_servo_angle_j(self, angles, speed=None, mvacc=None, mvtime=None, is_radian=None, **kwargs):
"""
Set the servo angle; only the most recent instruction is executed, and the arm must be in servo motion mode (self.set_mode(1))
Note:
1. This interface does not modify the value of last_used_angles/last_used_joint_speed/last_used_joint_acc
2. This interface is only used in the base coordinate system.
:param angles: angle list, (unit: rad if is_radian is True else °)
:param speed: speed, reserved
:param mvacc: acceleration, reserved
:param mvtime: 0, reserved
:param is_radian: the angles in radians or not, default is self.default_is_radian
:param kwargs: reserved
:return: code
code: See the API code documentation for details.
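ex (a hedged streaming sketch, assuming `import time` and that `trajectory` is a precomputed list of joint-angle lists in radians):
arm.set_mode(1)                       # servo motion mode is required
arm.set_state(0)
for angles in trajectory:
    arm.set_servo_angle_j(angles, is_radian=True)
    time.sleep(0.01)                  # stream targets at an assumed 100 Hz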
"""
return self._arm.set_servo_angle_j(angles, speed=speed, mvacc=mvacc, mvtime=mvtime, is_radian=is_radian, **kwargs)
def set_servo_cartesian(self, mvpose, speed=None, mvacc=None, mvtime=0, is_radian=None, is_tool_coord=False, **kwargs):
"""
Set the servo cartesian; only the most recent instruction is executed, and the arm must be in servo motion mode (self.set_mode(1))
:param mvpose: cartesian position, [x(mm), y(mm), z(mm), roll(rad or °), pitch(rad or °), yaw(rad or °)]
:param speed: move speed (mm/s), reserved
:param mvacc: move acceleration (mm/s^2), reserved
:param mvtime: 0, reserved
:param is_radian: the roll/pitch/yaw of mvpose in radians or not, default is self.default_is_radian
:param is_tool_coord: is tool coordinate or not
:param kwargs: reserved
:return: code
code: See the API code documentation for details.
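ex (a minimal sketch; the pose values are assumptions and servo motion mode must be active first):
arm.set_mode(1)
arm.set_state(0)
code = arm.set_servo_cartesian([300, 0, 200, 180, 0, 0], is_radian=False)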
"""
return self._arm.set_servo_cartesian(mvpose, speed=speed, mvacc=mvacc, mvtime=mvtime, is_radian=is_radian,
is_tool_coord=is_tool_coord, **kwargs)
def move_circle(self, pose1, pose2, percent, speed=None, mvacc=None, mvtime=None, is_radian=None, wait=False, timeout=None, **kwargs):
"""
The motion calculates the trajectory of the space circle according to the three-point coordinates.
The three-point coordinates are (current starting point, pose1, pose2).
:param pose1: cartesian position, [x(mm), y(mm), z(mm), roll(rad or °), pitch(rad or °), yaw(rad or °)]
:param pose2: cartesian position, [x(mm), y(mm), z(mm), roll(rad or °), pitch(rad or °), yaw(rad or °)]
:param percent: the arc length to move, as a percentage of the circle's circumference (100 means one full circle)
:param speed: move speed (mm/s, rad/s), default is self.last_used_tcp_speed
:param mvacc: move acceleration (mm/s^2, rad/s^2), default is self.last_used_tcp_acc
:param mvtime: 0, reserved
:param is_radian: roll/pitch/yaw value is radians or not, default is self.default_is_radian
:param wait: whether to wait for the arm to complete, default is False
:param timeout: maximum waiting time(unit: second), default is None(no timeout), only valid if wait is True
:param kwargs: reserved
:return: code
code: See the API code documentation for details.
code < 0: the last_used_tcp_speed/last_used_tcp_acc will not be modified
code >= 0: the last_used_tcp_speed/last_used_tcp_acc will be modified
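ex (illustrative values in degrees; percent=50 traces half of the circle defined by the three points):
code = arm.move_circle([350, 50, 200, 180, 0, 0], [350, -50, 200, 180, 0, 0], 50, speed=100, is_radian=False, wait=True)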
"""
return self._arm.move_circle(pose1, pose2, percent, speed=speed, mvacc=mvacc, mvtime=mvtime, is_radian=is_radian, wait=wait, timeout=timeout, **kwargs)
def move_gohome(self, speed=None, mvacc=None, mvtime=None, is_radian=None, wait=False, timeout=None):
"""
Move to go home (Back to zero), the API will modify self.last_used_position and self.last_used_angles value
Warning: without limit detection
Note:
1. The API will change self.last_used_position value into [201.5, 0, 140.5, -180, 0, 0]
2. The API will change self.last_used_angles value into [0, 0, 0, 0, 0, 0, 0]
3. If you want to wait for the robot to complete this action and then return, please set the parameter wait to True.
ex: code = arm.move_gohome(wait=True)
4. This interface does not modify the value of last_used_joint_speed/last_used_joint_acc
:param speed: gohome speed (unit: rad/s if is_radian is True else °/s), default is 50 °/s
:param mvacc: gohome acceleration (unit: rad/s^2 if is_radian is True else °/s^2), default is 5000 °/s^2
:param mvtime: reserved
:param is_radian: the speed and acceleration are in radians or not, default is self.default_is_radian
:param wait: whether to wait for the arm to complete, default is False
:param timeout: maximum waiting time(unit: second), default is None(no timeout), only valid if wait is True
:return: code
code: See the API code documentation for details.
"""
return self._arm.move_gohome(speed=speed, mvacc=mvacc, mvtime=mvtime, is_radian=is_radian, wait=wait, timeout=timeout)
def move_arc_lines(self, paths, is_radian=None, times=1, first_pause_time=0.1, repeat_pause_time=0,
automatic_calibration=True, speed=None, mvacc=None, mvtime=None, wait=False):
"""
Continuous linear motion with interpolation.
Note:
1. If an error occurs, it will return early.
2. If the emergency_stop interface is called actively, it will return early.
3. The last_used_position/last_used_tcp_speed/last_used_tcp_acc will be modified.
4. The last_used_angles/last_used_joint_speed/last_used_joint_acc will not be modified.
:param paths: cartesian path list
1. Specify arc radius: [[x, y, z, roll, pitch, yaw, radius], ....]
2. Do not specify arc radius (radius=0): [[x, y, z, roll, pitch, yaw], ....]
3. If you want to plan continuous motion, set radius>0.
:param is_radian: roll/pitch/yaw of paths are in radians or not, default is self.default_is_radian
:param times: repeat times, 0 is infinite loop, default is 1
:param first_pause_time: sleep time at first, purpose is to cache the commands and plan continuous motion, default is 0.1s
:param repeat_pause_time: interval between repeated movements, unit: second
:param automatic_calibration: automatic calibration or not, default is True
:param speed: move speed (mm/s, rad/s), default is self.last_used_tcp_speed
:param mvacc: move acceleration (mm/s^2, rad/s^2), default is self.last_used_tcp_acc
:param mvtime: 0, reserved
:param wait: whether to wait for the arm to complete, default is False
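ex (a hedged sketch; the coordinates and radii are assumptions, radius>0 blends the corner between segments):
paths = [[300, 0, 200, 180, 0, 0, 50],
         [300, 100, 200, 180, 0, 0, 50],
         [400, 100, 200, 180, 0, 0, 0]]
arm.move_arc_lines(paths, speed=100, times=1, wait=True)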
"""
return self._arm.move_arc_lines(paths, is_radian=is_radian, times=times, first_pause_time=first_pause_time,
repeat_pause_time=repeat_pause_time, automatic_calibration=automatic_calibration,
speed=speed, mvacc=mvacc, mvtime=mvtime, wait=wait)
def set_servo_attach(self, servo_id=None):
"""
Attach the servo
:param servo_id: 1-(Number of axes), 8, if servo_id is 8, will attach all servo
1. 1-(Number of axes): attach only one joint
ex: arm.set_servo_attach(servo_id=1)
2. 8: attach all joints
ex: arm.set_servo_attach(servo_id=8)
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_servo_attach(servo_id=servo_id)
def set_servo_detach(self, servo_id=None):
"""
Detach the servo, be sure to do protective work before unlocking to avoid injury or damage.
:param servo_id: 1-(Number of axes), 8, if servo_id is 8, will detach all servo
1. 1-(Number of axes): detach only one joint
ex: arm.set_servo_detach(servo_id=1)
2. 8: detach all joints
ex: arm.set_servo_detach(servo_id=8)
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_servo_detach(servo_id=servo_id)
def get_version(self):
"""
Get the xArm firmware version
:return: tuple((code, version)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_version()
def get_robot_sn(self):
"""
Get the xArm sn
:return: tuple((code, sn)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_robot_sn()
def check_verification(self):
"""
Check verification
:return: tuple((code, status)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
status:
0: verified
other: not verified
"""
return self._arm.check_verification()
def shutdown_system(self, value=1):
"""
Shutdown the xArm controller system
:param value: 1: remote shutdown
:return: code
code: See the API code documentation for details.
"""
return self._arm.shutdown_system(value=value)
def get_trajectories(self):
"""
Get the trajectories
Note:
1. This interface relies on xArmStudio 1.2.0 or above
2. This interface relies on Firmware 1.2.0 or above
:return: tuple((code, trajectories))
code: See the API code documentation for details.
trajectories: [{
'name': name, # The name of the trajectory
'duration': duration, # The duration of the trajectory (seconds)
}]
"""
return self._arm.get_trajectories()
def start_record_trajectory(self):
"""
Start trajectory recording; only available in teach mode, so you need to set joint teaching mode first.
Note:
1. This interface relies on Firmware 1.2.0 or above
2. set joint teaching mode: set_mode(2); set_state(0)
:return: code
code: See the API code documentation for details.
"""
return self._arm.start_record_trajectory()
def stop_record_trajectory(self, filename=None):
"""
Stop trajectory recording
Note:
1. This interface relies on Firmware 1.2.0 or above
:param filename: The name to save
1. Only strings consisting of English letters or digits are supported, and the length is no more than 50.
2. The trajectory is saved in the controller box.
3. If the filename is None, recording just stops without saving; you need to manually call `save_record_trajectory` to save it before changing the mode, otherwise it will be lost
4. This action will overwrite the trajectory with the same name
5. Empty the trajectory in memory after saving
:return: code
code: See the API code documentation for details.
"""
return self._arm.stop_record_trajectory(filename=filename)
def save_record_trajectory(self, filename, wait=True, timeout=2):
"""
Save the trajectory you just recorded
Note:
1. This interface relies on Firmware 1.2.0 or above
:param filename: The name to save
1. Only strings consisting of English letters or digits are supported, and the length is no more than 50.
2. The trajectory is saved in the controller box.
3. This action will overwrite the trajectory with the same name
4. Empty the trajectory in memory after saving, so repeated calls will cause the recorded trajectory to be covered by an empty trajectory.
:param wait: Whether to wait for saving, default is True
:param timeout: Timeout waiting for saving to complete
:return: code
code: See the API code documentation for details.
"""
return self._arm.save_record_trajectory(filename, wait=wait, timeout=timeout)
def load_trajectory(self, filename, wait=True, timeout=10):
"""
Load the trajectory
Note:
1. This interface relies on Firmware 1.2.0 or above
:param filename: The name of the trajectory to load
:param wait: Whether to wait for loading, default is True
:param timeout: Timeout waiting for loading to complete
:return: code
code: See the API code documentation for details.
"""
return self._arm.load_trajectory(filename, wait=wait, timeout=timeout)
def playback_trajectory(self, times=1, filename=None, wait=True, double_speed=1):
"""
Playback trajectory
Note:
1. This interface relies on Firmware 1.2.0 or above
:param times: Number of playbacks,
1. Only valid when the current position of the arm is the end position of the track, otherwise it will only be played once.
:param filename: The name of the trajectory to play back
1. If filename is None, you need to manually call the `load_trajectory` to load the trajectory.
:param wait: whether to wait for the arm to complete, default is True
:param double_speed: double speed, only support 1/2/4, default is 1, only available if version > 1.2.11
:return: code
code: See the API code documentation for details.
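ex (an end-to-end sketch; 'test' is an assumed filename and `import time` is assumed):
arm.set_mode(2); arm.set_state(0)        # joint teaching mode for recording
arm.start_record_trajectory()
time.sleep(10)                           # drag-teach the arm for a while
arm.stop_record_trajectory('test')       # stop and save as 'test'
arm.set_mode(0); arm.set_state(0)
arm.playback_trajectory(times=1, filename='test', wait=True)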
"""
return self._arm.playback_trajectory(times=times, filename=filename, wait=wait, double_speed=double_speed)
def get_trajectory_rw_status(self):
"""
Get trajectory read/write status
:return: (code, status)
code: See the API code documentation for details.
status:
0: no read/write
1: loading
2: load success
3: load failed
4: saving
5: save success
6: save failed
"""
return self._arm.get_trajectory_rw_status()
def get_reduced_mode(self):
"""
Get reduced mode
Note:
1. This interface relies on Firmware 1.2.0 or above
:return: tuple((code, mode))
code: See the API code documentation for details.
mode: 0 or 1, 1 means that the reduced mode is turned on. 0 means that the reduced mode is not turned on
"""
return self._arm.get_reduced_mode()
def get_reduced_states(self, is_radian=None):
"""
Get states of the reduced mode
Note:
1. This interface relies on Firmware 1.2.0 or above
:param is_radian: the max_joint_speed of the states is in radians or not, default is self.default_is_radian
:return: tuple((code, states))
code: See the API code documentation for details.
states: [....]
if version > 1.2.11:
states: [
reduced_mode_is_on,
[reduced_x_max, reduced_x_min, reduced_y_max, reduced_y_min, reduced_z_max, reduced_z_min],
reduced_max_tcp_speed,
reduced_max_joint_speed,
joint_ranges([joint-1-min, joint-1-max, ..., joint-7-min, joint-7-max]),
safety_boundary_is_on,
collision_rebound_is_on,
]
if version <= 1.2.11:
states: [
reduced_mode_is_on,
[reduced_x_max, reduced_x_min, reduced_y_max, reduced_y_min, reduced_z_max, reduced_z_min],
reduced_max_tcp_speed,
reduced_max_joint_speed,
]
"""
return self._arm.get_reduced_states(is_radian=is_radian)
def set_reduced_mode(self, on):
"""
Turn on/off reduced mode
Note:
1. This interface relies on Firmware 1.2.0 or above
:param on: True/False
ex: code = arm.set_reduced_mode(True)  # turn on the reduced mode
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_reduced_mode(on)
def set_reduced_max_tcp_speed(self, speed):
"""
Set the maximum tcp speed of the reduced mode
Note:
1. This interface relies on Firmware 1.2.0 or above
2. Takes effect only after the reduced mode is re-enabled (`set_reduced_mode(True)`)
:param speed: speed (mm/s)
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_reduced_max_tcp_speed(speed)
def set_reduced_max_joint_speed(self, speed, is_radian=None):
"""
Set the maximum joint speed of the reduced mode
Note:
1. This interface relies on Firmware 1.2.0 or above
2. Takes effect only after the reduced mode is re-enabled (`set_reduced_mode(True)`)
:param speed: speed (°/s or rad/s)
:param is_radian: the speed is in radians or not, default is self.default_is_radian
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_reduced_max_joint_speed(speed, is_radian=is_radian)
def set_reduced_tcp_boundary(self, boundary):
"""
Set the boundary of the safety boundary mode
Note:
1. This interface relies on Firmware 1.2.0 or above
2. Takes effect only after the reduced mode is re-enabled (`set_reduced_mode(True)`)
:param boundary: [x_max, x_min, y_max, y_min, z_max, z_min]
:return: code
code: See the API code documentation for details.
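ex (a hedged sketch; the boundary values are assumptions):
arm.set_reduced_tcp_boundary([400, -400, 300, -300, 500, 0])
arm.set_reduced_mode(True)    # re-enable reduced mode so the new boundary takes effect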
"""
return self._arm.set_reduced_tcp_boundary(boundary)
def set_reduced_joint_range(self, joint_range, is_radian=None):
"""
Set the joint range of the reduced mode
Note:
1. This interface relies on Firmware 1.2.11 or above
2. Takes effect only after the reduced mode is re-enabled (`set_reduced_mode(True)`)
:param joint_range: [joint-1-min, joint-1-max, ..., joint-7-min, joint-7-max]
:param is_radian: the param joint_range are in radians or not, default is self.default_is_radian
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_reduced_joint_range(joint_range, is_radian=is_radian)
def set_fence_mode(self, on):
"""
Set the fence mode, turn the fence mode on/off
Note:
1. This interface relies on Firmware 1.2.11 or above
:param on: True/False
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_fense_mode(on)
def set_collision_rebound(self, on):
"""
Set the collision rebound, turn collision rebound on/off
Note:
1. This interface relies on Firmware 1.2.11 or above
:param on: True/False
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_collision_rebound(on)
def set_world_offset(self, offset, is_radian=None):
"""
Set the base coordinate offset
Note:
1. This interface relies on Firmware 1.2.11 or above
:param offset: [x, y, z, roll, pitch, yaw]
:param is_radian: the roll/pitch/yaw in radians or not, default is self.default_is_radian
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_world_offset(offset, is_radian=is_radian)
def get_is_moving(self):
"""
Check whether the xArm is moving
:return: True/False
"""
return self._arm.get_is_moving()
def get_state(self):
"""
Get state
:return: tuple((code, state)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
state:
1: in motion
2: sleeping
3: suspended
4: stopping
"""
return self._arm.get_state()
def set_state(self, state=0):
"""
Set the xArm state
:param state: default is 0
0: sport state
3: pause state
4: stop state
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_state(state=state)
def set_mode(self, mode=0):
"""
Set the xArm mode
:param mode: default is 0
0: position control mode
1: servo motion mode
Note: the use of the set_servo_angle_j interface must first be set to this mode
Note: the use of the set_servo_cartesian interface must first be set to this mode
2: joint teaching mode
Note: before using this mode, make sure the arm has been identified and that the control box and the arm used for identification match one-to-one.
3: cartesian teaching mode (invalid)
4: joint velocity control mode
5: cartesian velocity control mode
:return: code
code: See the API code documentation for details.
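ex (a typical sketch; after changing the mode the state must be set back to 0):
arm.set_mode(1)    # e.g. switch to servo motion mode
arm.set_state(0)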
"""
return self._arm.set_mode(mode=mode)
def get_cmdnum(self):
"""
Get the cmd count in cache
:return: tuple((code, cmd_num)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_cmdnum()
def get_err_warn_code(self, show=False, lang='en'):
"""
Get the controller error and warn code
:param show: show the detail info if True
:param lang: show language, en/cn, default is en, only available if show is True
:return: tuple((code, [error_code, warn_code])), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
error_code: See Chapter 7 of the xArm User Manual for details.
warn_code: See Chapter 7 of the xArm User Manual for details.
"""
return self._arm.get_err_warn_code(show=show, lang=lang)
def clean_error(self):
"""
Clean the error; after cleaning the error, you need to manually enable motion (arm.motion_enable(True)) and set the state (arm.set_state(state=0))
:return: code
code: See the API code documentation for details.
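ex (the recovery sequence described above):
arm.clean_error()
arm.motion_enable(True)
arm.set_state(state=0)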
"""
return self._arm.clean_error()
def clean_warn(self):
"""
Clean the warn
:return: code
code: See the API code documentation for details.
"""
return self._arm.clean_warn()
def motion_enable(self, enable=True, servo_id=None):
"""
Motion enable
:param enable: True/False
:param servo_id: 1-(Number of axes), None(8)
:return: code
code: See the API code documentation for details.
"""
return self._arm.motion_enable(servo_id=servo_id, enable=enable)
def reset(self, speed=None, mvacc=None, mvtime=None, is_radian=None, wait=False, timeout=None):
"""
Reset the xArm
Warning: without limit detection
Note:
1. If there are errors or warnings, this interface will clear the warnings and errors.
2. If not ready, the api will auto enable motion and set state
3. This interface does not modify the value of last_used_angles/last_used_joint_speed/last_used_joint_acc
:param speed: reset speed (unit: rad/s if is_radian is True else °/s), default is 50 °/s
:param mvacc: reset acceleration (unit: rad/s^2 if is_radian is True else °/s^2), default is 5000 °/s^2
:param mvtime: reserved
:param is_radian: the speed and acceleration are in radians or not, default is self.default_is_radian
:param wait: whether to wait for the arm to complete, default is False
:param timeout: maximum waiting time(unit: second), default is None(no timeout), only valid if wait is True
"""
return self._arm.reset(speed=speed, mvacc=mvacc, mvtime=mvtime, is_radian=is_radian, wait=wait, timeout=timeout)
def set_pause_time(self, sltime, wait=False):
"""
Set the arm pause time, xArm will pause for sltime seconds
:param sltime: sleep time, unit: second
:param wait: wait or not, default is False
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_pause_time(sltime, wait=wait)
def set_tcp_offset(self, offset, is_radian=None, **kwargs):
"""
Set the tool coordinate system offset at the end
Note:
1. Do not use if not required
2. If not saved and you want to revert to the last saved value, please reset the offset by set_tcp_offset([0, 0, 0, 0, 0, 0])
3. If not saved, it will be lost after reboot
4. The save_conf interface can record the current settings and will not be lost after the restart.
5. The clean_conf interface can restore system default settings
:param offset: [x, y, z, roll, pitch, yaw]
:param is_radian: the roll/pitch/yaw in radians or not, default is self.default_is_radian
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_tcp_offset(offset, is_radian=is_radian, **kwargs)
def set_tcp_jerk(self, jerk):
"""
Set the translational jerk of Cartesian space
Note:
1. Do not use if not required
2. If not saved, it will be lost after reboot
3. The save_conf interface can record the current settings and will not be lost after the restart.
4. The clean_conf interface can restore system default settings
:param jerk: jerk (mm/s^3)
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_tcp_jerk(jerk)
def set_tcp_maxacc(self, acc):
"""
Set the max translational acceleration of Cartesian space
Note:
1. Do not use if not required
2. If not saved, it will be lost after reboot
3. The save_conf interface can record the current settings and will not be lost after the restart.
4. The clean_conf interface can restore system default settings
:param acc: max acceleration (mm/s^2)
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_tcp_maxacc(acc)
def set_joint_jerk(self, jerk, is_radian=None):
"""
Set the jerk of Joint space
Note:
1. Do not use if not required
2. If not saved, it will be lost after reboot
3. The save_conf interface can record the current settings and will not be lost after the restart.
4. The clean_conf interface can restore system default settings
:param jerk: jerk (°/s^3 or rad/s^3)
:param is_radian: the jerk in radians or not, default is self.default_is_radian
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_joint_jerk(jerk, is_radian=is_radian)
def set_joint_maxacc(self, acc, is_radian=None):
"""
Set the max acceleration of Joint space
Note:
1. Do not use if not required
2. If not saved, it will be lost after reboot
3. The save_conf interface can record the current settings and will not be lost after the restart.
4. The clean_conf interface can restore system default settings
:param acc: max acceleration (°/s^2 or rad/s^2)
:param is_radian: the acc in radians or not, default is self.default_is_radian
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_joint_maxacc(acc, is_radian=is_radian)
def set_tcp_load(self, weight, center_of_gravity):
"""
Set the end load of xArm
Note:
1. Do not use if not required
2. If not saved, it will be lost after reboot
3. The save_conf interface can record the current settings and will not be lost after the restart.
4. The clean_conf interface can restore system default settings
:param weight: load weight (unit: kg)
:param center_of_gravity: load center of gravity, such as [x(mm), y(mm), z(mm)]
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_tcp_load(weight, center_of_gravity)
def set_collision_sensitivity(self, value):
"""
Set the sensitivity of collision
Note:
1. Do not use if not required
2. If not saved, it will be lost after reboot
3. The save_conf interface can record the current settings and will not be lost after the restart.
4. The clean_conf interface can restore system default settings
:param value: sensitivity value, 0~5
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_collision_sensitivity(value)
def set_teach_sensitivity(self, value):
"""
Set the sensitivity of drag and teach
Note:
1. Do not use if not required
2. If not saved, it will be lost after reboot
3. The save_conf interface can record the current settings and will not be lost after the restart.
4. The clean_conf interface can restore system default settings
:param value: sensitivity value, 1~5
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_teach_sensitivity(value)
def set_gravity_direction(self, direction):
"""
Set the direction of gravity
Note:
1. Do not use if not required
2. If not saved, it will be lost after reboot
3. The save_conf interface can record the current settings and will not be lost after the restart.
4. The clean_conf interface can restore system default settings
:param direction: direction of gravity, as a vector [x, y, z] (e.g. [0, 0, -1] for a desktop-mounted arm)
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_gravity_direction(direction=direction)
def set_mount_direction(self, base_tilt_deg, rotation_deg, is_radian=None):
"""
Set the mount direction
Note:
1. Do not use if not required
2. If not saved, it will be lost after reboot
3. The save_conf interface can record the current settings and will not be lost after the restart.
4. The clean_conf interface can restore system default settings
:param base_tilt_deg: tilt degree
:param rotation_deg: rotation degree
:param is_radian: the base_tilt_deg/rotation_deg in radians or not, default is self.default_is_radian
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_mount_direction(base_tilt_deg, rotation_deg, is_radian=is_radian)
def clean_conf(self):
"""
Clean current config and restore system default settings
Note:
1. This interface will clear the current settings and restore to the original settings (system default settings)
:return: code
code: See the API code documentation for details.
"""
return self._arm.clean_conf()
def save_conf(self):
"""
Save config
Note:
1. This interface can record the current settings and will not be lost after the restart.
2. The clean_conf interface can restore system default settings
:return: code
code: See the API code documentation for details.
"""
return self._arm.save_conf()
def get_inverse_kinematics(self, pose, input_is_radian=None, return_is_radian=None):
"""
Get inverse kinematics
:param pose: [x(mm), y(mm), z(mm), roll(rad or °), pitch(rad or °), yaw(rad or °)]
Note: the roll/pitch/yaw unit is radian if input_is_radian is True, else °
:param input_is_radian: the param pose value(only roll/pitch/yaw) is in radians or not, default is self.default_is_radian
:param return_is_radian: the returned value is in radians or not, default is self.default_is_radian
:return: tuple((code, angles)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
angles: [angle-1(rad or °), angle-2, ..., angle-(Number of axes)] or []
Note: the returned angle value is radians if return_is_radian is True, else °
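ex (a round-trip sketch; the pose values are assumptions):
code, angles = arm.get_inverse_kinematics([300, 0, 200, 180, 0, 0], input_is_radian=False, return_is_radian=False)
if code == 0:
    code, pose = arm.get_forward_kinematics(angles, input_is_radian=False, return_is_radian=False)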
"""
return self._arm.get_inverse_kinematics(pose, input_is_radian=input_is_radian, return_is_radian=return_is_radian)
def get_forward_kinematics(self, angles, input_is_radian=None, return_is_radian=None):
"""
Get forward kinematics
:param angles: [angle-1, angle-2, ..., angle-n], n is the number of axes of the arm
:param input_is_radian: the param angles value is in radians or not, default is self.default_is_radian
:param return_is_radian: the returned value is in radians or not, default is self.default_is_radian
:return: tuple((code, pose)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
pose: [x(mm), y(mm), z(mm), roll(rad or °), pitch(rad or °), yaw(rad or °)] or []
Note: the roll/pitch/yaw value is radians if return_is_radian is True, else °
"""
return self._arm.get_forward_kinematics(angles, input_is_radian=input_is_radian, return_is_radian=return_is_radian)
def is_tcp_limit(self, pose, is_radian=None):
"""
Check whether the tcp pose is within the limits
:param pose: [x, y, z, roll, pitch, yaw]
:param is_radian: roll/pitch/yaw value is radians or not, default is self.default_is_radian
:return: tuple((code, limit)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
limit: True/False/None, limit or not, or failed
"""
return self._arm.is_tcp_limit(pose, is_radian=is_radian)
def is_joint_limit(self, joint, is_radian=None):
"""
Check whether the joint angles are within the limits
:param joint: [angle-1, angle-2, ..., angle-n], n is the number of axes of the arm
:param is_radian: angle value is radians or not, default is self.default_is_radian
:return: tuple((code, limit)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
limit: True/False/None, limit or not, or failed
"""
return self._arm.is_joint_limit(joint, is_radian=is_radian)
def emergency_stop(self):
"""
Emergency stop (set_state(4) -> motion_enable(True) -> set_state(0))
Note:
1. This interface does not automatically clear the error. If there is an error, you need to handle it according to the error code.
"""
return self._arm.emergency_stop()
def set_gripper_enable(self, enable, **kwargs):
"""
Set the gripper enable
:param enable: enable or not
Note: such as code = arm.set_gripper_enable(True)  # turn on the gripper
:return: code
code: See the Gripper code documentation for details.
"""
return self._arm.set_gripper_enable(enable, **kwargs)
def set_gripper_mode(self, mode, **kwargs):
"""
Set the gripper mode
:param mode: 0: location mode
Note: such as code = arm.set_gripper_mode(0)
:return: code
code: See the Gripper code documentation for details.
"""
return self._arm.set_gripper_mode(mode, **kwargs)
def get_gripper_position(self, **kwargs):
"""
Get the gripper position
:return: tuple((code, pos)), only when code is 0, the returned result is correct.
code: See the Gripper code documentation for details.
"""
return self._arm.get_gripper_position(**kwargs)
def set_gripper_position(self, pos, wait=False, speed=None, auto_enable=False, timeout=None, **kwargs):
"""
Set the gripper position
:param pos: pos
:param wait: wait or not, default is False
:param speed: speed, unit: r/min
:param auto_enable: auto enable or not, default is False
:param timeout: wait time, unit:second, default is 10s
:return: code
code: See the Gripper code documentation for details.
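ex (a typical sequence; the speed and position values are assumptions):
arm.set_gripper_enable(True)
arm.set_gripper_speed(2000)
code = arm.set_gripper_position(600, wait=True)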
"""
return self._arm.set_gripper_position(pos, wait=wait, speed=speed, auto_enable=auto_enable, timeout=timeout, **kwargs)
def set_gripper_speed(self, speed, **kwargs):
"""
Set the gripper speed
:param speed:
:return: code
code: See the Gripper code documentation for details.
"""
return self._arm.set_gripper_speed(speed, **kwargs)
def get_gripper_err_code(self, **kwargs):
"""
Get the gripper error code
:return: tuple((code, err_code)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
err_code: See the Gripper code documentation for details.
"""
return self._arm.get_gripper_err_code(**kwargs)
def clean_gripper_error(self, **kwargs):
"""
Clean the gripper error
:return: code
code: See the Gripper code documentation for details.
"""
return self._arm.clean_gripper_error(**kwargs)
def get_tgpio_digital(self, ionum=None):
"""
Get the digital value of the specified Tool GPIO
:param ionum: 0 or 1 or None(both 0 and 1), default is None
:return: tuple((code, value or value list)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_tgpio_digital(ionum)
def set_tgpio_digital(self, ionum, value, delay_sec=None):
"""
Set the digital value of the specified Tool GPIO
:param ionum: 0 or 1
:param value: value
:param delay_sec: delay effective time from the current start, in seconds, default is None(effective immediately)
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_tgpio_digital(ionum=ionum, value=value, delay_sec=delay_sec)
def get_tgpio_analog(self, ionum=None):
"""
Get the analog value of the specified Tool GPIO
:param ionum: 0 or 1 or None(both 0 and 1), default is None
:return: tuple((code, value or value list)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_tgpio_analog(ionum)
def get_vacuum_gripper(self):
"""
Get vacuum gripper state
:return: tuple((code, state)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
state: suction cup state
0: suction cup is off
1: suction cup is on
"""
return self._arm.get_suction_cup()
def set_vacuum_gripper(self, on, wait=False, timeout=3, delay_sec=None):
"""
Set vacuum gripper state
:param on: open or not
on=True: equivalent to calling `set_tgpio_digital(0, 1)` and `set_tgpio_digital(1, 0)`
on=False: equivalent to calling `set_tgpio_digital(0, 0)` and `set_tgpio_digital(1, 1)`
:param wait: wait or not, default is False
:param timeout: wait time, unit:second, default is 3s
:param delay_sec: delay effective time from the current start, in seconds, default is None(effective immediately)
:return: code
code: See the API code documentation for details.
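ex (illustrative):
code = arm.set_vacuum_gripper(True, wait=True)    # turn suction on and wait up to the 3s default timeout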
"""
return self._arm.set_suction_cup(on, wait=wait, timeout=timeout, delay_sec=delay_sec)
def get_cgpio_digital(self, ionum=None):
"""
Get the digital value of the specified Controller GPIO
:param ionum: 0~7 or None(both 0~7), default is None
:return: tuple((code, value or value list)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_cgpio_digital(ionum=ionum)
def get_cgpio_analog(self, ionum=None):
"""
Get the analog value of the specified Controller GPIO
:param ionum: 0 or 1 or None(both 0 and 1), default is None
:return: tuple((code, value or value list)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_cgpio_analog(ionum=ionum)
def set_cgpio_digital(self, ionum, value, delay_sec=None):
"""
Set the digital value of the specified Controller GPIO
:param ionum: 0~7
:param value: value
:param delay_sec: delay effective time from the current start, in seconds, default is None(effective immediately)
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_cgpio_digital(ionum=ionum, value=value, delay_sec=delay_sec)
def set_cgpio_analog(self, ionum, value):
"""
Set the analog value of the specified Controller GPIO
:param ionum: 0 or 1
:param value: value
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_cgpio_analog(ionum=ionum, value=value)
def set_cgpio_digital_input_function(self, ionum, fun):
"""
Set the digital input functional mode of the Controller GPIO
:param ionum: 0~7
:param fun: functional mode
0: general input
1: external emergency stop
2: reserved, protection reset
3: reserved, reduced mode
4: reserved, operating mode
5: reserved, three-state switching signal
11: offline task
12: teaching mode
13: reduced mode
14: enable arm
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_cgpio_digital_input_function(ionum=ionum, fun=fun)
def set_cgpio_digital_output_function(self, ionum, fun):
"""
Set the digital output functional mode of the specified Controller GPIO
:param ionum: 0~7
:param fun: functional mode
0: general output
1: emergency stop
2: in motion
11: has error
12: has warn
13: in collision
14: in teaching
15: in offline task
16: reduced mode
17: enable arm
18: emergency stop is pressed
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_cgpio_digital_output_function(ionum=ionum, fun=fun)
def get_cgpio_state(self):
"""
Get the state of the Controller GPIO
:return: code, states
code: See the API code documentation for details.
states: [...]
states[0]: controller gpio module state
states[0] == 0: normal
states[0] == 1: wrong
states[0] == 6: communication failure
states[1]: controller gpio module error code
states[1] == 0: normal
states[1] != 0: error code
states[2]: digital input functional gpio state
Note: digital-i-input functional gpio state = states[2] >> i & 0x01
states[3]: digital input configuring gpio state
Note: digital-i-input configuring gpio state = states[3] >> i & 0x01
states[4]: digital output functional gpio state
Note: digital-i-output functional gpio state = states[4] >> i & 0x01
states[5]: digital output configuring gpio state
Note: digital-i-output configuring gpio state = states[5] >> i & 0x01
states[6]: analog-0 input value
states[7]: analog-1 input value
states[8]: analog-0 output value
states[9]: analog-1 output value
states[10]: digital input functional info, [digital-0-input-functional-mode, ... digital-7-input-functional-mode]
states[11]: digital output functional info, [digital-0-output-functional-mode, ... digital-7-output-functional-mode]
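ex (a sketch reading digital input 3 from the functional gpio state, per the bit formula above):
code, states = arm.get_cgpio_state()
if code == 0:
    di3 = states[2] >> 3 & 0x01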
"""
return self._arm.get_cgpio_state()
def register_report_callback(self, callback=None, report_cartesian=True, report_joints=True,
report_state=True, report_error_code=True, report_warn_code=True,
report_mtable=True, report_mtbrake=True, report_cmd_num=True):
"""
Register the report callback, only available if enable_report is True
:param callback:
callback data:
{
'cartesian': [], # if report_cartesian is True
'joints': [], # if report_joints is True
'error_code': 0, # if report_error_code is True
'warn_code': 0, # if report_warn_code is True
'state': state, # if report_state is True
'mtbrake': mtbrake, # if report_mtbrake is True, and available if enable_report is True and the connect way is socket
'mtable': mtable, # if report_mtable is True, and available if enable_report is True and the connect way is socket
'cmdnum': cmdnum, # if report_cmd_num is True
}
:param report_cartesian: report cartesian or not, default is True
:param report_joints: report joints or not, default is True
:param report_state: report state or not, default is True
:param report_error_code: report error or not, default is True
:param report_warn_code: report warn or not, default is True
:param report_mtable: report motor enable states or not, default is True
:param report_mtbrake: report motor brake states or not, default is True
:param report_cmd_num: report cmdnum or not, default is True
:return: True/False
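ex (a minimal sketch; the callback just prints part of the reported data):
def on_report(data):
    print(data['state'], data['cartesian'])
arm.register_report_callback(on_report, report_joints=False)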
"""
return self._arm.register_report_callback(callback=callback,
report_cartesian=report_cartesian,
report_joints=report_joints,
report_state=report_state,
report_error_code=report_error_code,
report_warn_code=report_warn_code,
report_mtable=report_mtable,
report_mtbrake=report_mtbrake,
report_cmd_num=report_cmd_num)
def register_report_location_callback(self, callback=None, report_cartesian=True, report_joints=True):
"""
Register the report location callback, only available if enable_report is True
:param callback:
callback data:
{
"cartesian": [x, y, z, roll, pitch, yaw], ## if report_cartesian is True
"joints": [angle-1, angle-2, angle-3, angle-4, angle-5, angle-6, angle-7], ## if report_joints is True
}
:param report_cartesian: report or not, True/False, default is True
:param report_joints: report or not, True/False, default is True
:return: True/False
"""
return self._arm.register_report_location_callback(callback=callback,
report_cartesian=report_cartesian,
report_joints=report_joints)
def register_connect_changed_callback(self, callback=None):
"""
Register the connect status changed callback
:param callback:
callback data:
{
"connected": connected,
"reported": reported,
}
:return: True/False
"""
return self._arm.register_connect_changed_callback(callback=callback)
def register_state_changed_callback(self, callback=None):
"""
Register the state status changed callback, only available if enable_report is True
:param callback:
callback data:
{
"state": state,
}
:return: True/False
"""
return self._arm.register_state_changed_callback(callback=callback)
def register_mode_changed_callback(self, callback=None):
"""
Register the mode changed callback, only available if enable_report is True and the connect way is socket
:param callback:
callback data:
{
"mode": mode,
}
:return: True/False
"""
return self._arm.register_mode_changed_callback(callback=callback)
def register_mtable_mtbrake_changed_callback(self, callback=None):
"""
Register the motor enable states or motor brake states changed callback, only available if enable_report is True and the connect way is socket
:param callback:
callback data:
{
"mtable": [motor-1-motion-enable, motor-2-motion-enable, ...],
"mtbrake": [motor-1-brake-enable, motor-1-brake-enable,...],
}
:return: True/False
"""
return self._arm.register_mtable_mtbrake_changed_callback(callback=callback)
def register_error_warn_changed_callback(self, callback=None):
"""
Register the error code or warn code changed callback, only available if enable_report is True
:param callback:
callback data:
{
"error_code": error_code,
"warn_code": warn_code,
}
:return: True/False
"""
return self._arm.register_error_warn_changed_callback(callback=callback)
def register_cmdnum_changed_callback(self, callback=None):
"""
Register the cmdnum changed callback, only available if enable_report is True
:param callback:
callback data:
{
"cmdnum": cmdnum
}
:return: True/False
"""
return self._arm.register_cmdnum_changed_callback(callback=callback)
def register_temperature_changed_callback(self, callback=None):
"""
Register the temperature changed callback, only available if enable_report is True
:param callback:
callback data:
{
"temperatures": [servo-1-temperature, ...., servo-7-temperature]
}
:return: True/False
"""
return self._arm.register_temperature_changed_callback(callback=callback)
def register_count_changed_callback(self, callback=None):
"""
Register the counter value changed callback, only available if enable_report is True
:param callback:
callback data:
{
"count": counter value
}
:return: True/False
"""
return self._arm.register_count_changed_callback(callback=callback)
def register_iden_progress_changed_callback(self, callback=None):
"""
Register the Identification progress value changed callback, only available if enable_report is True
:param callback:
callback data:
{
"progress": progress value
}
:return: True/False
"""
return self._arm.register_iden_progress_changed_callback(callback=callback)
def release_report_callback(self, callback=None):
"""
Release the report callback
:param callback:
:return: True/False
"""
return self._arm.release_report_callback(callback)
def release_report_location_callback(self, callback=None):
"""
Release the location report callback
:param callback:
:return: True/False
"""
return self._arm.release_report_location_callback(callback)
def release_connect_changed_callback(self, callback=None):
"""
Release the connect changed callback
:param callback:
:return: True/False
"""
return self._arm.release_connect_changed_callback(callback)
def release_state_changed_callback(self, callback=None):
"""
Release the state changed callback
:param callback:
:return: True/False
"""
return self._arm.release_state_changed_callback(callback)
def release_mode_changed_callback(self, callback=None):
"""
Release the mode changed callback
:param callback:
:return: True/False
"""
return self._arm.release_mode_changed_callback(callback)
def release_mtable_mtbrake_changed_callback(self, callback=None):
"""
Release the motor enable states or motor brake states changed callback
:param callback:
:return: True/False
"""
return self._arm.release_mtable_mtbrake_changed_callback(callback)
def release_error_warn_changed_callback(self, callback=None):
"""
Release the error warn changed callback
:param callback:
:return: True/False
"""
return self._arm.release_error_warn_changed_callback(callback)
def release_cmdnum_changed_callback(self, callback=None):
"""
Release the cmdnum changed callback
:param callback:
:return: True/False
"""
return self._arm.release_cmdnum_changed_callback(callback)
def release_temperature_changed_callback(self, callback=None):
"""
Release the temperature changed callback
:param callback:
:return: True/False
"""
return self._arm.release_temperature_changed_callback(callback=callback)
def release_count_changed_callback(self, callback=None):
"""
Release the counter value changed callback
:param callback:
:return: True/False
"""
return self._arm.release_count_changed_callback(callback=callback)
def release_iden_progress_changed_callback(self, callback=None):
"""
Release the Identification progress value changed callback
:param callback:
:return: True/False
"""
return self._arm.release_iden_progress_changed_callback(callback=callback)
def get_servo_debug_msg(self, show=False, lang='en'):
"""
Get the servo debug msg, used only for debugging
:param show: show the detail info if True
:param lang: language, en/cn, default is en
:return: tuple((code, servo_info_list)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_servo_debug_msg(show=show, lang=lang)
def run_blockly_app(self, path, **kwargs):
"""
Run the app generated by xArmStudio software
:param path: app path
"""
return self._arm.run_blockly_app(path, **kwargs)
def run_gcode_file(self, path, **kwargs):
"""
Run the gcode file
:param path: gcode file path
"""
return self._arm.run_gcode_file(path, **kwargs)
def get_gripper_version(self):
"""
Get gripper version, only for debug
:return: (code, version)
code: See the API code documentation for details.
"""
return self._arm.get_gripper_version()
def get_servo_version(self, servo_id=1):
"""
Get servo version, only for debug
:param servo_id: servo id(1~7)
:return: (code, version)
code: See the API code documentation for details.
"""
return self._arm.get_servo_version(servo_id=servo_id)
def get_tgpio_version(self):
"""
Get tool gpio version, only for debug
:return: (code, version)
code: See the API code documentation for details.
"""
return self._arm.get_tgpio_version()
def get_harmonic_type(self, servo_id=1):
"""
Get harmonic type, only for debug
:param servo_id: servo id(1~7)
:return: (code, type)
code: See the API code documentation for details.
"""
return self._arm.get_harmonic_type(servo_id=servo_id)
def get_hd_types(self):
"""
Get harmonic types, only for debug
:return: (code, types)
code: See the API code documentation for details.
"""
return self._arm.get_hd_types()
def set_counter_reset(self):
"""
Reset counter value
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_counter_reset()
def set_counter_increase(self, val=1):
"""
Set counter plus value, only support plus 1
:param val: reserved
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_counter_increase(val)
def set_tgpio_digital_with_xyz(self, ionum, value, xyz, fault_tolerance_radius):
"""
Set the digital value of the specified Tool GPIO when the robot has reached the specified xyz position
:param ionum: 0 or 1
:param value: value
:param xyz: position xyz, as [x, y, z]
:param fault_tolerance_radius: fault tolerance radius
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_tgpio_digital_with_xyz(ionum, value, xyz, fault_tolerance_radius)
def set_cgpio_digital_with_xyz(self, ionum, value, xyz, fault_tolerance_radius):
"""
Set the digital value of the specified Controller GPIO when the robot has reached the specified xyz position
:param ionum: 0 ~ 7
:param value: value
:param xyz: position xyz, as [x, y, z]
:param fault_tolerance_radius: fault tolerance radius
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_cgpio_digital_with_xyz(ionum, value, xyz, fault_tolerance_radius)
def set_cgpio_analog_with_xyz(self, ionum, value, xyz, fault_tolerance_radius):
"""
Set the analog value of the specified Controller GPIO when the robot has reached the specified xyz position
:param ionum: 0 ~ 1
:param value: value
:param xyz: position xyz, as [x, y, z]
:param fault_tolerance_radius: fault tolerance radius
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_cgpio_analog_with_xyz(ionum, value, xyz, fault_tolerance_radius)
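# Position-triggered IO sketch (assumptions: `arm` is a connected XArmAPI
# instance; coordinates and radius are illustrative). Per the docstrings
# above, the output switches once the TCP reaches xyz within the fault
# tolerance radius:
#   arm.set_tgpio_digital_with_xyz(0, 1, [300, 0, 200], fault_tolerance_radius=10)
#   arm.set_position(x=300, y=0, z=200, wait=True)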
def config_tgpio_reset_when_stop(self, on_off):
"""
Config the Tool GPIO reset the digital output when the robot is in stop state
:param on_off: True/False
:return: code
code: See the API code documentation for details.
"""
return self._arm.config_io_reset_when_stop(1, on_off)
def config_cgpio_reset_when_stop(self, on_off):
"""
Config the Controller GPIO reset the digital output when the robot is in stop state
:param on_off: True/False
:return: code
code: See the API code documentation for details.
"""
return self._arm.config_io_reset_when_stop(0, on_off)
def set_position_aa(self, axis_angle_pose, speed=None, mvacc=None, mvtime=None,
is_radian=None, is_tool_coord=False, relative=False, wait=False, timeout=None, **kwargs):
"""
Set the pose represented by the axis angle pose
:param axis_angle_pose: the axis angle pose, [x(mm), y(mm), z(mm), rx(rad or °), ry(rad or °), rz(rad or °)]
:param speed: move speed (mm/s, rad/s), default is self.last_used_tcp_speed
:param mvacc: move acceleration (mm/s^2, rad/s^2), default is self.last_used_tcp_acc
:param mvtime: 0, reserved
:param is_radian: the rx/ry/rz of axis_angle_pose in radians or not, default is self.default_is_radian
:param is_tool_coord: is tool coordinate or not
:param relative: relative move or not
:param wait: whether to wait for the arm to complete, default is False
:param timeout: maximum waiting time(unit: second), default is None(no timeout), only valid if wait is True
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_position_aa(axis_angle_pose, speed=speed, mvacc=mvacc, mvtime=mvtime,
is_radian=is_radian, is_tool_coord=is_tool_coord, relative=relative,
wait=wait, timeout=timeout, **kwargs)
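# Minimal sketch (assumptions: `arm` is a connected XArmAPI instance; the
# values are illustrative). Move 50 mm along the tool Z axis using an
# axis-angle pose in the tool frame:
#   code = arm.set_position_aa([0, 0, 50, 0, 0, 0], is_tool_coord=True, wait=True)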
def set_servo_cartesian_aa(self, axis_angle_pose, speed=None, mvacc=None, is_radian=None, is_tool_coord=False, relative=False, **kwargs):
"""
Servo cartesian motion expressed as an axis-angle pose; only the most recent instruction is executed, and servo motion mode must be set first (self.set_mode(1))
Note:
1. only available if firmware_version >= 1.4.7
:param axis_angle_pose: the axis angle pose, [x(mm), y(mm), z(mm), rx(rad or °), ry(rad or °), rz(rad or °)]
:param speed: move speed (mm/s), reserved
:param mvacc: move acceleration (mm/s^2), reserved
:param is_radian: the rx/ry/rz of axis_angle_pose in radians or not, default is self.default_is_radian
:param is_tool_coord: is tool coordinate or not
:param relative: relative move or not
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_servo_cartesian_aa(axis_angle_pose, speed=speed, mvacc=mvacc, is_radian=is_radian,
is_tool_coord=is_tool_coord, relative=relative, **kwargs)
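# Streaming sketch (assumptions: `arm` is connected, firmware >= 1.4.7, and
# the 100 Hz rate and step size are illustrative). Because servo motion
# executes only the most recent target, commands are streamed in a loop:
#   import time
#   arm.set_mode(1)
#   arm.set_state(0)
#   for _ in range(100):
#       arm.set_servo_cartesian_aa([0, 0, 0.5, 0, 0, 0], is_tool_coord=True, relative=True)
#       time.sleep(0.01)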
def get_pose_offset(self, pose1, pose2, orient_type_in=0, orient_type_out=0, is_radian=None):
"""
Calculate the pose offset of two given points
:param pose1: [x(mm), y(mm), z(mm), roll/rx(rad or °), pitch/ry(rad or °), yaw/rz(rad or °)]
:param pose2: [x(mm), y(mm), z(mm), roll/rx(rad or °), pitch/ry(rad or °), yaw/rz(rad or °)]
:param orient_type_in: input attitude notation, 0 is RPY(roll/pitch/yaw) (default), 1 is axis angle(rx/ry/rz)
:param orient_type_out: notation of output attitude, 0 is RPY (default), 1 is axis angle
:param is_radian: the roll/rx/pitch/ry/yaw/rz of pose1/pose2/return_pose is radian or not
:return: tuple((code, pose)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
pose: [x(mm), y(mm), z(mm), roll/rx(rad or °), pitch/ry(rad or °), yaw/rz(rad or °)]
"""
return self._arm.get_pose_offset(pose1, pose2, orient_type_in=orient_type_in, orient_type_out=orient_type_out,
is_radian=is_radian)
def get_position_aa(self, is_radian=None):
"""
Get the pose represented by the axis angle pose
:param is_radian: the returned value (only rx/ry/rz) is in radians or not, default is self.default_is_radian
:return: tuple((code, [x, y, z, rx, ry, rz])), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_position_aa(is_radian=is_radian)
def get_joints_torque(self):
"""
Get joints torque
:return: tuple((code, joints_torque))
code: See the API code documentation for details.
joints_torque: joints torque
"""
return self._arm.get_joints_torque()
def set_joints_torque(self, joints_torque):
"""
Set joints torque
Warning: do not set this arbitrarily; incorrect values may damage the robot arm
:param joints_torque:
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_joints_torque(joints_torque)
def get_safe_level(self):
"""
Get safe level
:return: tuple((code, safe_level))
code: See the API code documentation for details.
safe_level: safe level
"""
return self._arm.get_safe_level()
def set_safe_level(self, level=4):
"""
Set safe level
:param level: safe level, default is 4
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_safe_level(level=level)
def set_timeout(self, timeout):
"""
Set the timeout of cmd response
:param timeout: seconds
"""
return self._arm.set_timeout(timeout)
def robotiq_reset(self):
"""
Reset the robotiq gripper (clear previous activation if any)
:return: tuple((code, robotiq_response))
code: See the API code documentation for details.
robotiq_response: See the robotiq documentation
"""
return self._arm.robotiq_reset()
def robotiq_set_activate(self, wait=True, timeout=3):
"""
Activate the robotiq gripper if it is not already activated
:param wait: whether to wait for the robotiq activate complete, default is True
:param timeout: maximum waiting time(unit: second), default is 3, only available if wait=True
:return: tuple((code, robotiq_response))
code: See the API code documentation for details.
robotiq_response: See the robotiq documentation
"""
return self._arm.robotiq_set_activate(wait=wait, timeout=timeout)
def robotiq_set_position(self, pos, speed=0xFF, force=0xFF, wait=True, timeout=5, **kwargs):
"""
Go to the position with determined speed and force.
:param pos: position of the gripper. Integer between 0 and 255. 0 being the fully open position and 255 being the fully closed position.
:param speed: gripper speed between 0 and 255
:param force: gripper force between 0 and 255
:param wait: whether to wait for the robotiq motion to complete, default is True
:param timeout: maximum waiting time(unit: second), default is 5, only available if wait=True
:return: tuple((code, robotiq_response))
code: See the API code documentation for details.
robotiq_response: See the robotiq documentation
"""
return self._arm.robotiq_set_position(pos, speed=speed, force=force, wait=wait, timeout=timeout, **kwargs)
def robotiq_open(self, speed=0xFF, force=0xFF, wait=True, timeout=5, **kwargs):
"""
Open the robotiq gripper
:param speed: gripper speed between 0 and 255
:param force: gripper force between 0 and 255
:param wait: whether to wait for the robotiq motion to complete, default is True
:param timeout: maximum waiting time(unit: second), default is 5, only available if wait=True
:return: tuple((code, robotiq_response))
code: See the API code documentation for details.
robotiq_response: See the robotiq documentation
"""
return self._arm.robotiq_open(speed=speed, force=force, wait=wait, timeout=timeout, **kwargs)
def robotiq_close(self, speed=0xFF, force=0xFF, wait=True, timeout=5, **kwargs):
"""
Close the robotiq gripper
:param speed: gripper speed between 0 and 255
:param force: gripper force between 0 and 255
:param wait: whether to wait for the robotiq motion to complete, default is True
:param timeout: maximum waiting time(unit: second), default is 5, only available if wait=True
:return: tuple((code, robotiq_response))
code: See the API code documentation for details.
robotiq_response: See the robotiq documentation
"""
return self._arm.robotiq_close(speed=speed, force=force, wait=wait, timeout=timeout, **kwargs)
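# Typical robotiq workflow sketch (assumption: `arm` is a connected XArmAPI
# instance; position/speed/force values are illustrative):
#   arm.robotiq_reset()
#   arm.robotiq_set_activate(wait=True)
#   arm.robotiq_set_position(128, speed=0x80, force=0x80, wait=True)  # half travel
#   arm.robotiq_open(wait=True)
#   arm.robotiq_close(wait=True)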
def robotiq_get_status(self, number_of_registers=3):
"""
Reading the status of robotiq gripper
:param number_of_registers: number of registers, 1/2/3, default is 3
number_of_registers=1: reading the content of register 0x07D0
number_of_registers=2: reading the content of register 0x07D0/0x07D1
number_of_registers=3: reading the content of register 0x07D0/0x07D1/0x07D2
Note:
register 0x07D0: Register GRIPPER STATUS
register 0x07D1: Register FAULT STATUS and register POSITION REQUEST ECHO
register 0x07D2: Register POSITION and register CURRENT
:return: tuple((code, robotiq_response))
code: See the API code documentation for details.
robotiq_response: See the robotiq documentation
"""
return self._arm.robotiq_get_status(number_of_registers=number_of_registers)
@property
def robotiq_status(self):
"""
The last state value obtained
Note:
1. Successfully calling a robotiq-related interface with the wait parameter set (wait=True) will update this value
2. Successfully calling the robotiq_get_status interface will partially or completely update this value
:return: status dict
{
'gOBJ': 0, # Object detection status, is a built-in feature that provides information on possible object pick-up
'gSTA': 0, # Gripper status, returns the current status & motion of the Gripper fingers
'gGTO': 0, # Action status, echo of the rGTO bit(go to bit)
'gACT': 0, # Activation status, echo of the rACT bit(activation bit)
'kFLT': 0, # Echo of the requested position for the Gripper
'gFLT': 0, # Fault status
'gPR': 0, # Echo of the requested position for the Gripper
'gPO': 0, # Actual position of the Gripper obtained via the encoders
'gCU': 0, # The current is read instantaneously from the motor drive
}
Note: -1 means never updated
"""
return self._arm.robotiq_status
def set_bio_gripper_enable(self, enable=True, wait=True, timeout=3):
"""
Enable the bio gripper if it is not already enabled
:param enable: enable or not
:param wait: whether to wait for the bio gripper enable complete, default is True
:param timeout: maximum waiting time(unit: second), default is 3, only available if wait=True
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_bio_gripper_enable(enable, wait=wait, timeout=timeout)
def set_bio_gripper_speed(self, speed):
"""
Set the speed of the bio gripper
:param speed: speed
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_bio_gripper_speed(speed)
def open_bio_gripper(self, speed=0, wait=True, timeout=5, **kwargs):
"""
Open the bio gripper
:param speed: speed value, default is 0 (not set the speed)
:param wait: whether to wait for the bio gripper motion complete, default is True
:param timeout: maximum waiting time(unit: second), default is 5, only available if wait=True
:return: code
code: See the API code documentation for details.
"""
return self._arm.open_bio_gripper(speed=speed, wait=wait, timeout=timeout, **kwargs)
def close_bio_gripper(self, speed=0, wait=True, timeout=5, **kwargs):
"""
Close the bio gripper
:param speed: speed value, default is 0 (not set the speed)
:param wait: whether to wait for the bio gripper motion complete, default is True
:param timeout: maximum waiting time(unit: second), default is 5, only available if wait=True
:return: code
code: See the API code documentation for details.
"""
return self._arm.close_bio_gripper(speed=speed, wait=wait, timeout=timeout, **kwargs)
def get_bio_gripper_status(self):
"""
Get the status of the bio gripper
:return: tuple((code, status))
code: See the API code documentation for details.
status: status
status & 0x03 == 0: stop
status & 0x03 == 1: motion
status & 0x03 == 2: catch
status & 0x03 == 3: error
(status >> 2) & 0x03 == 0: not enabled
(status >> 2) & 0x03 == 1: enabling
(status >> 2) & 0x03 == 2: enabled
"""
return self._arm.get_bio_gripper_status()
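# Bio gripper sketch (assumption: `arm` is a connected XArmAPI instance; the
# speed value is illustrative). The low two status bits encode the motion
# state per the docstring above:
#   arm.set_bio_gripper_enable(True)
#   arm.set_bio_gripper_speed(300)
#   arm.open_bio_gripper(wait=True)
#   arm.close_bio_gripper(wait=True)
#   code, status = arm.get_bio_gripper_status()
#   if code == 0 and status & 0x03 == 2:
#       print('object caught')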
def get_bio_gripper_error(self):
"""
Get the error code of the bio gripper
:return: tuple((code, error_code))
code: See the API code documentation for details.
error_code: See Chapter 7 of the xArm User Manual for details.
"""
return self._arm.get_bio_gripper_error()
def clean_bio_gripper_error(self):
"""
Clean the error code of the bio gripper
:return: code
code: See the API code documentation for details.
"""
return self._arm.clean_bio_gripper_error()
def set_tgpio_modbus_timeout(self, timeout):
"""
Set the modbus timeout of the tool gpio
:param timeout: timeout, seconds
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_tgpio_modbus_timeout(timeout)
def set_tgpio_modbus_baudrate(self, baud):
"""
Set the modbus baudrate of the tool gpio
:param baud: 4800/9600/19200/38400/57600/115200/230400/460800/921600/1000000/1500000/2000000/2500000
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_tgpio_modbus_baudrate(baud)
def get_tgpio_modbus_baudrate(self):
"""
Get the modbus baudrate of the tool gpio
:return: tuple((code, baudrate)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
baudrate: the modbus baudrate of the tool gpio
"""
return self._arm.get_tgpio_modbus_baudrate()
def getset_tgpio_modbus_data(self, datas, min_res_len=0):
"""
Send the modbus data to the tool gpio
:param datas: data_list
:param min_res_len: the minimum length of the modbus response data, used to validate the response length; if 0 (default), no check is performed
:return: tuple((code, modbus_response))
code: See the API code documentation for details.
modbus_response: modbus response data
"""
return self._arm.getset_tgpio_modbus_data(datas, min_res_len=min_res_len)
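# Tool-modbus sketch (assumptions: `arm` is connected and the frame below is
# an illustrative Modbus-RTU "read holding registers" (0x03) request for
# slave 0x09, register 0x0700, 2 registers -- adjust to the attached device):
#   arm.set_tgpio_modbus_baudrate(115200)
#   code, response = arm.getset_tgpio_modbus_data([0x09, 0x03, 0x07, 0x00, 0x00, 0x02], min_res_len=5)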
def set_report_tau_or_i(self, tau_or_i=0):
"""
Set the reported torque or electric current
:param tau_or_i:
0: torque
1: electric current
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_report_tau_or_i(tau_or_i=tau_or_i)
def get_report_tau_or_i(self):
"""
Get the reported torque or electric current
:return: tuple((code, tau_or_i))
code: See the API code documentation for details.
tau_or_i:
0: torque
1: electric current
"""
return self._arm.get_report_tau_or_i()
def set_self_collision_detection(self, on_off):
"""
Set whether to enable self-collision detection
:param on_off: enable or not
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_self_collision_detection(on_off)
def set_collision_tool_model(self, tool_type, *args, **kwargs):
"""
Set the geometric model of the end effector for self collision detection
:param tool_type: the geometric model type
0: No end effector, no additional parameters required
1: xArm Gripper, no additional parameters required
2: xArm Vacuum Gripper, no additional parameters required
3: xArm Bio Gripper, no additional parameters required
4: Robotiq-2F-85 Gripper, no additional parameters required
5: Robotiq-2F-140 Gripper, no additional parameters required
21: Cylinder, need additional parameters radius, height
self.set_collision_tool_model(21, radius=45, height=137)
:param radius: the radius of cylinder, (unit: mm)
:param height: the height of cylinder, (unit: mm)
22: Cuboid, need additional parameters x, y, z
self.set_collision_tool_model(22, x=234, y=323, z=23)
:param x: the length of the cuboid in the x coordinate direction, (unit: mm)
:param y: the length of the cuboid in the y coordinate direction, (unit: mm)
:param z: the length of the cuboid in the z coordinate direction, (unit: mm)
:param args: additional parameters
:param kwargs: additional parameters
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_collision_tool_model(tool_type, *args, **kwargs)
def set_simulation_robot(self, on_off):
"""
Set the simulation robot
:param on_off: True/False
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_simulation_robot(on_off)
def vc_set_joint_velocity(self, speeds, is_radian=None, is_sync=True, duration=-1, **kwargs):
"""
Joint velocity control, need to be set to joint velocity control mode(self.set_mode(4))
Note:
1. only available if firmware_version >= 1.6.9
:param speeds: [spd_J1, spd_J2, ..., spd_J7]
:param is_radian: the spd_Jx in radians or not, default is self.default_is_radian
:param is_sync: whether all joints accelerate and decelerate synchronously, default is True
:param duration: the duration of this speed command; after this time the speed is automatically set to 0
Note: only available if firmware_version >= 1.8.0
duration > 0: seconds
duration == 0: Always effective, will not stop automatically
duration < 0: default value, only used to be compatible with the old protocol, equivalent to 0
:return: code
code: See the API code documentation for details.
"""
return self._arm.vc_set_joint_velocity(speeds, is_radian=is_radian, is_sync=is_sync, duration=duration, **kwargs)
def vc_set_cartesian_velocity(self, speeds, is_radian=None, is_tool_coord=False, duration=-1, **kwargs):
"""
Cartesian velocity control, need to be set to cartesian velocity control mode(self.set_mode(5))
Note:
1. only available if firmware_version >= 1.6.9
:param speeds: [spd_x, spd_y, spd_z, spd_rx, spd_ry, spd_rz]
:param is_radian: the spd_rx/spd_ry/spd_rz in radians or not, default is self.default_is_radian
:param is_tool_coord: is tool coordinate or not, default is False
:param duration: the maximum duration of the speed command; after this time the speed is automatically set to 0
Note: only available if firmware_version >= 1.8.0
duration > 0: seconds, indicates the maximum number of seconds that this speed can be maintained
duration == 0: Always effective, will not stop automatically
duration < 0: default value, only used to be compatible with the old protocol, equivalent to 0
:return: code
code: See the API code documentation for details.
"""
return self._arm.vc_set_cartesian_velocity(speeds, is_radian=is_radian, is_tool_coord=is_tool_coord, duration=duration, **kwargs)
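# Velocity-control sketch (assumptions: `arm` is connected and the firmware
# requirements above are met; speeds are illustrative). Move at 30 mm/s
# along X for about 2 s, then stop:
#   import time
#   arm.set_mode(5)
#   arm.set_state(0)
#   arm.vc_set_cartesian_velocity([30, 0, 0, 0, 0, 0])
#   time.sleep(2)
#   arm.vc_set_cartesian_velocity([0, 0, 0, 0, 0, 0])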
def calibrate_tcp_coordinate_offset(self, four_points, is_radian=None):
"""
Four-point method to calibrate tool coordinate system position offset
Note:
1. only available if firmware_version >= 1.6.9
:param four_points: a list of four teaching coordinate positions [x, y, z, roll, pitch, yaw]
:param is_radian: the roll/pitch/yaw value of the each point in radians or not, default is self.default_is_radian
:return: tuple((code, xyz_offset)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
xyz_offset: calculated xyz(mm) TCP offset, [x, y, z]
"""
return self._arm.calibrate_tcp_coordinate_offset(four_points, is_radian=is_radian)
def calibrate_tcp_orientation_offset(self, rpy_be, rpy_bt, input_is_radian=None, return_is_radian=None):
"""
An additional teaching point to calibrate the tool coordinate system attitude offset
Note:
1. only available if firmware_version >= 1.6.9
:param rpy_be: the rpy value of the teaching point without TCP offset [roll, pitch, yaw]
:param rpy_bt: the rpy value of the teaching point with TCP offset [roll, pitch, yaw]
:param input_is_radian: the roll/pitch/yaw value of rpy_be and rpy_bt in radians or not, default is self.default_is_radian
:param return_is_radian: the roll/pitch/yaw value of result in radians or not, default is self.default_is_radian
:return: tuple((code, rpy_offset)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
rpy_offset: calculated rpy TCP offset, [roll, pitch, yaw]
"""
return self._arm.calibrate_tcp_orientation_offset(rpy_be, rpy_bt, input_is_radian=input_is_radian, return_is_radian=return_is_radian)
def calibrate_user_orientation_offset(self, three_points, mode=0, trust_ind=0, input_is_radian=None, return_is_radian=None):
"""
Three-point method to teach the user coordinate system orientation offset
Note:
1. only available if firmware_version >= 1.6.9
Note:
First determine a point in the working space, move along the desired coordinate system x+ to determine the second point,
and then move along the desired coordinate system y+ to determine the third point.
Make the x+ direction as accurate as possible.
If the y+ direction is not completely perpendicular to x+, it will be corrected in the calculation process.
:param three_points: a list of teaching TCP coordinate positions [x, y, z, roll, pitch, yaw]
:param input_is_radian: the roll/pitch/yaw value of the each point in radians or not, default is self.default_is_radian
:param return_is_radian: the roll/pitch/yaw value of result in radians or not, default is self.default_is_radian
:return: tuple((code, rpy_offset)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
rpy_offset: calculated rpy user offset, [roll, pitch, yaw]
"""
return self._arm.calibrate_user_orientation_offset(three_points, mode=mode, trust_ind=trust_ind, input_is_radian=input_is_radian, return_is_radian=return_is_radian)
def calibrate_user_coordinate_offset(self, rpy_ub, pos_b_uorg, is_radian=None):
"""
An additional teaching point determines the position offset of the user coordinate system.
Note:
1. only available if firmware_version >= 1.6.9
:param rpy_ub: the confirmed offset of the base coordinate system in the user coordinate system [roll, pitch, yaw], which is the result of calibrate_user_orientation_offset()
:param pos_b_uorg: the position of the teaching point in the base coordinate system [x, y, z], if the arm cannot reach the target position, the user can manually input the position of the target in the base coordinate.
:param is_radian: the roll/pitch/yaw value of rpy_ub in radians or not, default is self.default_is_radian
:return: tuple((code, xyz_offset)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
xyz_offset: calculated xyz(mm) user offset, [x, y, z]
"""
return self._arm.calibrate_user_coordinate_offset(rpy_ub, pos_b_uorg, is_radian=is_radian)
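# User-frame calibration sketch (assumptions: `arm` is connected and p1..p3
# and x0/y0/z0 are previously taught values; all names are hypothetical):
#   code, rpy_ub = arm.calibrate_user_orientation_offset([p1, p2, p3])
#   code, xyz_u = arm.calibrate_user_coordinate_offset(rpy_ub, [x0, y0, z0])
#   # assumption: the combined result can then be applied as a world offset
#   arm.set_world_offset(xyz_u + rpy_ub)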
def get_base_board_version(self, board_id=10):
"""
Get base board version
:param board_id: int
:return: (code, version)
code: See the API code documentation for details.
"""
return self._arm.get_base_board_version(board_id)
def set_impedance(self, coord, c_axis, M, K, B, **kwargs):
"""
Set all parameters of impedance control through the Six-axis Force Torque Sensor.
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:param coord: task frame. 0: base frame. 1: tool frame.
:param c_axis: a 6d vector of 0s and 1s. 1 means the robot will be compliant (impedance-controlled) along the corresponding axis of the task frame.
:param M: mass. (kg)
:param K: stiffness coefficient.
:param B: damping coefficient, currently ignored
Note: the controller sets this value to 2*sqrt(M*K).
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_impedance(coord, c_axis, M, K, B, **kwargs)
def set_impedance_mbk(self, M, K, B, **kwargs):
"""
Set mbk parameters of impedance control through the Six-axis Force Torque Sensor.
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:param M: mass. (kg)
:param K: stiffness coefficient.
:param B: damping coefficient, currently ignored
Note: the controller sets this value to 2*sqrt(M*K).
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_impedance_mbk(M, K, B, **kwargs)
def set_impedance_config(self, coord, c_axis):
"""
Set impedance control parameters of impedance control through the Six-axis Force Torque Sensor.
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:param coord: task frame. 0: base frame. 1: tool frame.
:param c_axis: a 6d vector of 0s and 1s. 1 means the robot will be compliant (impedance-controlled) along the corresponding axis of the task frame.
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_impedance_config(coord, c_axis)
def config_force_control(self, coord, c_axis, f_ref, limits, **kwargs):
"""
Set force control parameters through the Six-axis Force Torque Sensor.
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:param coord: task frame. 0: base frame. 1: tool frame.
:param c_axis: a 6d vector of 0s and 1s. 1 means that robot will be compliant in the corresponding axis of the task frame.
:param f_ref: the forces/torques the robot will apply to its environment. The robot adjusts its position along/about the compliant axes in order to achieve the specified force/torque.
:param limits: for compliant axes, these values are the maximum allowed tcp speed along/about the axis.
:return: code
code: See the API code documentation for details.
"""
return self._arm.config_force_control(coord, c_axis, f_ref, limits, **kwargs)
def set_force_control_pid(self, kp, ki, kd, xe_limit, **kwargs):
"""
Set force control pid parameters through the Six-axis Force Torque Sensor.
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:param kp: proportional gain.
:param ki: integral gain.
:param kd: differential gain.
:param xe_limit: 6d vector. for compliant axes, these values are the maximum allowed tcp speed along/about the axis. mm/s
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_force_control_pid(kp, ki, kd, xe_limit, **kwargs)
def ft_sensor_set_zero(self):
"""
Set the current state to the zero point of the Six-axis Force Torque Sensor
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:return: code
code: See the API code documentation for details.
"""
return self._arm.ft_sensor_set_zero()
def ft_sensor_iden_load(self):
"""
Identify the TCP load with the Six-axis Force Torque Sensor
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:return: tuple((code, load)) only when code is 0, the returned result is correct.
code: See the API code documentation for details.
load: [mass,x_centroid,y_centroid,z_centroid,Fx_offset,Fy_offset,Fz_offset,Tx_offset,Ty_offset,Tz_offset]
"""
return self._arm.ft_sensor_iden_load()
def ft_sensor_cali_load(self, iden_result_list, association_setting_tcp_load=False, **kwargs):
"""
Write the load offset parameters identified by the Six-axis Force Torque Sensor
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:param iden_result_list: [mass,x_centroid,y_centroid,z_centroid,Fx_offset,Fy_offset,Fz_offset,Tx_offset,Ty_offset,Tz_offset]
:param association_setting_tcp_load: whether to convert the parameter to the corresponding tcp load and set, default is False
Note: If True, the value of tcp load will be modified
:return: code
code: See the API code documentation for details.
"""
return self._arm.ft_sensor_cali_load(iden_result_list, association_setting_tcp_load=association_setting_tcp_load, **kwargs)
def ft_sensor_enable(self, on_off):
"""
Used for enabling and disabling the use of the Six-axis Force Torque Sensor measurements in the controller.
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:param on_off: enable or disable F/T data sampling.
:return: code
code: See the API code documentation for details.
"""
return self._arm.ft_sensor_enable(on_off)
def ft_sensor_app_set(self, app_code):
"""
Set robot to be controlled in force mode. (Through the Six-axis Force Torque Sensor)
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:param app_code: force mode.
0: non-force mode
1: impedance control
2: force control
:return: code
code: See the API code documentation for details.
"""
return self._arm.ft_sensor_app_set(app_code)
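# Force-control sketch (assumptions: firmware >= 1.8.3, the UFACTORY six-axis
# F/T sensor is attached, `arm` is connected; the reference force and speed
# limit values are illustrative). Apply 5 N along tool Z while the other axes
# hold position:
#   arm.ft_sensor_enable(1)
#   arm.ft_sensor_set_zero()
#   arm.config_force_control(1, [0, 0, 1, 0, 0, 0], [0, 0, 5, 0, 0, 0], [0, 0, 100, 0, 0, 0])
#   arm.ft_sensor_app_set(2)   # 2: force control
#   ...                        # do the compliant task
#   arm.ft_sensor_app_set(0)   # back to non-force mode
#   arm.ft_sensor_enable(0)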
def ft_sensor_app_get(self):
"""
Get force mode
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:return: tuple((code, app_code))
code: See the API code documentation for details.
app_code:
0: non-force mode
1: impedance control mode
2: force control mode
"""
return self._arm.ft_sensor_app_get()
def get_ft_sensor_data(self):
"""
Get the data of the Six-axis Force Torque Sensor
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:return: tuple((code, ft_data))
code: See the API code documentation for details.
ft_data: only when code is 0, the returned result is correct.
Note: the external force detection value of the external force/torque sensor after filtering, load and offset compensation
"""
return self._arm.get_ft_sensor_data()
def get_ft_senfor_config(self):
"""
Get the config of the Six-axis Force Torque Sensor
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:return: tuple((code, config))
code: See the API code documentation for details.
config: [...], the config of the external force/torque sensor, only when code is 0, the returned result is correct.
[0] ft_app_status: force mode
0: non-force mode
1: impedance control
2: force control
[1] ft_is_started: ft sensor is enabled or not
[2] ft_type: ft sensor type
[3] ft_id: ft sensor id
[4] ft_freq: ft sensor frequency
[5] ft_mass: load mass
[6] ft_dir_bias: reserved
[7] ft_centroid: [x_centroid,y_centroid,z_centroid]
[8] ft_zero: [Fx_offset,Fy_offset,Fz_offset,Tx_offset,Ty_offset,Tz_offset]
[9] imp_coord: task frame of impedance control mode.
0: base frame.
1: tool frame.
[10] imp_c_axis: a 6d vector of 0s and 1s. 1 means the robot will be compliant (impedance-controlled) along the corresponding axis of the task frame.
[11] M: mass. (kg)
[12] K: stiffness coefficient.
[13] B: damping coefficient, currently ignored. Note: the controller sets this value to 2*sqrt(M*K).
[14] f_coord: task frame of force control mode.
0: base frame.
1: tool frame.
[15] f_c_axis: a 6d vector of 0s and 1s. 1 means the robot will be compliant along the corresponding axis of the task frame.
[16] f_ref: the forces/torques the robot will apply to its environment. The robot adjusts its position along/about the compliant axes in order to achieve the specified force/torque.
[17] f_limits: reserved.
[18] kp: proportional gain.
[19] ki: integral gain.
[20] kd: differential gain.
[21] xe_limit: 6d vector. for compliant axes, these values are the maximum allowed tcp speed along/about the axis. mm/s
"""
return self._arm.get_ft_senfor_config()
def get_ft_sensor_error(self):
"""
Get the error code of the Six-axis Force Torque Sensor
Note:
1. only available if firmware_version >= 1.8.3
2. the Six-axis Force Torque Sensor is required (the third party is not currently supported)
:return: tuple((code, error))
code: See the API code documentation for details.
error: See the API code documentation for details.
"""
return self._arm.get_ft_sensor_error()
def iden_tcp_load(self):
"""
Identify the TCP load using the motor current
Note:
1. only available if firmware_version >= 1.8.0
:return: tuple((code, load)) only when code is 0, the returned result is correct.
code: See the API code documentation for details.
load: [mass,x_centroid,y_centroid,z_centroid]
"""
return self._arm.iden_tcp_load()
def get_linear_track_registers(self, **kwargs):
"""
Get the status of the linear track
Note:
1. only available if firmware_version >= 1.8.0
:return: tuple((code, status)) only when code is 0, the returned result is correct.
code: See the API code documentation for details.
status: status, like
{
'pos': 0,
'status': 0,
'error': 0,
'is_enabled': 0,
'on_zero': 0,
'sci': 1,
'sco': [0, 0],
}
"""
return self._arm.get_linear_track_registers(**kwargs)
def get_linear_track_pos(self):
"""
Get the pos of the linear track
Note:
1. only available if firmware_version >= 1.8.0
:return: tuple((code, position)) only when code is 0, the returned result is correct.
code: See the API code documentation for details.
position: position
"""
return self._arm.get_linear_track_pos()
def get_linear_track_status(self):
"""
Get the status of the linear track
Note:
1. only available if firmware_version >= 1.8.0
:return: tuple((code, status)) only when code is 0, the returned result is correct.
code: See the API code documentation for details.
status: status
status == 0: motion finished
status & 0x01: in motion
status & 0x02: has stopped
"""
return self._arm.get_linear_track_status()
def get_linear_track_error(self):
"""
Get the error code of the linear track
Note:
1. only available if firmware_version >= 1.8.0
:return: tuple((code, error)) only when code is 0, the returned result is correct.
code: See the API code documentation for details.
error: error code
"""
return self._arm.get_linear_track_error()
def get_linear_track_is_enabled(self):
"""
Get the linear track is enabled or not
Note:
1. only available if firmware_version >= 1.8.0
:return: tuple((code, status)) only when code is 0, the returned result is correct.
code: See the API code documentation for details.
status:
0: linear track is not enabled
1: linear track is enabled
"""
return self._arm.get_linear_track_is_enabled()
def get_linear_track_on_zero(self):
"""
Get the linear track is on zero position or not
Note:
1. only available if firmware_version >= 1.8.0
:return: tuple((code, status)) only when code is 0, the returned result is correct.
code: See the API code documentation for details.
status:
0: linear track is not on zero
1: linear track is on zero
"""
return self._arm.get_linear_track_on_zero()
def get_linear_track_sci(self):
"""
Get the sci1 value of the linear track
Note:
1. only available if firmware_version >= 1.8.0
:return: tuple((code, sci1)) only when code is 0, the returned result is correct.
code: See the API code documentation for details.
"""
return self._arm.get_linear_track_sci()
def get_linear_track_sco(self):
"""
Get the sco value of the linear track
Note:
1. only available if firmware_version >= 1.8.0
:return: tuple((code, sco)) only when code is 0, the returned result is correct.
code: See the API code documentation for details.
sco: [sco0, sco1]
"""
return self._arm.get_linear_track_sco()
def clean_linear_track_error(self):
"""
Clean the linear track error
Note:
1. only available if firmware_version >= 1.8.0
:return: code
code: See the API code documentation for details.
"""
return self._arm.clean_linear_track_error()
def set_linear_track_enable(self, enable):
"""
Set the linear track enable/disable
Note:
1. only available if firmware_version >= 1.8.0
:param enable: enable or not
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_linear_track_enable(enable)
def set_linear_track_speed(self, speed):
"""
Set the speed of the linear track
Note:
1. only available if firmware_version >= 1.8.0
:param speed: Integer between 1 and 1000mm/s.
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_linear_track_speed(speed)
def set_linear_track_back_origin(self, wait=True, **kwargs):
"""
Set the linear track go back to the origin position
Note:
1. only available if firmware_version >= 1.8.0
2. this operation must be performed at the first power-on
:param wait: wait to motion finish or not, default is True
:param kwargs:
auto_enable: enable after back to origin or not, default is True
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_linear_track_back_origin(wait=wait, **kwargs)
def set_linear_track_pos(self, pos, speed=None, wait=True, timeout=100, **kwargs):
"""
Set the position of the linear track
Note:
1. only available if firmware_version >= 1.8.0
:param pos: position. Integer between 0 and 700/1000/1500mm.
If SN start with AL1300 the position range is 0~700mm.
If SN start with AL1301 the position range is 0~1000mm.
If SN start with AL1302 the position range is 0~1500mm.
:param speed: speed of the linear track. Integer between 1 and 1000mm/s. default is not set
:param wait: wait to motion finish or not, default is True
:param timeout: wait timeout, seconds, default is 100s.
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_linear_track_pos(pos, speed=speed, wait=wait, timeout=timeout, **kwargs)
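# Linear-track sketch (assumptions: firmware >= 1.8.0, a track is attached,
# `arm` is connected; target position and speed are illustrative):
#   arm.set_linear_track_enable(True)
#   arm.set_linear_track_back_origin(wait=True)   # required at the first power-on
#   arm.set_linear_track_pos(350, speed=200, wait=True)
#   code, pos = arm.get_linear_track_pos()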
def set_linear_track_stop(self):
"""
Set the linear track to stop
Note:
1. only available if firmware_version >= 1.8.0
:return: code
code: See the API code documentation for details.
"""
return self._arm.set_linear_track_stop()
def delete_blockly_app(self, name):
"""
Delete blockly app
:param name: blockly app name
:return: code
code: See the API code documentation for details.
"""
return self._studio.delete_blockly_app(name)
def delete_trajectory(self, name):
"""
Delete trajectory
:param name: trajectory name
:return: code
code: See the API code documentation for details.
"""
return self._studio.delete_trajectory(name)
def get_initial_point(self):
"""
Get the initial point from studio
:return: tuple((code, point)), only when code is 0, the returned result is correct.
code: See the API code documentation for details.
point: initial point, [J1, J2, ..., J7]
"""
return self._studio.get_initial_point()
def set_initial_point(self, point):
"""
Set the initial point
:param point: initial point, [J1, J2, ..., J7]
:return: code
code: See the API code documentation for details.
"""
return self._studio.set_initial_point(point)
|
xArm-Developer/xArm-Python-SDK
|
xarm/wrapper/xarm_api.py
|
Python
|
bsd-3-clause
| 148,949
|
from doorman import permissions
def can_do_stuff(*args, **kwargs):
return True
permissions.register('can_do_stuff', can_do_stuff)
def can_do_other_stuff(*args, **kwargs):
return True
|
seanbrant/django-doorman
|
tests/basicapp/permissions.py
|
Python
|
bsd-3-clause
| 197
|
__author__ = "Sebastien Celles"
__copyright__ = "Copyright 2014, celles.net"
__credits__ = ["Sebastien Celles"]
__license__ = "BSD"
__version__ = "0.0.5"
__maintainer__ = "Sebastien Celles"
__email__ = "s.celles@gmail.com"
__status__ = "Development"
__url__ = 'https://github.com/scls19fr/openweathermap_requests'
|
scls19fr/openweathermap_requests
|
openweathermap_requests/version.py
|
Python
|
bsd-3-clause
| 314
|
from django.conf.urls import patterns, url
urlpatterns = patterns('ui.views',
url(r'^', 'index'),
)
|
niksy/conference-web
|
ui/urls.py
|
Python
|
bsd-3-clause
| 107
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2016, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Detect the current architecture and operating system.
Some functions here are really from kernel32.dll, others from version.dll.
"""
from defines import *
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- NTDDI version ------------------------------------------------------------
NTDDI_WIN8 = 0x06020000
NTDDI_WIN7SP1 = 0x06010100
NTDDI_WIN7 = 0x06010000
NTDDI_WS08 = 0x06000100
NTDDI_VISTASP1 = 0x06000100
NTDDI_VISTA = 0x06000000
NTDDI_LONGHORN = NTDDI_VISTA
NTDDI_WS03SP2 = 0x05020200
NTDDI_WS03SP1 = 0x05020100
NTDDI_WS03 = 0x05020000
NTDDI_WINXPSP3 = 0x05010300
NTDDI_WINXPSP2 = 0x05010200
NTDDI_WINXPSP1 = 0x05010100
NTDDI_WINXP = 0x05010000
NTDDI_WIN2KSP4 = 0x05000400
NTDDI_WIN2KSP3 = 0x05000300
NTDDI_WIN2KSP2 = 0x05000200
NTDDI_WIN2KSP1 = 0x05000100
NTDDI_WIN2K = 0x05000000
NTDDI_WINNT4 = 0x04000000
OSVERSION_MASK = 0xFFFF0000
SPVERSION_MASK = 0x0000FF00
SUBVERSION_MASK = 0x000000FF
#--- OSVERSIONINFO and OSVERSIONINFOEX structures and constants ---------------
VER_PLATFORM_WIN32s = 0
VER_PLATFORM_WIN32_WINDOWS = 1
VER_PLATFORM_WIN32_NT = 2
VER_SUITE_BACKOFFICE = 0x00000004
VER_SUITE_BLADE = 0x00000400
VER_SUITE_COMPUTE_SERVER = 0x00004000
VER_SUITE_DATACENTER = 0x00000080
VER_SUITE_ENTERPRISE = 0x00000002
VER_SUITE_EMBEDDEDNT = 0x00000040
VER_SUITE_PERSONAL = 0x00000200
VER_SUITE_SINGLEUSERTS = 0x00000100
VER_SUITE_SMALLBUSINESS = 0x00000001
VER_SUITE_SMALLBUSINESS_RESTRICTED = 0x00000020
VER_SUITE_STORAGE_SERVER = 0x00002000
VER_SUITE_TERMINAL = 0x00000010
VER_SUITE_WH_SERVER = 0x00008000
VER_NT_DOMAIN_CONTROLLER = 0x0000002
VER_NT_SERVER = 0x0000003
VER_NT_WORKSTATION = 0x0000001
VER_BUILDNUMBER = 0x0000004
VER_MAJORVERSION = 0x0000002
VER_MINORVERSION = 0x0000001
VER_PLATFORMID = 0x0000008
VER_PRODUCT_TYPE = 0x0000080
VER_SERVICEPACKMAJOR = 0x0000020
VER_SERVICEPACKMINOR = 0x0000010
VER_SUITENAME = 0x0000040
VER_EQUAL = 1
VER_GREATER = 2
VER_GREATER_EQUAL = 3
VER_LESS = 4
VER_LESS_EQUAL = 5
VER_AND = 6
VER_OR = 7
# typedef struct _OSVERSIONINFO {
# DWORD dwOSVersionInfoSize;
# DWORD dwMajorVersion;
# DWORD dwMinorVersion;
# DWORD dwBuildNumber;
# DWORD dwPlatformId;
# TCHAR szCSDVersion[128];
# }OSVERSIONINFO;
class OSVERSIONINFOA(Structure):
_fields_ = [
("dwOSVersionInfoSize", DWORD),
("dwMajorVersion", DWORD),
("dwMinorVersion", DWORD),
("dwBuildNumber", DWORD),
("dwPlatformId", DWORD),
("szCSDVersion", CHAR * 128),
]
class OSVERSIONINFOW(Structure):
_fields_ = [
("dwOSVersionInfoSize", DWORD),
("dwMajorVersion", DWORD),
("dwMinorVersion", DWORD),
("dwBuildNumber", DWORD),
("dwPlatformId", DWORD),
("szCSDVersion", WCHAR * 128),
]
# typedef struct _OSVERSIONINFOEX {
# DWORD dwOSVersionInfoSize;
# DWORD dwMajorVersion;
# DWORD dwMinorVersion;
# DWORD dwBuildNumber;
# DWORD dwPlatformId;
# TCHAR szCSDVersion[128];
# WORD wServicePackMajor;
# WORD wServicePackMinor;
# WORD wSuiteMask;
# BYTE wProductType;
# BYTE wReserved;
# }OSVERSIONINFOEX, *POSVERSIONINFOEX, *LPOSVERSIONINFOEX;
class OSVERSIONINFOEXA(Structure):
_fields_ = [
("dwOSVersionInfoSize", DWORD),
("dwMajorVersion", DWORD),
("dwMinorVersion", DWORD),
("dwBuildNumber", DWORD),
("dwPlatformId", DWORD),
("szCSDVersion", CHAR * 128),
("wServicePackMajor", WORD),
("wServicePackMinor", WORD),
("wSuiteMask", WORD),
("wProductType", BYTE),
("wReserved", BYTE),
]
class OSVERSIONINFOEXW(Structure):
_fields_ = [
("dwOSVersionInfoSize", DWORD),
("dwMajorVersion", DWORD),
("dwMinorVersion", DWORD),
("dwBuildNumber", DWORD),
("dwPlatformId", DWORD),
("szCSDVersion", WCHAR * 128),
("wServicePackMajor", WORD),
("wServicePackMinor", WORD),
("wSuiteMask", WORD),
("wProductType", BYTE),
("wReserved", BYTE),
]
LPOSVERSIONINFOA = POINTER(OSVERSIONINFOA)
LPOSVERSIONINFOW = POINTER(OSVERSIONINFOW)
LPOSVERSIONINFOEXA = POINTER(OSVERSIONINFOEXA)
LPOSVERSIONINFOEXW = POINTER(OSVERSIONINFOEXW)
POSVERSIONINFOA = LPOSVERSIONINFOA
POSVERSIONINFOW = LPOSVERSIONINFOW
POSVERSIONINFOEXA = LPOSVERSIONINFOEXA
POSVERSIONINFOEXW = LPOSVERSIONINFOEXW
#--- GetSystemMetrics constants -----------------------------------------------
SM_CXSCREEN = 0
SM_CYSCREEN = 1
SM_CXVSCROLL = 2
SM_CYHSCROLL = 3
SM_CYCAPTION = 4
SM_CXBORDER = 5
SM_CYBORDER = 6
SM_CXDLGFRAME = 7
SM_CYDLGFRAME = 8
SM_CYVTHUMB = 9
SM_CXHTHUMB = 10
SM_CXICON = 11
SM_CYICON = 12
SM_CXCURSOR = 13
SM_CYCURSOR = 14
SM_CYMENU = 15
SM_CXFULLSCREEN = 16
SM_CYFULLSCREEN = 17
SM_CYKANJIWINDOW = 18
SM_MOUSEPRESENT = 19
SM_CYVSCROLL = 20
SM_CXHSCROLL = 21
SM_DEBUG = 22
SM_SWAPBUTTON = 23
SM_RESERVED1 = 24
SM_RESERVED2 = 25
SM_RESERVED3 = 26
SM_RESERVED4 = 27
SM_CXMIN = 28
SM_CYMIN = 29
SM_CXSIZE = 30
SM_CYSIZE = 31
SM_CXFRAME = 32
SM_CYFRAME = 33
SM_CXMINTRACK = 34
SM_CYMINTRACK = 35
SM_CXDOUBLECLK = 36
SM_CYDOUBLECLK = 37
SM_CXICONSPACING = 38
SM_CYICONSPACING = 39
SM_MENUDROPALIGNMENT = 40
SM_PENWINDOWS = 41
SM_DBCSENABLED = 42
SM_CMOUSEBUTTONS = 43
SM_CXFIXEDFRAME = SM_CXDLGFRAME # ;win40 name change
SM_CYFIXEDFRAME = SM_CYDLGFRAME # ;win40 name change
SM_CXSIZEFRAME = SM_CXFRAME # ;win40 name change
SM_CYSIZEFRAME = SM_CYFRAME # ;win40 name change
SM_SECURE = 44
SM_CXEDGE = 45
SM_CYEDGE = 46
SM_CXMINSPACING = 47
SM_CYMINSPACING = 48
SM_CXSMICON = 49
SM_CYSMICON = 50
SM_CYSMCAPTION = 51
SM_CXSMSIZE = 52
SM_CYSMSIZE = 53
SM_CXMENUSIZE = 54
SM_CYMENUSIZE = 55
SM_ARRANGE = 56
SM_CXMINIMIZED = 57
SM_CYMINIMIZED = 58
SM_CXMAXTRACK = 59
SM_CYMAXTRACK = 60
SM_CXMAXIMIZED = 61
SM_CYMAXIMIZED = 62
SM_NETWORK = 63
SM_CLEANBOOT = 67
SM_CXDRAG = 68
SM_CYDRAG = 69
SM_SHOWSOUNDS = 70
SM_CXMENUCHECK = 71 # Use instead of GetMenuCheckMarkDimensions()!
SM_CYMENUCHECK = 72
SM_SLOWMACHINE = 73
SM_MIDEASTENABLED = 74
SM_MOUSEWHEELPRESENT = 75
SM_XVIRTUALSCREEN = 76
SM_YVIRTUALSCREEN = 77
SM_CXVIRTUALSCREEN = 78
SM_CYVIRTUALSCREEN = 79
SM_CMONITORS = 80
SM_SAMEDISPLAYFORMAT = 81
SM_IMMENABLED = 82
SM_CXFOCUSBORDER = 83
SM_CYFOCUSBORDER = 84
SM_TABLETPC = 86
SM_MEDIACENTER = 87
SM_STARTER = 88
SM_SERVERR2 = 89
SM_MOUSEHORIZONTALWHEELPRESENT = 91
SM_CXPADDEDBORDER = 92
SM_CMETRICS = 93
SM_REMOTESESSION = 0x1000
SM_SHUTTINGDOWN = 0x2000
SM_REMOTECONTROL = 0x2001
SM_CARETBLINKINGENABLED = 0x2002
#--- SYSTEM_INFO structure, GetSystemInfo() and GetNativeSystemInfo() ---------
# Values used by Wine
# Documented values at MSDN are marked with an asterisk
PROCESSOR_ARCHITECTURE_UNKNOWN = 0xFFFF # Unknown architecture.
PROCESSOR_ARCHITECTURE_INTEL = 0 # x86 (AMD or Intel) *
PROCESSOR_ARCHITECTURE_MIPS = 1 # MIPS
PROCESSOR_ARCHITECTURE_ALPHA = 2 # Alpha
PROCESSOR_ARCHITECTURE_PPC = 3 # Power PC
PROCESSOR_ARCHITECTURE_SHX = 4 # SHX
PROCESSOR_ARCHITECTURE_ARM = 5 # ARM
PROCESSOR_ARCHITECTURE_IA64 = 6 # Intel Itanium *
PROCESSOR_ARCHITECTURE_ALPHA64 = 7 # Alpha64
PROCESSOR_ARCHITECTURE_MSIL = 8 # MSIL
PROCESSOR_ARCHITECTURE_AMD64 = 9 # x64 (AMD or Intel) *
PROCESSOR_ARCHITECTURE_IA32_ON_WIN64 = 10 # IA32 on Win64
PROCESSOR_ARCHITECTURE_SPARC = 20 # Sparc (Wine)
# Values used by Wine
# PROCESSOR_OPTIL value found at http://code.google.com/p/ddab-lib/
# Documented values at MSDN are marked with an asterisk
PROCESSOR_INTEL_386 = 386 # Intel i386 *
PROCESSOR_INTEL_486 = 486 # Intel i486 *
PROCESSOR_INTEL_PENTIUM = 586 # Intel Pentium *
PROCESSOR_INTEL_IA64 = 2200 # Intel IA64 (Itanium) *
PROCESSOR_AMD_X8664 = 8664 # AMD X86 64 *
PROCESSOR_MIPS_R4000 = 4000 # MIPS R4000, R4101, R3910
PROCESSOR_ALPHA_21064 = 21064 # Alpha 210 64
PROCESSOR_PPC_601 = 601 # PPC 601
PROCESSOR_PPC_603 = 603 # PPC 603
PROCESSOR_PPC_604 = 604 # PPC 604
PROCESSOR_PPC_620 = 620 # PPC 620
PROCESSOR_HITACHI_SH3 = 10003 # Hitachi SH3 (Windows CE)
PROCESSOR_HITACHI_SH3E = 10004 # Hitachi SH3E (Windows CE)
PROCESSOR_HITACHI_SH4 = 10005 # Hitachi SH4 (Windows CE)
PROCESSOR_MOTOROLA_821 = 821 # Motorola 821 (Windows CE)
PROCESSOR_SHx_SH3 = 103 # SHx SH3 (Windows CE)
PROCESSOR_SHx_SH4 = 104 # SHx SH4 (Windows CE)
PROCESSOR_STRONGARM = 2577 # StrongARM (Windows CE)
PROCESSOR_ARM720 = 1824 # ARM 720 (Windows CE)
PROCESSOR_ARM820 = 2080 # ARM 820 (Windows CE)
PROCESSOR_ARM920 = 2336 # ARM 920 (Windows CE)
PROCESSOR_ARM_7TDMI = 70001 # ARM 7TDMI (Windows CE)
PROCESSOR_OPTIL = 0x494F # MSIL
# typedef struct _SYSTEM_INFO {
# union {
# DWORD dwOemId;
# struct {
# WORD wProcessorArchitecture;
# WORD wReserved;
# } ;
# } ;
# DWORD dwPageSize;
# LPVOID lpMinimumApplicationAddress;
# LPVOID lpMaximumApplicationAddress;
# DWORD_PTR dwActiveProcessorMask;
# DWORD dwNumberOfProcessors;
# DWORD dwProcessorType;
# DWORD dwAllocationGranularity;
# WORD wProcessorLevel;
# WORD wProcessorRevision;
# } SYSTEM_INFO;
class _SYSTEM_INFO_OEM_ID_STRUCT(Structure):
_fields_ = [
("wProcessorArchitecture", WORD),
("wReserved", WORD),
]
class _SYSTEM_INFO_OEM_ID(Union):
_fields_ = [
("dwOemId", DWORD),
("w", _SYSTEM_INFO_OEM_ID_STRUCT),
]
class SYSTEM_INFO(Structure):
_fields_ = [
("id", _SYSTEM_INFO_OEM_ID),
("dwPageSize", DWORD),
("lpMinimumApplicationAddress", LPVOID),
("lpMaximumApplicationAddress", LPVOID),
("dwActiveProcessorMask", DWORD_PTR),
("dwNumberOfProcessors", DWORD),
("dwProcessorType", DWORD),
("dwAllocationGranularity", DWORD),
("wProcessorLevel", WORD),
("wProcessorRevision", WORD),
]
def __get_dwOemId(self):
return self.id.dwOemId
def __set_dwOemId(self, value):
self.id.dwOemId = value
dwOemId = property(__get_dwOemId, __set_dwOemId)
def __get_wProcessorArchitecture(self):
return self.id.w.wProcessorArchitecture
def __set_wProcessorArchitecture(self, value):
self.id.w.wProcessorArchitecture = value
wProcessorArchitecture = property(__get_wProcessorArchitecture, __set_wProcessorArchitecture)
LPSYSTEM_INFO = ctypes.POINTER(SYSTEM_INFO)
# void WINAPI GetSystemInfo(
# __out LPSYSTEM_INFO lpSystemInfo
# );
def GetSystemInfo():
_GetSystemInfo = windll.kernel32.GetSystemInfo
_GetSystemInfo.argtypes = [LPSYSTEM_INFO]
_GetSystemInfo.restype = None
sysinfo = SYSTEM_INFO()
_GetSystemInfo(byref(sysinfo))
return sysinfo
# void WINAPI GetNativeSystemInfo(
# __out LPSYSTEM_INFO lpSystemInfo
# );
def GetNativeSystemInfo():
_GetNativeSystemInfo = windll.kernel32.GetNativeSystemInfo
_GetNativeSystemInfo.argtypes = [LPSYSTEM_INFO]
_GetNativeSystemInfo.restype = None
sysinfo = SYSTEM_INFO()
_GetNativeSystemInfo(byref(sysinfo))
return sysinfo
# int WINAPI GetSystemMetrics(
# __in int nIndex
# );
def GetSystemMetrics(nIndex):
_GetSystemMetrics = windll.user32.GetSystemMetrics
_GetSystemMetrics.argtypes = [ctypes.c_int]
_GetSystemMetrics.restype = ctypes.c_int
return _GetSystemMetrics(nIndex)
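# Example sketch (hypothetical; Windows only): query the primary screen size.
#   width = GetSystemMetrics(SM_CXSCREEN)
#   height = GetSystemMetrics(SM_CYSCREEN)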
# SIZE_T WINAPI GetLargePageMinimum(void);
def GetLargePageMinimum():
_GetLargePageMinimum = windll.kernel32.GetLargePageMinimum # exported by kernel32, not user32
_GetLargePageMinimum.argtypes = []
_GetLargePageMinimum.restype = SIZE_T
return _GetLargePageMinimum()
# HANDLE WINAPI GetCurrentProcess(void);
def GetCurrentProcess():
## return 0xFFFFFFFFFFFFFFFFL
_GetCurrentProcess = windll.kernel32.GetCurrentProcess
_GetCurrentProcess.argtypes = []
_GetCurrentProcess.restype = HANDLE
return _GetCurrentProcess()
# HANDLE WINAPI GetCurrentThread(void);
def GetCurrentThread():
## return 0xFFFFFFFFFFFFFFFEL
_GetCurrentThread = windll.kernel32.GetCurrentThread
_GetCurrentThread.argtypes = []
_GetCurrentThread.restype = HANDLE
return _GetCurrentThread()
# BOOL WINAPI IsWow64Process(
# __in HANDLE hProcess,
# __out PBOOL Wow64Process
# );
def IsWow64Process(hProcess):
_IsWow64Process = windll.kernel32.IsWow64Process
_IsWow64Process.argtypes = [HANDLE, PBOOL]
_IsWow64Process.restype = bool
_IsWow64Process.errcheck = RaiseIfZero
Wow64Process = BOOL(FALSE)
_IsWow64Process(hProcess, byref(Wow64Process))
return bool(Wow64Process)
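# Example sketch (hypothetical; Windows only): detect whether the current
# process is a 32-bit process running on 64-bit Windows.
#   if IsWow64Process(GetCurrentProcess()):
#       print("running under WOW64")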
# DWORD WINAPI GetVersion(void);
def GetVersion():
_GetVersion = windll.kernel32.GetVersion
_GetVersion.argtypes = []
_GetVersion.restype = DWORD
_GetVersion.errcheck = RaiseIfZero
# See the example code here:
# http://msdn.microsoft.com/en-us/library/ms724439(VS.85).aspx
dwVersion = _GetVersion()
dwMajorVersion = dwVersion & 0x000000FF
dwMinorVersion = (dwVersion & 0x0000FF00) >> 8
if (dwVersion & 0x80000000) == 0:
dwBuild = int((dwVersion & 0x7FFF0000) >> 16)
else:
dwBuild = None # high bit set: the high word is not a build number
return int(dwMajorVersion), int(dwMinorVersion), dwBuild
# BOOL WINAPI GetVersionEx(
# __inout LPOSVERSIONINFO lpVersionInfo
# );
def GetVersionExA():
_GetVersionExA = windll.kernel32.GetVersionExA
_GetVersionExA.argtypes = [POINTER(OSVERSIONINFOEXA)]
_GetVersionExA.restype = bool
_GetVersionExA.errcheck = RaiseIfZero
osi = OSVERSIONINFOEXA()
osi.dwOSVersionInfoSize = sizeof(osi)
try:
_GetVersionExA(byref(osi))
except WindowsError:
osi = OSVERSIONINFOA()
osi.dwOSVersionInfoSize = sizeof(osi)
_GetVersionExA.argtypes = [POINTER(OSVERSIONINFOA)]
_GetVersionExA(byref(osi))
return osi
def GetVersionExW():
_GetVersionExW = windll.kernel32.GetVersionExW
_GetVersionExW.argtypes = [POINTER(OSVERSIONINFOEXW)]
_GetVersionExW.restype = bool
_GetVersionExW.errcheck = RaiseIfZero
osi = OSVERSIONINFOEXW()
osi.dwOSVersionInfoSize = sizeof(osi)
try:
_GetVersionExW(byref(osi))
except WindowsError:
osi = OSVERSIONINFOW()
osi.dwOSVersionInfoSize = sizeof(osi)
_GetVersionExW.argtypes = [POINTER(OSVERSIONINFOW)]
_GetVersionExW(byref(osi))
return osi
GetVersionEx = GuessStringType(GetVersionExA, GetVersionExW)
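# Example sketch (hypothetical; Windows only): query the running OS version
# and service pack via the ANSI variant.
#   osi = GetVersionExA()
#   print(osi.dwMajorVersion, osi.dwMinorVersion, osi.wServicePackMajor)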
# BOOL WINAPI GetProductInfo(
# __in DWORD dwOSMajorVersion,
# __in DWORD dwOSMinorVersion,
# __in DWORD dwSpMajorVersion,
# __in DWORD dwSpMinorVersion,
# __out PDWORD pdwReturnedProductType
# );
def GetProductInfo(dwOSMajorVersion, dwOSMinorVersion, dwSpMajorVersion, dwSpMinorVersion):
_GetProductInfo = windll.kernel32.GetProductInfo
_GetProductInfo.argtypes = [DWORD, DWORD, DWORD, DWORD, PDWORD]
_GetProductInfo.restype = BOOL
_GetProductInfo.errcheck = RaiseIfZero
dwReturnedProductType = DWORD(0)
_GetProductInfo(dwOSMajorVersion, dwOSMinorVersion, dwSpMajorVersion, dwSpMinorVersion, byref(dwReturnedProductType))
return dwReturnedProductType.value
# BOOL WINAPI VerifyVersionInfo(
# __in LPOSVERSIONINFOEX lpVersionInfo,
# __in DWORD dwTypeMask,
# __in DWORDLONG dwlConditionMask
# );
def VerifyVersionInfo(lpVersionInfo, dwTypeMask, dwlConditionMask):
if isinstance(lpVersionInfo, OSVERSIONINFOEXA):
return VerifyVersionInfoA(lpVersionInfo, dwTypeMask, dwlConditionMask)
if isinstance(lpVersionInfo, OSVERSIONINFOEXW):
return VerifyVersionInfoW(lpVersionInfo, dwTypeMask, dwlConditionMask)
raise TypeError("Bad OSVERSIONINFOEX structure")
def VerifyVersionInfoA(lpVersionInfo, dwTypeMask, dwlConditionMask):
_VerifyVersionInfoA = windll.kernel32.VerifyVersionInfoA
_VerifyVersionInfoA.argtypes = [LPOSVERSIONINFOEXA, DWORD, DWORDLONG]
_VerifyVersionInfoA.restype = bool
return _VerifyVersionInfoA(byref(lpVersionInfo), dwTypeMask, dwlConditionMask)
def VerifyVersionInfoW(lpVersionInfo, dwTypeMask, dwlConditionMask):
_VerifyVersionInfoW = windll.kernel32.VerifyVersionInfoW
_VerifyVersionInfoW.argtypes = [LPOSVERSIONINFOEXW, DWORD, DWORDLONG]
_VerifyVersionInfoW.restype = bool
return _VerifyVersionInfoW(byref(lpVersionInfo), dwTypeMask, dwlConditionMask)
# ULONGLONG WINAPI VerSetConditionMask(
# __in ULONGLONG dwlConditionMask,
# __in DWORD dwTypeBitMask,
# __in BYTE dwConditionMask
# );
def VerSetConditionMask(dwlConditionMask, dwTypeBitMask, dwConditionMask):
_VerSetConditionMask = windll.kernel32.VerSetConditionMask
_VerSetConditionMask.argtypes = [ULONGLONG, DWORD, BYTE]
_VerSetConditionMask.restype = ULONGLONG
return _VerSetConditionMask(dwlConditionMask, dwTypeBitMask, dwConditionMask)
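# Sketch of the usual VerifyVersionInfo() idiom, here testing for "Windows
# Vista or later". The VER_* comparison constants below use the standard
# winnt.h values and are spelled out locally in case this module does not
# already define them; a production check would also compare the service
# pack fields as MSDN recommends.
#
#     VER_MINORVERSION  = 0x0000001
#     VER_MAJORVERSION  = 0x0000002
#     VER_GREATER_EQUAL = 3
#
#     osvi = OSVERSIONINFOEXW()
#     osvi.dwOSVersionInfoSize = sizeof(osvi)
#     osvi.dwMajorVersion = 6
#     osvi.dwMinorVersion = 0
#     mask = VerSetConditionMask(0,    VER_MAJORVERSION, VER_GREATER_EQUAL)
#     mask = VerSetConditionMask(mask, VER_MINORVERSION, VER_GREATER_EQUAL)
#     is_vista_or_later = VerifyVersionInfoW(
#         osvi, VER_MAJORVERSION | VER_MINORVERSION, mask)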
#--- get_bits, get_arch and get_os --------------------------------------------
ARCH_UNKNOWN = "unknown"
ARCH_I386 = "i386"
ARCH_MIPS = "mips"
ARCH_ALPHA = "alpha"
ARCH_PPC = "ppc"
ARCH_SHX = "shx"
ARCH_ARM = "arm"
ARCH_ARM64 = "arm64"
ARCH_THUMB = "thumb"
ARCH_IA64 = "ia64"
ARCH_ALPHA64 = "alpha64"
ARCH_MSIL = "msil"
ARCH_AMD64 = "amd64"
ARCH_SPARC = "sparc"
# aliases
ARCH_IA32 = ARCH_I386
ARCH_X86 = ARCH_I386
ARCH_X64 = ARCH_AMD64
ARCH_ARM7 = ARCH_ARM
ARCH_ARM8 = ARCH_ARM64
ARCH_T32 = ARCH_THUMB
ARCH_AARCH32 = ARCH_ARM7
ARCH_AARCH64 = ARCH_ARM8
ARCH_POWERPC = ARCH_PPC
ARCH_HITACHI = ARCH_SHX
ARCH_ITANIUM = ARCH_IA64
# win32 constants -> our constants
_arch_map = {
PROCESSOR_ARCHITECTURE_INTEL : ARCH_I386,
PROCESSOR_ARCHITECTURE_MIPS : ARCH_MIPS,
PROCESSOR_ARCHITECTURE_ALPHA : ARCH_ALPHA,
PROCESSOR_ARCHITECTURE_PPC : ARCH_PPC,
PROCESSOR_ARCHITECTURE_SHX : ARCH_SHX,
PROCESSOR_ARCHITECTURE_ARM : ARCH_ARM,
PROCESSOR_ARCHITECTURE_IA64 : ARCH_IA64,
PROCESSOR_ARCHITECTURE_ALPHA64 : ARCH_ALPHA64,
PROCESSOR_ARCHITECTURE_MSIL : ARCH_MSIL,
PROCESSOR_ARCHITECTURE_AMD64 : ARCH_AMD64,
PROCESSOR_ARCHITECTURE_SPARC : ARCH_SPARC,
}
OS_UNKNOWN = "Unknown"
OS_NT = "Windows NT"
OS_W2K = "Windows 2000"
OS_XP = "Windows XP"
OS_XP_64 = "Windows XP (64 bits)"
OS_W2K3 = "Windows 2003"
OS_W2K3_64 = "Windows 2003 (64 bits)"
OS_W2K3R2 = "Windows 2003 R2"
OS_W2K3R2_64 = "Windows 2003 R2 (64 bits)"
OS_W2K8 = "Windows 2008"
OS_W2K8_64 = "Windows 2008 (64 bits)"
OS_W2K8R2 = "Windows 2008 R2"
OS_W2K8R2_64 = "Windows 2008 R2 (64 bits)"
OS_VISTA = "Windows Vista"
OS_VISTA_64 = "Windows Vista (64 bits)"
OS_W7 = "Windows 7"
OS_W7_64 = "Windows 7 (64 bits)"
OS_SEVEN = OS_W7
OS_SEVEN_64 = OS_W7_64
OS_WINDOWS_NT = OS_NT
OS_WINDOWS_2000 = OS_W2K
OS_WINDOWS_XP = OS_XP
OS_WINDOWS_XP_64 = OS_XP_64
OS_WINDOWS_2003 = OS_W2K3
OS_WINDOWS_2003_64 = OS_W2K3_64
OS_WINDOWS_2003_R2 = OS_W2K3R2
OS_WINDOWS_2003_R2_64 = OS_W2K3R2_64
OS_WINDOWS_2008 = OS_W2K8
OS_WINDOWS_2008_64 = OS_W2K8_64
OS_WINDOWS_2008_R2 = OS_W2K8R2
OS_WINDOWS_2008_R2_64 = OS_W2K8R2_64
OS_WINDOWS_VISTA = OS_VISTA
OS_WINDOWS_VISTA_64 = OS_VISTA_64
OS_WINDOWS_SEVEN = OS_W7
OS_WINDOWS_SEVEN_64 = OS_W7_64
def _get_bits():
"""
Determines the current integer size in bits.
    This is useful to know whether we're running on a 32 bit or a 64 bit machine.
@rtype: int
@return: Returns the size of L{SIZE_T} in bits.
"""
return sizeof(SIZE_T) * 8
def _get_arch():
"""
Determines the current processor architecture.
@rtype: str
@return:
On error, returns:
- L{ARCH_UNKNOWN} (C{"unknown"}) meaning the architecture could not be detected or is not known to WinAppDbg.
On success, returns one of the following values:
- L{ARCH_I386} (C{"i386"}) for Intel 32-bit x86 processor or compatible.
- L{ARCH_AMD64} (C{"amd64"}) for Intel 64-bit x86_64 processor or compatible.
May also return one of the following values if you get both Python and
WinAppDbg to work in such machines... let me know if you do! :)
- L{ARCH_MIPS} (C{"mips"}) for MIPS compatible processors.
- L{ARCH_ALPHA} (C{"alpha"}) for Alpha processors.
- L{ARCH_PPC} (C{"ppc"}) for PowerPC compatible processors.
- L{ARCH_SHX} (C{"shx"}) for Hitachi SH processors.
- L{ARCH_ARM} (C{"arm"}) for ARM compatible processors.
- L{ARCH_IA64} (C{"ia64"}) for Intel Itanium processor or compatible.
- L{ARCH_ALPHA64} (C{"alpha64"}) for Alpha64 processors.
- L{ARCH_MSIL} (C{"msil"}) for the .NET virtual machine.
- L{ARCH_SPARC} (C{"sparc"}) for Sun Sparc processors.
Probably IronPython returns C{ARCH_MSIL} but I haven't tried it. Python
on Windows CE and Windows Mobile should return C{ARCH_ARM}. Python on
Solaris using Wine would return C{ARCH_SPARC}. Python in an Itanium
machine should return C{ARCH_IA64} both on Wine and proper Windows.
All other values should only be returned on Linux using Wine.
"""
try:
si = GetNativeSystemInfo()
except Exception:
si = GetSystemInfo()
try:
return _arch_map[si.id.w.wProcessorArchitecture]
except KeyError:
return ARCH_UNKNOWN
def _get_wow64():
"""
Determines if the current process is running in Windows-On-Windows 64 bits.
@rtype: bool
    @return: C{True} if the current process is a 32 bit program running in a
        64 bit version of Windows, C{False} if it's either a 32 bit program
        in a 32 bit Windows or a 64 bit program in a 64 bit Windows.
"""
# Try to determine if the debugger itself is running on WOW64.
# On error assume False.
if bits == 64:
wow64 = False
else:
try:
wow64 = IsWow64Process( GetCurrentProcess() )
except Exception:
wow64 = False
return wow64
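# For reference, the combinations _get_wow64() distinguishes (illustrative):
#
#     Python process   Windows   IsWow64Process   _get_wow64()
#     32 bit           32 bit    False            False
#     32 bit           64 bit    True             True
#     64 bit           64 bit    (not queried)    False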
def _get_os(osvi = None):
"""
Determines the current operating system.
This function allows you to quickly tell apart major OS differences.
For more detailed information call L{GetVersionEx} instead.
@note:
Wine reports itself as Windows XP 32 bits
(even if the Linux host is 64 bits).
ReactOS may report itself as Windows 2000 or Windows XP,
depending on the version of ReactOS.
@type osvi: L{OSVERSIONINFOEXA}
@param osvi: Optional. The return value from L{GetVersionEx}.
@rtype: str
@return:
One of the following values:
- L{OS_UNKNOWN} (C{"Unknown"})
- L{OS_NT} (C{"Windows NT"})
- L{OS_W2K} (C{"Windows 2000"})
- L{OS_XP} (C{"Windows XP"})
- L{OS_XP_64} (C{"Windows XP (64 bits)"})
- L{OS_W2K3} (C{"Windows 2003"})
- L{OS_W2K3_64} (C{"Windows 2003 (64 bits)"})
- L{OS_W2K3R2} (C{"Windows 2003 R2"})
- L{OS_W2K3R2_64} (C{"Windows 2003 R2 (64 bits)"})
- L{OS_W2K8} (C{"Windows 2008"})
- L{OS_W2K8_64} (C{"Windows 2008 (64 bits)"})
- L{OS_W2K8R2} (C{"Windows 2008 R2"})
- L{OS_W2K8R2_64} (C{"Windows 2008 R2 (64 bits)"})
- L{OS_VISTA} (C{"Windows Vista"})
- L{OS_VISTA_64} (C{"Windows Vista (64 bits)"})
- L{OS_W7} (C{"Windows 7"})
- L{OS_W7_64} (C{"Windows 7 (64 bits)"})
"""
# rough port of http://msdn.microsoft.com/en-us/library/ms724429%28VS.85%29.aspx
if not osvi:
osvi = GetVersionEx()
if osvi.dwPlatformId == VER_PLATFORM_WIN32_NT and osvi.dwMajorVersion > 4:
if osvi.dwMajorVersion == 6:
if osvi.dwMinorVersion == 0:
if osvi.wProductType == VER_NT_WORKSTATION:
if bits == 64 or wow64:
return 'Windows Vista (64 bits)'
return 'Windows Vista'
else:
if bits == 64 or wow64:
return 'Windows 2008 (64 bits)'
return 'Windows 2008'
if osvi.dwMinorVersion == 1:
if osvi.wProductType == VER_NT_WORKSTATION:
if bits == 64 or wow64:
return 'Windows 7 (64 bits)'
return 'Windows 7'
else:
if bits == 64 or wow64:
return 'Windows 2008 R2 (64 bits)'
return 'Windows 2008 R2'
if osvi.dwMajorVersion == 5:
if osvi.dwMinorVersion == 2:
if GetSystemMetrics(SM_SERVERR2):
if bits == 64 or wow64:
return 'Windows 2003 R2 (64 bits)'
return 'Windows 2003 R2'
                if osvi.wSuiteMask & (VER_SUITE_STORAGE_SERVER | VER_SUITE_WH_SERVER):
if bits == 64 or wow64:
return 'Windows 2003 (64 bits)'
return 'Windows 2003'
if osvi.wProductType == VER_NT_WORKSTATION and arch == ARCH_AMD64:
return 'Windows XP (64 bits)'
else:
if bits == 64 or wow64:
return 'Windows 2003 (64 bits)'
return 'Windows 2003'
if osvi.dwMinorVersion == 1:
return 'Windows XP'
if osvi.dwMinorVersion == 0:
return 'Windows 2000'
if osvi.dwMajorVersion == 4:
return 'Windows NT'
return 'Unknown'
def _get_ntddi(osvi = None):
    """
    Determines the current operating system version as an NTDDI constant,
    packing the OS major and minor version and the service pack major and
    minor version into a single integer (as the C{NTDDI_VERSION} macro from
    C{sdkddkver.h} does). For more detailed information call L{GetVersionEx}
    instead.
    @note:
        Wine reports itself as Windows XP 32 bits
        (even if the Linux host is 64 bits).
        ReactOS may report itself as Windows 2000 or Windows XP,
        depending on the version of ReactOS.
    @type osvi: L{OSVERSIONINFOEXA}
    @param osvi: Optional. The return value from L{GetVersionEx}.
    @rtype: int
    @return: NTDDI version number.
    """
if not osvi:
osvi = GetVersionEx()
ntddi = 0
ntddi += (osvi.dwMajorVersion & 0xFF) << 24
ntddi += (osvi.dwMinorVersion & 0xFF) << 16
ntddi += (osvi.wServicePackMajor & 0xFF) << 8
ntddi += (osvi.wServicePackMinor & 0xFF)
return ntddi
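# Worked example of the packing above: Windows 7 SP1 reports
# dwMajorVersion=6, dwMinorVersion=1, wServicePackMajor=1,
# wServicePackMinor=0, which packs as
#
#     (6 << 24) + (1 << 16) + (1 << 8) + 0  ==  0x06010100
#
# The upper word (0x0601) is what ends up in WINVER below.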
# The order of the following definitions DOES matter!
# Current integer size in bits. See L{_get_bits} for more details.
bits = _get_bits()
# Current processor architecture. See L{_get_arch} for more details.
arch = _get_arch()
# Set to C{True} if the current process is running in WOW64. See L{_get_wow64} for more details.
wow64 = _get_wow64()
_osvi = GetVersionEx()
# Current operating system. See L{_get_os} for more details.
os = _get_os(_osvi)
# Current operating system as an NTDDI constant. See L{_get_ntddi} for more details.
NTDDI_VERSION = _get_ntddi(_osvi)
# Upper word of L{NTDDI_VERSION}, contains the OS major and minor version number.
WINVER = NTDDI_VERSION >> 16
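# Typical client-side checks against these module-level globals (sketch only;
# the thresholds are illustrative):
#
#     if bits == 64 and not wow64:
#         pass  # native 64 bit process on 64 bit Windows
#     if arch == ARCH_AMD64:
#         pass  # x86_64 processor
#     if NTDDI_VERSION >= 0x06000000:
#         pass  # Windows Vista or later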
#--- version.dll --------------------------------------------------------------
VS_FF_DEBUG = 0x00000001
VS_FF_PRERELEASE = 0x00000002
VS_FF_PATCHED = 0x00000004
VS_FF_PRIVATEBUILD = 0x00000008
VS_FF_INFOINFERRED = 0x00000010
VS_FF_SPECIALBUILD = 0x00000020
VOS_UNKNOWN = 0x00000000
VOS__WINDOWS16 = 0x00000001
VOS__PM16 = 0x00000002
VOS__PM32 = 0x00000003
VOS__WINDOWS32 = 0x00000004
VOS_DOS = 0x00010000
VOS_OS216 = 0x00020000
VOS_OS232 = 0x00030000
VOS_NT = 0x00040000
VOS_DOS_WINDOWS16 = 0x00010001
VOS_DOS_WINDOWS32 = 0x00010004
VOS_NT_WINDOWS32 = 0x00040004
VOS_OS216_PM16 = 0x00020002
VOS_OS232_PM32 = 0x00030003
VFT_UNKNOWN = 0x00000000
VFT_APP = 0x00000001
VFT_DLL = 0x00000002
VFT_DRV = 0x00000003
VFT_FONT = 0x00000004
VFT_VXD = 0x00000005
VFT_RESERVED = 0x00000006 # undocumented
VFT_STATIC_LIB = 0x00000007
VFT2_UNKNOWN = 0x00000000
VFT2_DRV_PRINTER = 0x00000001
VFT2_DRV_KEYBOARD = 0x00000002
VFT2_DRV_LANGUAGE = 0x00000003
VFT2_DRV_DISPLAY = 0x00000004
VFT2_DRV_MOUSE = 0x00000005
VFT2_DRV_NETWORK = 0x00000006
VFT2_DRV_SYSTEM = 0x00000007
VFT2_DRV_INSTALLABLE = 0x00000008
VFT2_DRV_SOUND = 0x00000009
VFT2_DRV_COMM = 0x0000000A
VFT2_DRV_RESERVED = 0x0000000B # undocumented
VFT2_DRV_VERSIONED_PRINTER = 0x0000000C
VFT2_FONT_RASTER = 0x00000001
VFT2_FONT_VECTOR = 0x00000002
VFT2_FONT_TRUETYPE = 0x00000003
# typedef struct tagVS_FIXEDFILEINFO {
# DWORD dwSignature;
# DWORD dwStrucVersion;
# DWORD dwFileVersionMS;
# DWORD dwFileVersionLS;
# DWORD dwProductVersionMS;
# DWORD dwProductVersionLS;
# DWORD dwFileFlagsMask;
# DWORD dwFileFlags;
# DWORD dwFileOS;
# DWORD dwFileType;
# DWORD dwFileSubtype;
# DWORD dwFileDateMS;
# DWORD dwFileDateLS;
# } VS_FIXEDFILEINFO;
class VS_FIXEDFILEINFO(Structure):
_fields_ = [
("dwSignature", DWORD),
("dwStrucVersion", DWORD),
("dwFileVersionMS", DWORD),
("dwFileVersionLS", DWORD),
("dwProductVersionMS", DWORD),
("dwProductVersionLS", DWORD),
("dwFileFlagsMask", DWORD),
("dwFileFlags", DWORD),
("dwFileOS", DWORD),
("dwFileType", DWORD),
("dwFileSubtype", DWORD),
("dwFileDateMS", DWORD),
("dwFileDateLS", DWORD),
]
PVS_FIXEDFILEINFO = POINTER(VS_FIXEDFILEINFO)
LPVS_FIXEDFILEINFO = PVS_FIXEDFILEINFO
# BOOL WINAPI GetFileVersionInfo(
# _In_ LPCTSTR lptstrFilename,
# _Reserved_ DWORD dwHandle,
# _In_ DWORD dwLen,
# _Out_ LPVOID lpData
# );
# DWORD WINAPI GetFileVersionInfoSize(
# _In_ LPCTSTR lptstrFilename,
# _Out_opt_ LPDWORD lpdwHandle
# );
def GetFileVersionInfoA(lptstrFilename):
_GetFileVersionInfoA = windll.version.GetFileVersionInfoA
_GetFileVersionInfoA.argtypes = [LPSTR, DWORD, DWORD, LPVOID]
_GetFileVersionInfoA.restype = bool
_GetFileVersionInfoA.errcheck = RaiseIfZero
_GetFileVersionInfoSizeA = windll.version.GetFileVersionInfoSizeA
_GetFileVersionInfoSizeA.argtypes = [LPSTR, LPVOID]
_GetFileVersionInfoSizeA.restype = DWORD
_GetFileVersionInfoSizeA.errcheck = RaiseIfZero
dwLen = _GetFileVersionInfoSizeA(lptstrFilename, None)
lpData = ctypes.create_string_buffer(dwLen)
_GetFileVersionInfoA(lptstrFilename, 0, dwLen, byref(lpData))
return lpData
def GetFileVersionInfoW(lptstrFilename):
_GetFileVersionInfoW = windll.version.GetFileVersionInfoW
_GetFileVersionInfoW.argtypes = [LPWSTR, DWORD, DWORD, LPVOID]
_GetFileVersionInfoW.restype = bool
_GetFileVersionInfoW.errcheck = RaiseIfZero
_GetFileVersionInfoSizeW = windll.version.GetFileVersionInfoSizeW
_GetFileVersionInfoSizeW.argtypes = [LPWSTR, LPVOID]
_GetFileVersionInfoSizeW.restype = DWORD
_GetFileVersionInfoSizeW.errcheck = RaiseIfZero
dwLen = _GetFileVersionInfoSizeW(lptstrFilename, None)
lpData = ctypes.create_string_buffer(dwLen) # not a string!
_GetFileVersionInfoW(lptstrFilename, 0, dwLen, byref(lpData))
return lpData
GetFileVersionInfo = GuessStringType(GetFileVersionInfoA, GetFileVersionInfoW)
# BOOL WINAPI VerQueryValue(
# _In_ LPCVOID pBlock,
# _In_ LPCTSTR lpSubBlock,
# _Out_ LPVOID *lplpBuffer,
# _Out_ PUINT puLen
# );
def VerQueryValueA(pBlock, lpSubBlock):
_VerQueryValueA = windll.version.VerQueryValueA
_VerQueryValueA.argtypes = [LPVOID, LPSTR, LPVOID, POINTER(UINT)]
_VerQueryValueA.restype = bool
_VerQueryValueA.errcheck = RaiseIfZero
lpBuffer = LPVOID(0)
uLen = UINT(0)
_VerQueryValueA(pBlock, lpSubBlock, byref(lpBuffer), byref(uLen))
return lpBuffer, uLen.value
def VerQueryValueW(pBlock, lpSubBlock):
_VerQueryValueW = windll.version.VerQueryValueW
_VerQueryValueW.argtypes = [LPVOID, LPWSTR, LPVOID, POINTER(UINT)]
_VerQueryValueW.restype = bool
_VerQueryValueW.errcheck = RaiseIfZero
lpBuffer = LPVOID(0)
uLen = UINT(0)
_VerQueryValueW(pBlock, lpSubBlock, byref(lpBuffer), byref(uLen))
return lpBuffer, uLen.value
VerQueryValue = GuessStringType(VerQueryValueA, VerQueryValueW)
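# Sketch of how GetFileVersionInfo() and VerQueryValue() combine to read the
# fixed version block of a module. The path and the field decoding below are
# illustrative only:
#
#     pBlock = GetFileVersionInfoW(u"C:\\Windows\\System32\\kernel32.dll")
#     pBuffer, dwLen = VerQueryValueW(pBlock, u"\\")
#     if dwLen >= sizeof(VS_FIXEDFILEINFO):
#         ffi   = ctypes.cast(pBuffer, PVS_FIXEDFILEINFO).contents
#         major = ffi.dwFileVersionMS >> 16
#         minor = ffi.dwFileVersionMS & 0xFFFF
#         build = ffi.dwFileVersionLS >> 16
#         rev   = ffi.dwFileVersionLS & 0xFFFF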
#==============================================================================
# This calculates the list of exported symbols.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
|
debasishm89/OpenXMolar
|
ExtDepLibs/winappdbg/win32/version.py
|
Python
|
bsd-3-clause
| 36,774
|
from tastypie import fields
from tastypie.resources import ModelResource
from public_project.models import ProjectPart, Question, Participant, Event, Page, Document
class ProjectPartsResource(ModelResource):
class Meta:
queryset = ProjectPart.objects.all()
resource_name = 'project_parts'
excludes = ['comments',]
allowed_methods = ['get',]
class QuestionsResource(ModelResource):
class Meta:
queryset = Question.objects.all()
resource_name = 'questions'
excludes = ['comments',]
allowed_methods = ['get',]
class ParticipantsResource(ModelResource):
class Meta:
queryset = Participant.objects.all()
resource_name = 'participants'
excludes = ['comments',]
allowed_methods = ['get',]
class EventsResource(ModelResource):
class Meta:
queryset = Event.objects.all()
resource_name = 'events'
excludes = ['comments',]
allowed_methods = ['get',]
class PagesResource(ModelResource):
class Meta:
queryset = Page.objects.all()
resource_name = 'pages'
allowed_methods = ['get',]
class DocumentsResource(ModelResource):
pages = fields.ToManyField(PagesResource, 'page_set', null=True, full=True)
class Meta:
queryset = Document.objects.all()
limit = 5
resource_name = 'documents'
excludes = ['pdf_images_generated', 'document', 'comments',]
allowed_methods = ['get',]
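# One way to wire these resources into a project's urls.py (a sketch; the
# api_name and the URL prefix are illustrative):
#
#     from django.conf.urls import url, include
#     from tastypie.api import Api
#     from public_project.api import (ProjectPartsResource, QuestionsResource,
#         ParticipantsResource, EventsResource, DocumentsResource)
#
#     v1_api = Api(api_name='v1')
#     v1_api.register(ProjectPartsResource())
#     v1_api.register(QuestionsResource())
#     v1_api.register(ParticipantsResource())
#     v1_api.register(EventsResource())
#     v1_api.register(DocumentsResource())
#
#     urlpatterns += [url(r'^api/', include(v1_api.urls))]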
|
holgerd77/django-public-project
|
public_project/api.py
|
Python
|
bsd-3-clause
| 1,526
|
#!/usr/bin/env python
import os
import math
import rosbag
import actionlib
import rospy
from trajectory_msgs.msg import *
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached
# from flexbe_behaviors.atlas_definitions import AtlasDefinitions
'''
Created on 04/26/2015
@author: Moritz Schappler
'''
class GenerateTrajectoryFromTxtfileState(EventState):
'''
Implements a state that loads (arm) trajectories stored in a text file.
Copied from LoadTrajectoryFromBagfileState
    -- chains list() List of joint chain names like "left_arm", "right_arm", ...
    -- transitiontime int Time between two positions.
    -- settlingtime int Time after reaching a position.
    ># txtfilepaths dict() Dict with chain names as keys and text file paths as values.
    #> trajectories JointTrajectory{} A dictionary where the keys are ['left_arm', 'right_arm'] and each has a trajectory as the value.
    <= done Trajectory has been successfully loaded from the text file.
    <= failed Failed to load trajectory.
'''
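    # Expected text file layout (illustrative): one whitespace-separated row of
    # joint angles per trajectory point, one column per joint of the chain
    # (7 columns for an Atlas arm), e.g. for 'left_arm':
    #
    #     0.0  -1.3  2.0  0.5  0.0  0.0  0.0
    #     0.1  -1.2  2.0  0.5  0.0  0.0  0.0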
def __init__(self, chains, transitiontime, settlingtime):
'''Constructor'''
super(GenerateTrajectoryFromTxtfileState, self).__init__(outcomes = ['done', 'failed'],
input_keys = ['txtfilepaths'],
output_keys = ['trajectories'])
self._trajectories = dict() # Store trajectories here until writing to userdata
self._chains = chains
self._transitiontime = transitiontime
self._settlingtime = settlingtime
self._failed = False
self._done = False
def execute(self, userdata):
'''Code to be executed while SM is in this state.'''
if self._failed:
return 'failed'
if self._done:
return 'done'
if len(self._trajectories) != 0:
self._done = True
return 'done'
else:
#Logger.logwarn('Looks like the text files might have been empty!?')
self._failed = True
return 'failed'
    def on_enter(self, userdata):
        '''Upon entering the state, load trajectories from the text files and write the goal message.'''
        try:
            self._done = False
            self._failed = False
            self.txtfilepaths = userdata.txtfilepaths
            # Definitions: column indices of the arm joints in the Atlas joint name list below.
            l_arm_range = range(16, 23)
            r_arm_range = range(23, 30)
atlasJointNames = [
'back_bkz', 'back_bky', 'back_bkx', 'neck_ry',
'l_leg_hpz', 'l_leg_hpx', 'l_leg_hpy', 'l_leg_kny', 'l_leg_aky', 'l_leg_akx',
'r_leg_hpz', 'r_leg_hpx', 'r_leg_hpy', 'r_leg_kny', 'r_leg_aky', 'r_leg_akx',
'l_arm_shz', 'l_arm_shx', 'l_arm_ely', 'l_arm_elx', 'l_arm_wry', 'l_arm_wrx', 'l_arm_wry2',
'r_arm_shz', 'r_arm_shx', 'r_arm_ely', 'r_arm_elx', 'r_arm_wry', 'r_arm_wrx', 'r_arm_wry2']
# Set Trajectory Message Header
# has to be zero, so that appended trajectories have the same starting time
# has to be the same as below to support rosbag play
for chain in self._chains:
t = 0.0 # time after start
# Initialize Trajectory Message
jt = JointTrajectory()
jt.header.stamp = rospy.rostime.Time.from_sec(0.1)
if chain == 'left_arm':
joint_range = l_arm_range
elif chain == 'right_arm':
joint_range = r_arm_range
                else:
                    Logger.logwarn('GenerateTrajectoryFromTxtfileState: Undefined chain %s' % chain)
                    self._failed = True
                    return
txtfile = os.path.expanduser(self.txtfilepaths[chain])
# Add Joint Names depending on chain
for i in joint_range:
jt.joint_names.append(atlasJointNames[i])
line_number = 0
with open(txtfile) as f:
#Logger.loginfo('Accessing txtfile: %s' % txtfile)
# Loop over lines and extract variables of interest
for line in f:
                    line_number += 1
# get line of text file
line = line.strip()
columns = line.split()
# Check number of columns
if len(columns) != len(joint_range):
Logger.logwarn('Input %s contains %d columns in line %d. Expected %d.' % (txtfile, len(columns), line_number, len(joint_range)) )
self._failed = True
return
# assemble Joint Trajectory message
for t_add in [self._transitiontime, self._settlingtime]:
p = JointTrajectoryPoint()
# Set Time
t = float(t) + float(t_add)
p.time_from_start = rospy.rostime.Time.from_sec(t)
# append joint positions
                        for j in range(len(joint_range)):
                            p.positions.append(float(columns[j]))
                            p.velocities.append(0.0)
                            p.accelerations.append(0.0)
                            p.effort.append(0.0)
# append point to trajectory
jt.points.append(p)
# end trajectory points assembly
self._trajectories[chain] = jt
if line_number == 0:
Logger.logwarn('Loaded only %d lines from %s. Something went wrong.' % (line_number, txtfile) )
self._failed = True
return
userdata.trajectories = self._trajectories
except Exception as e:
Logger.logwarn('Could not load trajectory from text file because:\n %s' % str(e))
self._failed = True
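# Sketch of how this state might be added to a FlexBE behavior. The state
# labels, userdata keys, timing values and autonomy levels below are
# illustrative only:
#
#     OperatableStateMachine.add('Load_Arm_Trajectories',
#         GenerateTrajectoryFromTxtfileState(chains=['left_arm', 'right_arm'],
#                                            transitiontime=3, settlingtime=1),
#         transitions={'done': 'Execute_Trajectory', 'failed': 'failed'},
#         autonomy={'done': Autonomy.Low, 'failed': Autonomy.High},
#         remapping={'txtfilepaths': 'txtfilepaths',
#                    'trajectories': 'trajectories'})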
|
team-vigir/vigir_behaviors
|
vigir_flexbe_states/src/vigir_flexbe_states/generate_trajectory_from_txtfile_state.py
|
Python
|
bsd-3-clause
| 4,998
|
from setuptools import setup, find_packages
from pyvarnish import __author__, __version__
setup(
name='pyvarnish',
version=__version__,
description='Varnish Management',
long_description=open('README.rst').read(),
author=__author__,
author_email='john@8t8.eu',
url='https://github.com/redsnapper8t8/pyvarnish',
license='BSD',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
entry_points = {
'console_scripts': [
'pyvarnish = pyvarnish.parse_stats:main',
],
},
install_requires = [
'lxml',
'paramiko',
],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
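# After installation, the entry point above exposes a "pyvarnish" console
# command mapped to pyvarnish.parse_stats:main (a sketch; the exact flags
# depend on that module):
#
#     $ pip install .
#     $ pyvarnish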
|
redsnapper8t8/pyvarnish
|
setup.py
|
Python
|
bsd-3-clause
| 967
|