code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python3
from setuptools import setup, find_packages

# Read the README once; the context manager closes the file promptly.
# (The original ``open('README.md').readlines()`` leaked the file handle.)
with open('README.md') as readme_file:
    README = readme_file.readlines()

setup(
    name='django-msgpackfield',
    version='0.15',
    packages=find_packages(),
    include_package_data=True,
    license='MIT License',
    # Line 3 of README.md is used as the one-line package description.
    description=README[2].rstrip('\n'),
    long_description=''.join(README),
    url='https://github.com/vakorol/django-msgpackfield',
    author='Vasili Korol',
    author_email='vakorol@mail.ru',
    install_requires=(
        'django>=1.8',
        'msgpack-python>=0.5',
    ),
    classifiers=[
        'Framework :: Django',
        'Framework :: Django :: 2.0',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'License :: OSI Approved :: MIT License',
        'Topic :: Utilities',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Development Status :: 4 - Beta',
    ],
)
| vakorol/django-msgpackfield | setup.py | Python | mit | 1,098 |
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('spheretrax_ros')
import rospy
from spheretrax_ros.spheretrax_publisher import SphereTrax_Publisher

# Exercise SphereTrax_Publisher by publishing a synthetic, steadily
# increasing data record ten times per second until ROS shuts down.
rospy.init_node('spheretrax_publisher_test')
pub = SphereTrax_Publisher()

cnt = 0
data = {}
while not rospy.is_shutdown():
    print(cnt)
    data.update({
        'framenumber': cnt,
        'timestamp': float(cnt),
        'omega_x': cnt,
        'omega_y': cnt + 1,
        'omega_z': cnt + 2,
        'forw_rate': 2 * cnt,
        'head_rate': 4 * cnt,
        'side_rate': 6 * cnt,
    })
    pub.publish_data(data)
    cnt += 1
    rospy.sleep(0.1)
| willdickson/spheretrax_ros | nodes/test.py | Python | bsd-2-clause | 636 |
# drv_pylab.py
#
# Copyright 2009 charley <charley@hosts-137-205-164-145.phys.warwick.ac.uk>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# This file contain a number of drivers classes to matplotlib.
from matplotlib.pyplot import figure
from matplotlib import cm
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
from matplotlib import rcParams
from numpy import linspace
from numpy import logspace
from numpy import sign
from numpy import average
from numpy import dtype
from numpy import real
from numpy import imag
from numpy import abs
from numpy import angle
from numpy import log10
from numpy import unwrap
from numpy import arctan2
from numpy import imag
from numpy import real
from scipy.signal import freqz
from misc import unitPrefix, prettyunit
def plotcomplexpolar(metaAry, size=(10, 7.5), dpi=75, grid=True, legend=0, fontsize=15):
    """
    metaArray function to do a simple 1D plot of complex array as magnitude
    and phase angle.

    legend:
        'best'          0
        'upper right'   1
        'upper left'    2
        'lower left'    3
        'lower right'   4
        'right'         5
        'center left'   6
        'center right'  7
        'lower center'  8
        'upper center'  9
        'center'        10

    Returns (fig, host, par): host carries the magnitude trace, par (a
    twinned y-axis) carries the phase trace.
    """
    if legend is None:
        legend = 0

    axis = metaAry['range']
    mag = abs(metaAry.data)
    pha = angle(metaAry.data, deg=True)

    # Load the plotting ranges and units
    x0 = axis['begin'][0]
    x1 = axis['end'][0]
    my0 = min(mag)
    my1 = max(mag)
    py0 = min(pha)
    py1 = max(pha)
    xunit = axis['unit'][0]
    myunit = metaAry['unit']
    pyunit = 'Degree'

    # Leave 10% margin in the y axis
    mmean = average((my0, my1))
    mreach = abs(my0 - my1) / 2 / 0.9
    my0 = sign(my0 - mmean) * mreach + mmean
    my1 = sign(my1 - mmean) * mreach + mmean
    pmean = average((py0, py1))
    preach = abs(py0 - py1) / 2 / 0.9
    py0 = sign(py0 - pmean) * preach + pmean
    py1 = sign(py1 - pmean) * preach + pmean

    # Apply unit prefix if unit is defined
    xunit, x0, x1, xscale = prettyunit(xunit, x0, x1)
    myunit, my0, my1, myscale = prettyunit(myunit, my0, my1)
    pyunit, py0, py1, pyscale = prettyunit(pyunit, py0, py1)
    if myscale != 1:
        mag = mag * myscale
    if pyscale != 1:
        pha = pha.copy() * pyscale

    xlabl = lbl_repr(axis['label'][0], xunit)
    mylabl = lbl_repr(metaAry['label'], myunit, "Magnitude")
    pylabl = lbl_repr(metaAry['label'], pyunit, "Phase angle")
    title = metaAry['name']

    fig = figure(figsize=size, dpi=dpi)
    host = SubplotHost(fig, 111)
    fig.add_subplot(host)
    par = host.twinx()

    if axis['log'][0] is False:
        x = linspace(x0, x1, len(metaAry))
    else:
        # Fixed: was `raise NotImplemented, "..."` -- Python 2-only syntax,
        # and NotImplemented is a singleton, not an exception class.
        raise NotImplementedError("Log axis is not yet implemented.")

    host.plot(x, mag, 'b-', label=lbl_repr(axis['label'][0], '', "Magnitude"))
    par.plot(x, pha, 'r--', label=lbl_repr(axis['label'][0], '', "Phase"))
    host.grid(grid)
    host.set_xlabel(xlabl, fontsize=fontsize)
    host.set_ylabel(mylabl, fontsize=fontsize)
    par.set_ylabel(pylabl, fontsize=fontsize)
    host.set_xlim([x0, x1])
    host.set_ylim([my0, my1])
    par.set_ylim([py0, py1])
    if fontsize is not None:
        host.set_title(title, fontsize=int(fontsize * 1.3))
    else:
        host.set_title(title)
    if legend >= 0:
        host.legend(loc=legend)
    return fig, host, par
def plotcomplex(metaAry, size=(10, 7.5), dpi=75, grid=True, legend=0, fontsize=15):
    """
    metaArray function to do a simple 1D plot of complex array as real and
    imaginary parts.

    legend:
        'best'          0
        'upper right'   1
        'upper left'    2
        'lower left'    3
        'lower right'   4
        'right'         5
        'center left'   6
        'center right'  7
        'lower center'  8
        'upper center'  9
        'center'        10

    Returns (fig, host, par): host carries the real trace, par (a twinned
    y-axis) carries the imaginary trace.
    """
    if legend is None:
        legend = 0

    axis = metaAry['range']
    rdata = metaAry.data.real
    idata = metaAry.data.imag

    # Load the plotting ranges and units
    x0 = axis['begin'][0]
    x1 = axis['end'][0]
    ry0 = min(rdata)
    ry1 = max(rdata)
    iy0 = min(idata)
    iy1 = max(idata)
    xunit = axis['unit'][0]
    ryunit = metaAry['unit']
    iyunit = metaAry['unit']

    # Leave 10% margin in the y axis
    rmean = average((ry0, ry1))
    rreach = abs(ry0 - ry1) / 2 / 0.9
    ry0 = sign(ry0 - rmean) * rreach + rmean
    ry1 = sign(ry1 - rmean) * rreach + rmean
    imean = average((iy0, iy1))
    ireach = abs(iy0 - iy1) / 2 / 0.9
    iy0 = sign(iy0 - imean) * ireach + imean
    iy1 = sign(iy1 - imean) * ireach + imean

    # Apply unit prefix if unit is defined
    xunit, x0, x1, xscale = prettyunit(xunit, x0, x1)
    ryunit, ry0, ry1, ryscale = prettyunit(ryunit, ry0, ry1)
    iyunit, iy0, iy1, iyscale = prettyunit(iyunit, iy0, iy1)
    if ryscale != 1:
        rdata = rdata.copy() * ryscale
    if iyscale != 1:
        idata = idata.copy() * iyscale

    xlabl = lbl_repr(axis['label'][0], xunit)
    rylabl = lbl_repr(metaAry['label'], ryunit, "Real part")
    iylabl = lbl_repr(metaAry['label'], iyunit, "Imaginary part")
    title = metaAry['name']

    fig = figure(figsize=size, dpi=dpi)
    host = SubplotHost(fig, 111)
    fig.add_subplot(host)
    par = host.twinx()

    if axis['log'][0] is False:
        x = linspace(x0, x1, len(metaAry))
    else:
        # Fixed: was `raise NotImplemented, "..."` -- Python 2-only syntax,
        # and NotImplemented is a singleton, not an exception class.
        raise NotImplementedError("Log axis is not yet implemented.")

    host.plot(x, rdata, 'b-', label=lbl_repr(axis['label'][0], '', "Real"))
    par.plot(x, idata, 'r--', label=lbl_repr(axis['label'][0], '', "Imaginary"))
    host.grid(grid)
    host.set_xlabel(xlabl, fontsize=fontsize)
    host.set_ylabel(rylabl, fontsize=fontsize)
    par.set_ylabel(iylabl, fontsize=fontsize)
    host.set_xlim([x0, x1])
    host.set_ylim([ry0, ry1])
    par.set_ylim([iy0, iy1])
    if fontsize is not None:
        host.set_title(title, fontsize=int(fontsize * 1.3))
    else:
        host.set_title(title)
    if legend >= 0:
        host.legend(loc=legend)
    return fig, host, par
def plot1d(metaAry, size=(10, 7.5), dpi=75, grid=True, legend=None, fontsize=15,
           fig=None, ax=None, label=None):
    """
    metaArray function to do a simple 1D plot.

    legend:
        'best'          0
        'upper right'   1
        'upper left'    2
        'lower left'    3
        'lower right'   4
        'right'         5
        'center left'   6
        'center right'  7
        'lower center'  8
        'upper center'  9
        'center'        10

    label   Label for the legend display, default to metaAry['range']['label'][0]

    fig/ax may be passed in to overlay this trace on an existing plot; the
    axis limits are then grown to cover both data sets.
    Returns (fig, ax).
    """
    # Complex data is delegated to the real/imaginary twin-axis plot.
    if metaAry.dtype is dtype('complex'):
        return plotcomplex(metaAry, size=size, dpi=dpi, grid=grid, legend=legend, fontsize=fontsize)

    if legend is None:
        legend = -1

    axis = metaAry['range']
    data = metaAry.data

    # Load the plotting ranges and units
    x0 = axis['begin'][0]
    x1 = axis['end'][0]
    y0 = min(metaAry.data)
    y1 = max(metaAry.data)
    xunit = axis['unit'][0]
    yunit = metaAry['unit']

    # Leave 10% margin in the y axis
    mean = average((y0, y1))
    reach = abs(y0 - y1) / 2 / 0.9
    y0 = sign(y0 - mean) * reach + mean
    y1 = sign(y1 - mean) * reach + mean

    # Apply unit prefix if unit is defined
    xunit, x0, x1, xscale = prettyunit(xunit, x0, x1)
    yunit, y0, y1, yscale = prettyunit(yunit, y0, y1)
    if yscale != 1:
        data = data.copy() * yscale

    xlabl = lbl_repr(axis['label'][0], xunit)
    ylabl = lbl_repr(metaAry['label'], yunit)
    title = metaAry['name']

    if fig is None:
        fig = figure(figsize=size, dpi=dpi)
    if ax is None:
        ax = fig.add_subplot(111)
    else:
        # Re-using an existing axis: grow the limits to cover both plots.
        x00, x01 = ax.get_xlim()
        y00, y01 = ax.get_ylim()
        x0 = min((x0, x00))
        y0 = min((y0, y00))
        x1 = max((x1, x01))
        y1 = max((y1, y01))

    if axis['log'][0] is False:
        x = linspace(x0, x1, len(metaAry))
    else:
        # Fixed: was `raise NotImplemented` -- NotImplemented is a
        # singleton, not an exception class (TypeError on Python 3).
        raise NotImplementedError

    if label is None:
        label = axis['label'][0]

    ax.plot(x, data, label=label)
    ax.grid(grid)
    ax.set_xlabel(xlabl, fontsize=fontsize)
    ax.set_ylabel(ylabl, fontsize=fontsize)
    ax.set_xlim([x0, x1])
    ax.set_ylim([y0, y1])
    if fontsize is not None:
        ax.set_title(title, fontsize=int(fontsize * 1.3))
    else:
        ax.set_title(title)
    if legend >= 0:
        ax.legend(loc=legend)
    return fig, ax
def plot2d(metaAry, size=(10, 7.5), dpi=75, fontsize=15, cmap=None,
           nticks=5, aspect_ratio=1.0, corient='vertical', cformat=None,
           vmin=None, vmax=None):
    """
    metaArray function to do a simple 2D plot.

    cmap            Colour map, default is pyplot.cm.spectral
    nticks          Number of ticks in the colour bar
    aspect_ratio    Aspect ratio of the plot {float|'ij'|'xy'}
                    float: Fixed aspect ratio by the given number
                    'ij':  Same aspect ratio as ij space
                    'xy':  Same aspect ratio as xy space
    corient         Colorbar orientation ('vertical'|'horizontal')
    cformat         Colorbar format [ None | format string | Formatter object ]
    vmin            Minimum value for the colour scale
    vmax            Maximum value for the colour scale

    Returns (fig, ax).
    """
    if cmap is None:
        # NOTE(review): cm.spectral was removed in matplotlib >= 2.2
        # (nipy_spectral is its successor); kept as-is for old installs.
        cmap = cm.spectral

    # Anything other than 'horizontal' falls back to a vertical colorbar.
    # Fixed: the original tested `corient is not 'horizontalt'` -- a typo
    # plus an identity comparison -- which made 'horizontal' unreachable.
    if corient != 'horizontal':
        corient = 'vertical'

    axis = metaAry['range']
    data = metaAry.data

    # Load the plotting ranges and units
    x0 = axis['begin'][0]
    x1 = axis['end'][0]
    y0 = axis['begin'][1]
    y1 = axis['end'][1]

    if vmin is None:
        v0 = metaAry.data.min()
    else:
        v0 = vmin
    if vmax is None:
        v1 = metaAry.data.max()
    else:
        v1 = vmax

    xunit = axis['unit'][0]
    yunit = axis['unit'][1]
    vunit = metaAry['unit']

    # Apply unit prefix if unit is defined
    xunit, x0, x1, xscale = prettyunit(xunit, x0, x1)
    yunit, y0, y1, yscale = prettyunit(yunit, y0, y1)
    vunit, v0, v1, vscale = prettyunit(vunit, v0, v1)
    if vscale != 1:
        data = data.copy() * vscale

    xlabl = lbl_repr(axis['label'][0], xunit)
    ylabl = lbl_repr(axis['label'][1], yunit)
    vlabl = lbl_repr(metaAry['label'], vunit)

    # Colorbar tick positions and 4-significant-digit labels.
    ticks = linspace(v0, v1, nticks)
    ticks_lbl = []
    for i in range(nticks):
        ticks_lbl.append("%(val)0.4g" % {'val': ticks[i]})

    # Aspect ratio of the plot
    if aspect_ratio == 'ij':
        ratio = data.shape
        ratio = float(ratio[1]) / ratio[0]
    elif aspect_ratio == 'xy':
        ratio = float(y1 - y0) / float(x1 - x0)
    else:
        try:
            ratio = float(aspect_ratio)
        except (TypeError, ValueError):
            # Narrowed from a bare except: only conversion failures expected.
            print("*** Warning! Unrecognisable aspect ratio spec. Using the default instead.")
            ratio = 1.0
    # imshow's `aspect` is in data coordinates; normalise by the data extent.
    ratio /= float(y1 - y0) / float(x1 - x0)
    ratio = abs(ratio)

    # Make plot with vertical (default) colorbar
    fig = figure(figsize=size, dpi=dpi)
    ax = fig.add_subplot(111)
    extent = (x0, x1, y0, y1)
    cax = ax.imshow(data.transpose()[::-1], cmap=cmap, extent=extent,
                    interpolation='bicubic', vmin=v0, vmax=v1, aspect=ratio)
    cbar = fig.colorbar(cax, ticks=ticks, orientation=corient, format=cformat)

    ax.set_xlabel(xlabl, fontsize=fontsize)  # Label font size
    ax.set_ylabel(ylabl, fontsize=fontsize)
    rcParams.update({'font.size': fontsize})  # Value font size

    # Add colorbar, make sure to specify tick locations to match desired ticklabels
    cbar.ax.set_yticklabels(ticks_lbl)
    cbar.set_label(vlabl, fontsize=fontsize)

    if fontsize is not None:
        ax.set_title(metaAry['name'], fontsize=int(fontsize * 1.3))
    else:
        ax.set_title(metaAry['name'])
    return fig, ax
def lbl_repr(label = None, unit = None, string = None):
    """
    Format axis label and unit into a nice looking string.

    string: Additional qualifier placed in [brackets] between label and unit.
    Non-string arguments are tolerated: a bad label/string is skipped, and a
    bad (undefined) unit is rendered as '(Arb.)'.
    """
    out = ''
    try:
        out += label                # silently skip a non-string label
    except TypeError:
        pass
    try:
        out += ' [' + string + ']'  # optional qualifier, if given
    except TypeError:
        pass
    try:
        if unit != '':              # unit-less quantities get no suffix
            out += ' (' + unit + ')'
    except TypeError:
        # Most likely unit is not defined, i.e. not a string.
        out += ' (Arb.)'
    return out
| Charley-fan/metaArray | drv_pylab.py | Python | gpl-3.0 | 13,872 |
import os
from wheelerdata.load.base import Wheelerdata
class Butterfly(Wheelerdata):
    """Butterfly data"""

    def __init__(self):
        super(Butterfly, self).__init__()

        # Subject codes included in this dataset.
        self.scodes = [4, 5, 7, 17, 18, 19, 21, 22, 23, 25, 26, 30]
        # self.scodes = [4, 5, 7, 17, 18, 19, 20, 21, 22, 23, 25, 26, 30]
        # S20's data was corrupted in roinii and I don't have
        # access to the orignal nii to recreate it.

        self.name = "butterfly"
        # Absolute paths to the dataset root, the ROI niftis, and the
        # fidl metadata directory.
        self.datapath = "/data/data2/meta_accumulate/" + self.name
        self.roipath = os.path.join(self.datapath, "roinii")
        self.metapath = os.path.join(self.datapath, "fidl")
        # Repetition time in seconds -- presumably the fMRI TR; confirm
        # against the acquisition protocol.
        self.TR = 2.0
class SimAccumButterfly(Butterfly):
    """Simulated accumulator Butterfly data"""

    def __init__(self):
        # Reuse Butterfly's subject codes, name and TR, then point the
        # ROI path at the simulated niftis ("simnii") instead of "roinii".
        super(SimAccumButterfly, self).__init__()
        self.datapath = "/data/data2/meta_accumulate/" + self.name
        self.roipath = os.path.join(self.datapath, "simnii")
        self.metapath = os.path.join(self.datapath, "fidl")
| parenthetical-e/wheelerdata | load/butterfly.py | Python | bsd-2-clause | 1,030 |
'''
Created on 24 feb. 2017
@author: fara
'''
from matplotlib import use
#use('TkAgg')
from __builtin__ import int
import matplotlib.pyplot as plt
import numpy as np
from show import show
def displayData(X):
    '''
    %DISPLAYDATA Display 2D data in a nice grid
    % [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data
    % stored in X in a nice grid. It returns the figure handle h and the
    % displayed array if requested.

    X is an (m, n) array; each row is one example, rendered as an
    (example_height x example_width) patch.
    '''
    m, n = X.shape
    show(X.shape)

    # Infer a roughly square patch shape from the feature count.
    example_width = int(round(np.sqrt(n)))
    example_height = int((n / example_width))

    # Compute number of items to display
    display_rows = np.floor(np.sqrt(m))
    display_cols = np.ceil(m / display_rows)

    # Between images padding
    pad = 1

    # Setup blank display (initialised to -1, i.e. black after scaling).
    # `np.int` was removed in NumPy 1.24; plain int() is equivalent here.
    grid_h = pad + display_rows * (example_height + pad)
    grid_w = pad + display_cols * (example_width + pad)
    display_array = - np.ones((int(grid_h), int(grid_w)), dtype=np.float64)

    # Copy each example into a patch on the display array
    curr_ex = 0
    for j in np.arange(display_rows):
        for i in np.arange(display_cols):
            # Fixed off-by-one: was `curr_ex > m`, which let X[m, :]
            # raise IndexError when the grid is larger than m.
            if curr_ex >= m:
                break
            # Normalise each patch by its own max absolute value.
            max_val = np.max(np.abs(X[curr_ex, :]))
            rows = [pad + (j * (example_height + pad)) + k for k in np.arange(example_height + 1)]
            cols = [pad + (i * (example_width + pad)) + k for k in np.arange(example_width + 1)]
            display_array[int(min(rows)):int(max(rows)), int(min(cols)):int(max(cols))] = \
                X[curr_ex, :].reshape(example_height, example_width) / max_val
            curr_ex += 1
        if curr_ex >= m:
            break

    # Set plot in interactive mode so plt.show() does not block.
    plt.ion()
    # Display Image
    plt.imshow(display_array.T)
    plt.set_cmap('gray')
    # Do not show axis
    plt.axis('off')
    plt.show()
    plt.pause(0.0001)
    raw_input("Program paused. Press Enter to continue.")  # Python 2 builtin
    plt.close()
# -- coding: utf-8 --
# Note that we import as `DjangoRequestFactory` and `DjangoClient` in order
# to make it harder for the user to import the wrong thing without realizing.
from __future__ import unicode_literals
import django
from django.conf import settings
from django.test.client import Client as DjangoClient
from django.test.client import ClientHandler
from django.test import testcases
from django.utils.http import urlencode
from rest_framework.settings import api_settings
from rest_framework.compat import RequestFactory as DjangoRequestFactory
from rest_framework.compat import force_bytes_or_smart_bytes, six
def force_authenticate(request, user=None, token=None):
    """Tag *request* so that authentication resolves to the given user/token."""
    request._force_auth_token = token
    request._force_auth_user = user
class APIRequestFactory(DjangoRequestFactory):
    """Request factory that encodes request bodies using DRF's test renderers."""

    # Renderers available for encoding request bodies, and the default
    # format used when none is specified (both from api_settings).
    renderer_classes_list = api_settings.TEST_REQUEST_RENDERER_CLASSES
    default_format = api_settings.TEST_REQUEST_DEFAULT_FORMAT

    def __init__(self, enforce_csrf_checks=False, **defaults):
        self.enforce_csrf_checks = enforce_csrf_checks
        # Map renderer format name -> renderer class for quick lookup.
        self.renderer_classes = {}
        for cls in self.renderer_classes_list:
            self.renderer_classes[cls.format] = cls
        super(APIRequestFactory, self).__init__(**defaults)

    def _encode_data(self, data, format=None, content_type=None):
        """
        Encode the data returning a two tuple of (bytes, content_type)
        """
        if not data:
            return ('', None)

        assert format is None or content_type is None, (
            'You may not set both `format` and `content_type`.'
        )

        if content_type:
            # Content type specified explicitly, treat data as a raw bytestring
            ret = force_bytes_or_smart_bytes(data, settings.DEFAULT_CHARSET)
        else:
            format = format or self.default_format

            assert format in self.renderer_classes, ("Invalid format '{0}'. "
                "Available formats are {1}. Set TEST_REQUEST_RENDERER_CLASSES "
                "to enable extra request formats.".format(
                    format,
                    ', '.join(["'" + fmt + "'" for fmt in self.renderer_classes.keys()])
                )
            )

            # Use format and render the data into a bytestring
            renderer = self.renderer_classes[format]()
            ret = renderer.render(data)

            # Determine the content-type header from the renderer
            content_type = "{0}; charset={1}".format(
                renderer.media_type, renderer.charset
            )

            # Coerce text to bytes if required.
            if isinstance(ret, six.text_type):
                ret = bytes(ret.encode(renderer.charset))

        return ret, content_type

    def get(self, path, data=None, **extra):
        # GET carries its data in the query string, not the body.
        r = {
            'QUERY_STRING': urlencode(data or {}, doseq=True),
        }
        r.update(extra)
        return self.generic('GET', path, **r)

    def post(self, path, data=None, format=None, content_type=None, **extra):
        data, content_type = self._encode_data(data, format, content_type)
        return self.generic('POST', path, data, content_type, **extra)

    def put(self, path, data=None, format=None, content_type=None, **extra):
        data, content_type = self._encode_data(data, format, content_type)
        return self.generic('PUT', path, data, content_type, **extra)

    def patch(self, path, data=None, format=None, content_type=None, **extra):
        data, content_type = self._encode_data(data, format, content_type)
        return self.generic('PATCH', path, data, content_type, **extra)

    def delete(self, path, data=None, format=None, content_type=None, **extra):
        # Note: a DELETE body is unusual but intentionally supported here.
        data, content_type = self._encode_data(data, format, content_type)
        return self.generic('DELETE', path, data, content_type, **extra)

    def options(self, path, data=None, format=None, content_type=None, **extra):
        data, content_type = self._encode_data(data, format, content_type)
        return self.generic('OPTIONS', path, data, content_type, **extra)

    def request(self, **kwargs):
        # Tag the request so CSRF enforcement matches the factory setting.
        request = super(APIRequestFactory, self).request(**kwargs)
        request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
        return request
class ForceAuthClientHandler(ClientHandler):
    """
    A patched version of ClientHandler that can enforce authentication
    on the outgoing requests.
    """

    def __init__(self, *args, **kwargs):
        # No forced credentials until APIClient.force_authenticate() sets them.
        self._force_user = None
        self._force_token = None
        super(ForceAuthClientHandler, self).__init__(*args, **kwargs)

    def get_response(self, request):
        # This is the simplest place we can hook into to patch the
        # request object.
        force_authenticate(request, self._force_user, self._force_token)
        return super(ForceAuthClientHandler, self).get_response(request)
class APIClient(APIRequestFactory, DjangoClient):
    """Test client combining DRF body encoding with Django's test Client."""

    def __init__(self, enforce_csrf_checks=False, **defaults):
        super(APIClient, self).__init__(**defaults)
        # Swap in the handler that can force authentication per request.
        self.handler = ForceAuthClientHandler(enforce_csrf_checks)
        self._credentials = {}

    def credentials(self, **kwargs):
        """
        Sets headers that will be used on every outgoing request.
        """
        self._credentials = kwargs

    def force_authenticate(self, user=None, token=None):
        """
        Forcibly authenticates outgoing requests with the given
        user and/or token.
        """
        self.handler._force_user = user
        self.handler._force_token = token
        if user is None:
            self.logout()  # Also clear any possible session info if required

    def request(self, **kwargs):
        # Ensure that any credentials set get added to every request.
        kwargs.update(self._credentials)
        return super(APIClient, self).request(**kwargs)
# Drop-in replacements for Django's test-case classes that use APIClient.
class APITransactionTestCase(testcases.TransactionTestCase):
    client_class = APIClient


class APITestCase(testcases.TestCase):
    client_class = APIClient


if django.VERSION >= (1, 4):
    # SimpleTestCase and LiveServerTestCase only exist from Django 1.4 on,
    # so these variants are defined conditionally.
    class APISimpleTestCase(testcases.SimpleTestCase):
        client_class = APIClient

    class APILiveServerTestCase(testcases.LiveServerTestCase):
        client_class = APIClient
| hsfzxjy/wisecitymbc | site_packages/rest_framework/test.py | Python | gpl-2.0 | 6,436 |
import asyncio
from base import Event
class TerminalEmulator(object):
    """A minimal in-memory terminal emulator.

    Bytes fed to write() are parsed -- including a subset of ANSI/VT100
    escape sequences -- into a fixed-size character grid (self.lines)
    with a cursor position (cursorX, cursorY).
    """

    def __init__(self):
        super(TerminalEmulator, self).__init__()
        self._logger = None
        self.revision = 0
        self._width = 0
        self._height = 0
        self.lines = []          # rows; each row is a list of 1-char strings
        self.cursorX = 0
        self.cursorY = 0
        self.coroutine = None    # generator that parses the byte stream
        self.peekBuffer = None   # one byte of parser lookahead
        self.resize(80, 24)

    # TerminalEmulator

    @property
    def logger(self):
        return self._logger

    @logger.setter
    def logger(self, logger):
        self._logger = logger

    @property
    def width(self):
        return self._width

    @property
    def height(self):
        return self._height

    @width.setter
    def width(self, width):
        self.resize(width, self.height)

    @height.setter
    def height(self, height):
        self.resize(self.width, height)

    def clear(self):
        """Blank every cell without moving the cursor."""
        for y in range(0, self.height):
            self.lines[y] = [" "] * self.width

    def reset(self):
        """Blank the screen and home the cursor."""
        self.clear()
        self.cursorX = 0
        self.cursorY = 0

    def resize(self, w, h):
        """Resize the grid to w x h, preserving existing content where possible."""
        if self._width != w:
            self.lines = [line[0:w] for line in self.lines]
            for y in range(0, self._height):
                line = self.lines[y]
                while len(line) < w:
                    line.append(" ")
            self._width = w
        if self._height != h:
            self.lines = self.lines[0:h]
            while h > len(self.lines):
                self.lines.append([" "] * self._width)
            self._height = h
        self.clampCursor()

    @property
    def buffer(self):
        """The whole screen as text, one row per line."""
        return "\n".join([self.bufferLine(y) for y in range(0, self.height)])

    def bufferLineRange(self, startLine, endLine):
        return "\n".join([self.bufferLine(y) for y in range(startLine, endLine)])

    def bufferLine(self, line):
        # The cursor row keeps its trailing spaces and gets a zero-width
        # space + combining low line inserted to mark the cursor position.
        if line != self.cursorY: return "".join(self.lines[line]).rstrip()
        return "".join(self.lines[line][0:self.cursorX]) + "\u200B\u0332" + "".join(self.lines[line][(self.cursorX):])

    def write(self, bytes):
        """Feed raw bytes (an iterable of ints) into the parser."""
        if self.coroutine is None:
            self.coroutine = self.processInput()
            next(self.coroutine)  # prime the generator to its first yield
        for b in bytes:
            self.coroutine.send(b)

    # Internal

    def clampCursor(self):
        self.cursorX = max(0, min(self.cursorX, self.width - 1))
        self.cursorY = max(0, min(self.cursorY, self.height - 1))

    def setCursorPos(self, x, y):
        self.cursorX = x
        self.cursorY = y
        self.clampCursor()

    def processInput(self):
        """Generator parsing the incoming byte stream.

        Driven manually by write() via next()/send(). The former
        @asyncio.coroutine decorator (removed in Python 3.11) was not
        needed for that usage and has been dropped.
        """
        while True:
            c = yield from self.readCharacter()
            if c == "\x08":
                # Backspace: move left, wrapping onto the previous line.
                if self.cursorX == 0:
                    if self.cursorY > 0:
                        self.setCursorPos(self.width - 1, self.cursorY - 1)
                    else:
                        pass
                else:
                    self.setCursorPos(self.cursorX - 1, self.cursorY)
            elif c == "\r":
                self.processCarriageReturn()
            elif c == "\n":
                self.processLineFeed()
            elif c == "\x1b":
                c = yield from self.peekCharacter()
                if c == "[":
                    self.advance()
                    c = yield from self.peekCharacter()
                    if c == "?": self.advance()  # skip DEC private-mode marker
                    c = yield from self.peekCharacter()
                    if c == "A":
                        self.advance()
                        self.setCursorPos(self.cursorX, self.cursorY - 1)
                    elif c == "B":
                        self.advance()
                        self.setCursorPos(self.cursorX, self.cursorY + 1)
                    elif c == "C":
                        self.advance()
                        self.setCursorPos(self.cursorX + 1, self.cursorY)
                    elif c == "D":
                        self.advance()
                        self.setCursorPos(self.cursorX - 1, self.cursorY)
                    elif c == "H":
                        self.advance()
                        self.setCursorPos(0, 0)
                    elif c == "J":
                        self.advance()
                        # Clear from the cursor to the end of the screen.
                        self.clearCharacterSpan(self.cursorY, self.cursorX, self.width)
                        self.clearLineRange(self.cursorY + 1, self.height)
                    elif c == "K":
                        self.advance()
                        self.clearCharacterSpan(self.cursorY, self.cursorX, self.width)
                    else:
                        # CSI sequence with numeric parameters: ESC [ n (; m) cmd
                        nString = yield from self.readNumber()
                        mString = ""
                        semicolon = False
                        if (yield from self.acceptCharacter(";")):
                            semicolon = True
                            mString = yield from self.readNumber()
                        n = int(nString) if len(nString) > 0 else 1
                        m = int(mString) if len(mString) > 0 else 1
                        c = yield from self.peekCharacter()
                        if c == "A":
                            self.advance()
                            self.setCursorPos(self.cursorX, self.cursorY - n)
                        elif c == "B":
                            self.advance()
                            self.setCursorPos(self.cursorX, self.cursorY + n)
                        elif c == "C":
                            self.advance()
                            self.setCursorPos(self.cursorX + n, self.cursorY)
                        elif c == "D":
                            self.advance()
                            self.setCursorPos(self.cursorX - n, self.cursorY)
                        elif c == "G":
                            self.advance()
                            self.setCursorPos(n - 1, self.cursorY)
                        elif c == "H":
                            self.advance()
                            self.setCursorPos(m - 1, n - 1)
                        elif c == "J":
                            self.advance()
                            if n == 0 or n == 1:
                                # Unsupported variants: echo the sequence back
                                # into the buffer (and log it) rather than act.
                                rejectedString = "\\x1b[" + nString
                                if semicolon:
                                    rejectedString += ";" + mString
                                rejectedString += "J"
                                self.writeString(rejectedString)
                                if self.logger is not None:
                                    self.logger.log("TerminalEmulator: Rejecting " + rejectedString)
                            elif n == 2:
                                self.reset()
                        elif c == "K":
                            self.advance()
                            if n == 0:
                                self.clearCharacterSpan(self.cursorY, self.cursorX, self.width)
                            elif n == 1:
                                self.clearCharacterSpan(self.cursorY, 0, self.cursorX)
                            elif n == 2:
                                # Fixed: the original called
                                # clearLineRange(self, ...) with `self` passed
                                # explicitly -- a guaranteed TypeError.
                                self.clearLineRange(self.cursorY, self.cursorY + 1)
                            else:
                                raise RuntimeError()
                        elif c == "P":
                            self.advance()
                            # Delete n characters at the cursor, keeping width.
                            for x in range(0, n):
                                del self.lines[self.cursorY][self.cursorX]
                                self.lines[self.cursorY].append(" ")
                        elif c == "d":
                            self.advance()
                            self.setCursorPos(self.cursorX, n - 1)
                        elif c == "h" or c == "l" or c == "m" or c == "r":
                            self.advance()  # mode set/reset, SGR, scroll region: ignored
                        else:
                            rejectedString = "\\x1b[" + nString
                            if semicolon:
                                rejectedString += ";" + mString
                            self.writeString(rejectedString)
                            if self.logger is not None:
                                self.logger.log("TerminalEmulator: Rejecting " + rejectedString + ("\\x%02x" % ord(c)) + " / " + c)
                elif c == ">": self.advance()
                elif c == "(": self.advance()  # Set default font
                elif c == ")": self.advance()  # Set alternate font
                elif c == "D": self.advance()  # Scroll down one line
                elif c == "M": self.advance()  # Scroll up one line
                else:
                    rejectedString = "\\x1b" + c
                    self.writeString(rejectedString)
                    if self.logger is not None:
                        self.logger.log("TerminalEmulator: Rejecting \\x1b" + ("\\x%02x" % ord(c)) + " / " + c)
            else:
                self.writeCharacter(c)

    # Input

    def advance(self):
        """Consume the previously peeked byte."""
        if self.peekBuffer is None:
            raise RuntimeError()
        self.peekBuffer = None

    def acceptByte(self, uint8):
        """Consume the next byte iff it equals *uint8*; return success."""
        if uint8 == (yield from self.peekByte()):
            self.advance()
            return True
        return False

    def acceptCharacter(self, c):
        """Consume the next character iff it equals *c*; return success."""
        if c == (yield from self.peekCharacter()):
            self.advance()
            return True
        return False

    def peekByte(self):
        """Return the next byte without consuming it (suspends for input)."""
        if self.peekBuffer is None:
            self.peekBuffer = yield
        return self.peekBuffer

    def peekCharacter(self):
        return chr((yield from self.peekByte()))

    def readByte(self):
        uint8 = yield from self.peekByte()
        self.advance()
        return uint8

    def readCharacter(self):
        return chr((yield from self.readByte()))

    def readNumber(self):
        """Read a (possibly empty) run of ASCII digits as a string."""
        number = ""
        while True:
            c = yield from self.peekCharacter()
            if not c.isdigit(): break
            self.peekBuffer = None
            number += c
        return number

    # Output

    def clearCharacterSpan(self, line, startColumn, endColumn):
        """Blank columns [startColumn, endColumn) of row *line*."""
        # Fixed: the original indexed self.lines[self.cursorY], silently
        # ignoring the `line` argument (benign at existing call sites,
        # which all pass self.cursorY).
        for x in range(startColumn, endColumn):
            self.lines[line][x] = " "

    def clearLineRange(self, startLine, endLine):
        for y in range(startLine, endLine):
            self.lines[y] = [" "] * self.width

    def processCarriageReturn(self):
        self.cursorX = 0

    def processLineFeed(self):
        self.cursorX = 0
        self.cursorY += 1
        if self.cursorY >= self.height:
            # Scroll: drop the top row and append a fresh blank bottom row.
            del self.lines[0]
            self.lines.append([" "] * self.width)
            self.cursorY = self.cursorY - 1

    def writeCharacter(self, c):
        self.lines[self.cursorY][self.cursorX] = c
        self.cursorX += 1
        if self.cursorX >= self.width:
            self.processLineFeed()

    def writeString(self, s):
        for c in s:
            self.writeCharacter(c)
| squahtx/hal9000 | plugins/base/terminalemulator.py | Python | mit | 8,289 |
#!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Role (Model) query functions.
"""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from soc.cache import sidebar
from soc.logic.models import base
import soc.models.role
# Error message shown when the last remaining holder of a role tries to
# resign; callers fill in %(name)s with the role name.
DEF_LAST_RESIGN_ERROR_FMT = ("This user can't be "
    "resigned, please make sure it's not the last %(name)s.")

# Global registry mapping a role name to its Logic instance.
ROLE_LOGICS = {}

# Profile fields copied from an existing role when suggesting initial
# values for a new role belonging to the same user.
SUGGESTED_FIELDS = [
    'given_name', 'surname', 'name_on_documents', 'phone',
    'im_network', 'im_handle', 'home_page', 'blog', 'photo_url', 'latitude',
    'longitude', 'email', 'res_street', 'res_city', 'res_state', 'res_country',
    'res_postalcode', 'ship_street', 'ship_city', 'ship_state', 'ship_country',
    'ship_postalcode', 'birth_date', 'tshirt_size', 'tshirt_style',
]


def registerRoleLogic(role_logic):
    """Adds the specified Role Logic to the known ones.

    Args:
      role_logic: Instance of or subclass from Role Logic
    """
    global ROLE_LOGICS
    ROLE_LOGICS[role_logic.role_name] = role_logic
class Logic(base.Logic):
    """Logic methods for the Role model.
    """

    def __init__(self, model=soc.models.role.Role,
                 base_model=None, scope_logic=None, role_name=None,
                 disallow_last_resign=False):
        """Defines the name, key_name and model for this entity.

        Args:
          role_name: The name of this role used for instance for Requests
          dissallow_last_resign: Iff True and a given role entity is the last of
            its kind in its scope then this role can not be resigned.
        """
        super(Logic, self).__init__(model, base_model=base_model,
                                    scope_logic=scope_logic)
        self.role_name = role_name
        # Every Logic instance registers itself in the global ROLE_LOGICS map.
        registerRoleLogic(self)
        self.disallow_last_resign = disallow_last_resign

    def _updateField(self, entity, entity_properties, name):
        """Special logic for role. If status changes to active we flush the sidebar.

        Always returns True so the field update itself proceeds.
        """
        value = entity_properties[name]

        if (name == 'status') and (entity.status != value) and value == 'active':
            # in case the status of the role changes to active we flush the sidebar
            # cache. Other changes will be visible after the retention time expires.
            sidebar.flush(entity.user.account)

        return True

    def _onCreate(self, entity):
        """Flush the sidebar cache when a new active role entity has been created.
        """
        if entity.status == 'active':
            sidebar.flush(entity.user.account)

        super(Logic, self)._onCreate(entity)

    def canResign(self, entity):
        """Checks if the current entity is allowed to be resigned.

        Args:
          entity: a Role entity

        Returns:
          - None if the entity is allowed to resign.
          - Error message otherwise.
        """
        if self.disallow_last_resign:
            # check if this is the last active role for it's scope
            fields = {'scope': entity.scope,
                      'status': 'active'}
            # limit=2 suffices: we only need to know whether more than one exists.
            roles = self.getForFields(fields, limit=2)

            # if this it the last one return error message
            if len(roles) <= 1:
                # NOTE(review): returned un-interpolated; callers presumably
                # substitute %(name)s themselves -- confirm at call sites.
                return DEF_LAST_RESIGN_ERROR_FMT

        # resignation is possible
        return None

    def getRoleLogicsToNotifyUponNewRequest(self):
        """Returns a list with subclasses of Role Logic which should be notified
        when a new request to obtain this Role arrives.

        Returns:
          A list with all Role Logics to notify
        """
        # Base implementation notifies no one; subclasses override as needed.
        return []

    def getSuggestedInitialProperties(self, user):
        """Suggest role properties for a given user based on its previous entries.

        Args:
          user: a user entity

        Returns:
          A dict with values for fields defined in SUGGESTED_FIELDS or an empty
          dictionary if no previous roles were found.
        """
        # NOTE: `filter` shadows the builtin; kept for byte-compatibility.
        filter = {
            'status': ['active', 'inactive'],
            'user': user,
        }

        role = None
        # Take the first matching role found under any registered role logic.
        for role_logic in ROLE_LOGICS.values():
            role = role_logic.getForFields(filter, unique=True)
            if role:
                break

        if not role:
            return {}

        return dict([(field, getattr(role, field)) for field in SUGGESTED_FIELDS])
logic = Logic()
| MatthewWilkes/mw4068-packaging | src/melange/src/soc/logic/models/role.py | Python | apache-2.0 | 4,706 |
# Small ipython extensions
import sys
import subprocess
def pip(line):
    """Execute a pip command.

    Do:
      %pip --help
    to get an overview.

    Note: when doing '%pip uninstall ...', use the -y option
    to avoid being prompted (which does not work using %pip).
    """
    args = line.split()
    do_pip(args)
def do_pip(args):
    """Run ``python -m pip`` with *args*, streaming its output to stdout."""
    cmd = (sys.executable, "-m", "pip") + tuple(args)
    with subprocess.Popen(cmd, stdin=subprocess.DEVNULL,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT) as proc:
        # Relay pip's combined stdout/stderr line by line until EOF.
        for raw in iter(proc.stdout.readline, b""):
            sys.stdout.write(raw.decode())
def pip_upgrade_all_user(line):
    """Attempt to upgrade all packages installed with --user.

    NOTE(review): ``pip.get_installed_distributions`` was removed from pip's
    public API in pip 10 — confirm the target pip version before relying on
    this magic.
    """
    import pip
    for dist in pip.get_installed_distributions(user_only=True):
        do_pip(["install", "--upgrade", "--user", dist.project_name])
def pip_upgrade_all(line):
    """Attempt to upgrade all packages.

    Site-wide packages are upgraded first, then --user installs, so a
    package present in both places ends up upgraded in both.

    NOTE(review): ``pip.get_installed_distributions`` was removed from pip's
    public API in pip 10 — confirm the target pip version.
    """
    from pip import get_installed_distributions
    user = set(d.project_name for d in get_installed_distributions(user_only=True))
    all = set(d.project_name for d in get_installed_distributions())
    for dist in all - user:
        do_pip(["install", "--upgrade", dist])
    for dist in user:
        do_pip(["install", "--upgrade", "--user", dist])
def load_ipython_extension(ipython):
    """Register the %pip, %pip_upgrade_all_user and %pip_upgrade_all magics."""
    # The `ipython` argument is the currently active `InteractiveShell`
    # instance, which can be used in any way. This allows you to register
    # new magics or aliases, for example.
    ipython.register_magic_function(pip)
    ipython.register_magic_function(pip_upgrade_all_user)
    ipython.register_magic_function(pip_upgrade_all)
def unload_ipython_extension(ipython):
    """No teardown needed: the registered magics are simply left in place."""
    # If you want your extension to be unloadable, put that logic here.
    pass
| stephanh42/ipython_pip_magics | ipython_pip_magics/__init__.py | Python | mit | 1,829 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""OAuth API.
A service that enables App Engine apps to validate OAuth requests.
Classes defined here:
Error: base exception type
NotAllowedError: OAuthService exception
OAuthRequestError: OAuthService exception
InvalidOAuthParametersError: OAuthService exception
InvalidOAuthTokenError: OAuthService exception
OAuthServiceFailureError: OAuthService exception
"""
import cPickle
import os
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import user_service_pb
from google.appengine.api import users
from google.appengine.runtime import apiproxy_errors
class Error(Exception):
    """Base error class for this module.

    All exceptions raised by the OAuth API derive from this class.
    """
class OAuthRequestError(Error):
    """Base error type for invalid OAuth requests.

    Raised (via subclasses) when the incoming request itself is at fault,
    as opposed to a failure of the OAuth service.
    """
class NotAllowedError(OAuthRequestError):
    """Raised if the requested URL does not permit OAuth authentication."""
class InvalidOAuthParametersError(OAuthRequestError):
    """Raised if the request was a malformed OAuth request.

    For example, the request may have omitted a required parameter, contained
    an invalid signature, or was made by an unknown consumer.
    """
class InvalidOAuthTokenError(OAuthRequestError):
    """Raised if the request contained an invalid token.

    For example, the token may have been revoked by the user.
    """
class OAuthServiceFailureError(Error):
    """Raised if there was a problem communicating with the OAuth service."""
def get_current_user(_scope=None):
    """Returns the User on whose behalf the request was made.

    Args:
      _scope: The custom OAuth scope or an iterable of scopes at least one of
        which is accepted.

    Returns:
      User

    Raises:
      OAuthRequestError: The request was not a valid OAuth request.
      OAuthServiceFailureError: An unknown error occurred.
    """
    # Populates the OAUTH_* environment variables (cached per scope).
    _maybe_call_get_oauth_user(_scope)
    return _get_user_from_environ()
def is_current_user_admin(_scope=None):
    """Returns true if the User on whose behalf the request was made is an admin.

    Args:
      _scope: The custom OAuth scope or an iterable of scopes at least one of
        which is accepted.

    Returns:
      boolean

    Raises:
      OAuthRequestError: The request was not a valid OAuth request.
      OAuthServiceFailureError: An unknown error occurred.
    """
    _maybe_call_get_oauth_user(_scope)
    # The RPC result is cached in os.environ as '1'/'0' strings.
    return os.environ.get('OAUTH_IS_ADMIN', '0') == '1'
def get_oauth_consumer_key():
    """Returns the value of the 'oauth_consumer_key' parameter from the request.

    Returns:
      string: The value of the 'oauth_consumer_key' parameter from the request,
          an identifier for the consumer that signed the request.

    Raises:
      OAuthRequestError: The request was not a valid OAuth request.
      OAuthServiceFailureError: An unknown error occurred.
    """
    req = user_service_pb.CheckOAuthSignatureRequest()
    resp = user_service_pb.CheckOAuthSignatureResponse()
    try:
        apiproxy_stub_map.MakeSyncCall('user', 'CheckOAuthSignature', req, resp)
    except apiproxy_errors.ApplicationError, e:
        # Map RPC application error codes onto this module's exceptions.
        if (e.application_error ==
            user_service_pb.UserServiceError.OAUTH_INVALID_REQUEST):
            raise InvalidOAuthParametersError(e.error_detail)
        elif (e.application_error ==
              user_service_pb.UserServiceError.OAUTH_ERROR):
            raise OAuthServiceFailureError(e.error_detail)
        else:
            # Unknown codes are treated as service failures.
            raise OAuthServiceFailureError(e.error_detail)
    return resp.oauth_consumer_key()
def get_client_id(_scope):
    """Returns the value of OAuth2 Client ID from an OAuth2 request.

    Args:
      _scope: The custom OAuth scope or an iterable of scopes at least one of
        which is accepted.

    Returns:
      string: The value of Client ID.

    Raises:
      OAuthRequestError: The request was not a valid OAuth2 request.
      OAuthServiceFailureError: An unknown error occurred.
    """
    _maybe_call_get_oauth_user(_scope)
    return _get_client_id_from_environ()
def get_authorized_scopes(scope):
    """Returns authorized scopes from input scopes.

    Args:
      scope: The custom OAuth scope or an iterable of scopes at least one of
        which is accepted.

    Returns:
      list: A list of authorized OAuth2 scopes

    Raises:
      OAuthRequestError: The request was not a valid OAuth2 request.
      OAuthServiceFailureError: An unknown error occurred.
    """
    _maybe_call_get_oauth_user(scope)
    return _get_authorized_scopes_from_environ()
def _maybe_call_get_oauth_user(scope):
    """Makes a GetOAuthUser RPC and stores the results in os.environ.

    This method will only make the RPC if 'OAUTH_ERROR_CODE' has not already
    been set or 'OAUTH_LAST_SCOPE' is different to the canonicalised scope.

    Args:
      scope: The custom OAuth scope or an iterable of scopes at least one of
        which is accepted.
    """
    # Canonicalise the scope argument into a stable cache key.
    if not scope:
        scope_str = ''
    elif isinstance(scope, basestring):
        scope_str = scope
    else:
        scope_str = str(sorted(scope))
    if ('OAUTH_ERROR_CODE' not in os.environ or
        os.environ.get('OAUTH_LAST_SCOPE', None) != scope_str or
        os.environ.get('TESTONLY_OAUTH_SKIP_CACHE')):
        req = user_service_pb.GetOAuthUserRequest()
        if scope:
            if isinstance(scope, basestring):
                req.add_scopes(scope)
            else:
                req.scopes_list().extend(scope)
        resp = user_service_pb.GetOAuthUserResponse()
        try:
            apiproxy_stub_map.MakeSyncCall('user', 'GetOAuthUser', req, resp)
            os.environ['OAUTH_EMAIL'] = resp.email()
            os.environ['OAUTH_AUTH_DOMAIN'] = resp.auth_domain()
            os.environ['OAUTH_USER_ID'] = resp.user_id()
            os.environ['OAUTH_CLIENT_ID'] = resp.client_id()
            # Scopes are cached as a pickled list; read back by
            # _get_authorized_scopes_from_environ.
            os.environ['OAUTH_AUTHORIZED_SCOPES'] = cPickle.dumps(
                list(resp.scopes_list()), cPickle.HIGHEST_PROTOCOL)
            if resp.is_admin():
                os.environ['OAUTH_IS_ADMIN'] = '1'
            else:
                os.environ['OAUTH_IS_ADMIN'] = '0'
            # Empty string marks "RPC succeeded, no error".
            os.environ['OAUTH_ERROR_CODE'] = ''
        except apiproxy_errors.ApplicationError, e:
            os.environ['OAUTH_ERROR_CODE'] = str(e.application_error)
            os.environ['OAUTH_ERROR_DETAIL'] = e.error_detail
        os.environ['OAUTH_LAST_SCOPE'] = scope_str
    # Re-raises any cached error, so callers always observe a failure.
    _maybe_raise_exception()
def _maybe_raise_exception():
    """Raises an error if one has been stored in os.environ.

    This method requires that 'OAUTH_ERROR_CODE' has already been set (an empty
    string indicates that there is no actual error).
    """
    assert 'OAUTH_ERROR_CODE' in os.environ
    error = os.environ['OAUTH_ERROR_CODE']
    if error:
        assert 'OAUTH_ERROR_DETAIL' in os.environ
        error_detail = os.environ['OAUTH_ERROR_DETAIL']
        # Translate the cached numeric error code back into an exception type.
        if error == str(user_service_pb.UserServiceError.NOT_ALLOWED):
            raise NotAllowedError(error_detail)
        elif error == str(user_service_pb.UserServiceError.OAUTH_INVALID_REQUEST):
            raise InvalidOAuthParametersError(error_detail)
        elif error == str(user_service_pb.UserServiceError.OAUTH_INVALID_TOKEN):
            raise InvalidOAuthTokenError(error_detail)
        elif error == str(user_service_pb.UserServiceError.OAUTH_ERROR):
            raise OAuthServiceFailureError(error_detail)
        else:
            # Unknown codes default to a generic service failure.
            raise OAuthServiceFailureError(error_detail)
def _get_user_from_environ():
    """Returns a User based on values stored in os.environ.

    This method requires that 'OAUTH_EMAIL', 'OAUTH_AUTH_DOMAIN', and
    'OAUTH_USER_ID' have already been set (by _maybe_call_get_oauth_user).

    Returns:
      User
    """
    assert 'OAUTH_EMAIL' in os.environ
    assert 'OAUTH_AUTH_DOMAIN' in os.environ
    assert 'OAUTH_USER_ID' in os.environ
    return users.User(email=os.environ['OAUTH_EMAIL'],
                      _auth_domain=os.environ['OAUTH_AUTH_DOMAIN'],
                      _user_id=os.environ['OAUTH_USER_ID'])
def _get_client_id_from_environ():
"""Returns Client ID based on values stored in os.environ.
This method requires that 'OAUTH_CLIENT_ID' has already been set.
Returns:
string: the value of Client ID.
"""
assert 'OAUTH_CLIENT_ID' in os.environ
return os.environ['OAUTH_CLIENT_ID']
def _get_authorized_scopes_from_environ():
    """Returns authorized scopes based on values stored in os.environ.

    This method requires that 'OAUTH_AUTHORIZED_SCOPES' has already been set;
    the value is a cPickle-serialised list written by
    _maybe_call_get_oauth_user.

    Returns:
      list: the list of OAuth scopes.
    """
    assert 'OAUTH_AUTHORIZED_SCOPES' in os.environ
    return cPickle.loads(os.environ['OAUTH_AUTHORIZED_SCOPES'])
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/google/appengine/api/oauth/oauth_api.py | Python | bsd-3-clause | 8,814 |
import numpy as np
import theano
import theano.tensor as T
from util import debug_input_data, show_debug_sample
from printing import print_section, print_test, print_valid, print_training
import random, sys, timeit
from sdg import Backpropagation
import interface.server
from config import visual_params
from wrapper import create_theano_func, create_profiler_func
from storage import ParamStorage
class Evaluator(object):
'''
The evaluator class contains the main training loop. It receives the model and dataset and conducts the optimization.
The number of epochs are supplied in run method, while the main loop draws parameters directly from the loaded
config.py. The learning rate, bootstrapping factor, curriculum switch and early stopping are set the loop inside
the _train method.
'''
    def __init__(self, model, dataset, params, path):
        """Store model/dataset/hyperparameters and optionally attach the GUI.

        Args:
            model: network wrapper exposing build(), params and cost helpers.
            dataset: chunked dataset wrapper.
            params: hyperparameter configuration object (see config.py).
            path: job path forwarded to the GUI server when enabled.
        """
        self.data = dataset
        self.model = model
        self.params = params
        self.report = {}   # final scores, filled by set_result
        self.events = []   # per-evaluation history, appended during _train
        if(visual_params.gui_enabled):
            interface.server.start_new_job(path=path)
    def run(self, epochs=10, verbose=False, init=None):
        """Compute batch counts, build the theano functions and train.

        Args:
            epochs: maximum number of training epochs.
            verbose: unused here — NOTE(review): confirm it can be dropped.
            init: optional initial parameter values forwarded to the model.
        """
        batch_size = self.params.batch_size
        self.nr_train_batches = self.data.get_total_number_of_batches(batch_size)
        self.nr_valid_batches = self._get_number_of_batches('validation', batch_size)
        self.nr_test_batches = self._get_number_of_batches('test', batch_size)
        self._build(batch_size, init)
        self._train(batch_size, epochs)
    def _build(self, batch_size, init):
        """Compile the theano train/validation/test functions and the profiler.

        Args:
            batch_size: minibatch size shared by all compiled functions.
            init: optional initial parameter values passed to model.build.
        """
        print_section('Building model')
        index = T.lscalar()  # index to a [mini]batch
        x = T.matrix('x')  # the data is presented as rasterized images
        y = T.imatrix('y')  # label data
        # Drop switch. Only train should drop units. For testing and validation
        # all units should be used (but output rescaled)
        drop = T.iscalar('drop')
        learning_rate = T.scalar('learning_rate', dtype=theano.config.floatX)
        mix_factor = T.scalar('factor', dtype=theano.config.floatX)
        self.model.build(x, drop, batch_size, init_params=init)
        errors = self.model.get_output_layer().errors(y)
        self.test_model = create_theano_func('test', self.data, x, y, drop, [index], errors, batch_size)
        self.validate_model = create_theano_func('validation', self.data, x, y, drop, [index], errors, batch_size)
        self.get_training_loss = create_theano_func(
            'train', self.data, x, y, drop, [index], errors, batch_size, prefix="_loss"
        )
        # Total cost = mixed data term + L2 weight penalty.
        cost = self.model.get_cost(y, mix_factor) + (self.params.l2_reg * self.model.getL2())
        opt = Backpropagation.create(self.model.params)
        grads = T.grad(cost, self.model.params)
        updates = opt.updates(self.model.params, grads, learning_rate, self.params.momentum)
        self.train_model = create_theano_func(
            'train', self.data, x, y, drop, [index, learning_rate, mix_factor], cost, batch_size,
            updates=updates, dropping=True
        )
        # Profiler used by _debug to inspect individual predictions.
        self.tester = create_profiler_func(
            self.data, x, y, drop, [index, mix_factor], self.model.get_output_layer(), cost, batch_size
        )
    def _debug(self, batch_size, nr_batches, factor):
        '''
        When gui has requested a debug. A random minibatch is chosen, and a number of images are displayed,
        so user can evaulate progress.
        '''
        data = []
        labels = []
        predictions = []
        number_of_tests = 6
        for test in range(number_of_tests):
            # Sample a random example from a random minibatch.
            minibatch_index = random.randint(0, nr_batches-1)
            v = random.randint(0,batch_size-1)
            output, y, cost, errs = self.tester(minibatch_index, factor)
            predictions.append(output[v])
            labels.append(y[v])
            # Pull the corresponding raw input out of the shared training set.
            data.append(self.data.set['train'][0][(minibatch_index*batch_size) + v].eval())
        show_debug_sample(data, labels, predictions, 64, 16, std=self.data.std)
def _get_validation_score(self, batch_size, epoch, minibatch_index):
validation_loss = np.mean( [self.validate_model(i) for i in range(self.nr_valid_batches)] )
print_valid(epoch, minibatch_index + 1, self.nr_train_batches, validation_loss)
return validation_loss
def _get_training_score(self, nr_of_batches):
training_loss = np.mean( [self.get_training_loss(i) for i in range(nr_of_batches)])
print_training(training_loss)
return training_loss
def _get_test_score(self, batch_size):
test_score = np.mean( [self.test_model(i) for i in range(self.nr_test_batches)] )
print_test(test_score)
return test_score
def _get_number_of_batches(self, set_name, batch_size):
set_x, set_y = self.data.set[set_name]
nr_of_batches = set_x.get_value(borrow=True).shape[0]
nr_of_batches /= batch_size
return int(nr_of_batches)
    def _train(self, batch_size, max_epochs):
        """Main optimization loop.

        Runs minibatch SGD over the chunked training set for up to
        *max_epochs* epochs, periodically evaluating on the validation and
        test sets. Implements early stopping (patience), learning rate decay,
        loss-mixture annealing and optional curriculum mixing. All
        hyperparameters are drawn from self.params.
        """
        print_section('Training model')
        patience = self.params.initial_patience # look as this many examples regardless
        patience_increase = self.params.patience_increase # wait this much longer when a new best is found
        improvement_threshold = self.params.improvement_threshold # a relative improvement of this much is considered significant
        learning_rate = self.params.learning_rate
        learning_adjustment = self.params.learning_adjustment
        learning_decrease = self.params.learning_decrease
        nr_learning_adjustments = 0
        print('---- Initial learning rate {}'.format(learning_rate))
        max_factor = self.params.factor_rate
        factor_adjustment = self.params.factor_adjustment
        factor_decrease = self.params.factor_decrease
        factor_minimum = self.params.factor_minimum
        print('---- Initial loss mixture ratio {}'.format(max_factor))
        curriculum = self.params.curriculum_enable
        curriculum_start = self.params.curriculum_start
        curriculum_adjustment = self.params.curriculum_adjustment
        # go through this many minibatch before checking the network on the validation set
        gui_frequency = 500
        validation_frequency = min(self.nr_train_batches, patience / 2)
        best_validation_loss = np.inf
        best_iter = 0
        test_score = 0.
        self.start_time = timeit.default_timer()
        storage = ParamStorage()
        nr_chunks = self.data.get_chunk_number()
        epoch = 0
        done_looping = False
        iter = 0
        #==== INITIAL PERFORMANCE ====
        chunk_batches = self.data.get_elements( 0 ) / batch_size
        validation_score = self._get_validation_score(batch_size, epoch, 0)
        test_score = self._get_test_score(batch_size)
        training_score = self._get_training_score(chunk_batches)
        #==== UPDATE GUI ====
        if visual_params.gui_enabled:
            interface.server.append_job_update(epoch, training_score, validation_score, test_score, learning_rate)
        try:
            while (epoch < max_epochs) and (not done_looping):
                epoch = epoch + 1
                # Periodic learning-rate decay (interval itself shrinks over time).
                if(epoch % learning_adjustment == 0):
                    learning_rate *= learning_decrease
                    nr_learning_adjustments += 1
                    #Temp
                    learning_adjustment = max(10, int(learning_adjustment/2))
                    print('---- New learning rate {}'.format(learning_rate))
                # Anneal the loss mixture factor towards its minimum.
                if(epoch > factor_adjustment):
                    max_factor = max(max_factor * factor_decrease, factor_minimum)
                    print('---- New convex combination {}'.format(max_factor))
                # Checkpoint parameters every 20 epochs.
                if(epoch % 20 == 0):
                    print('---- Storing temp model')
                    storage.store_params(self.model.params, id=str(epoch))
                if(curriculum and epoch % curriculum_adjustment == 0 and epoch >= curriculum_start):
                    print("---- Mixing examples from next stage with training data")
                    self.data.mix_in_next_stage()
                #For current examples chunk in GPU memory
                for chunk_index in range(nr_chunks):
                    self.data.switch_active_training_set( chunk_index )
                    nr_elements = self.data.get_elements( chunk_index )
                    chunk_batches = nr_elements / batch_size
                    #Each chunk contains a certain number of batches.
                    for minibatch_index in range(chunk_batches):
                        cost_ij = self.train_model(minibatch_index, learning_rate, max_factor)
                        if iter % 1000 == 0:
                            print('---- Training @ iter = {}. Patience = {}. Loss = {}'.format(iter, patience, cost_ij))
                        if visual_params.gui_enabled and iter % gui_frequency == 0:
                            interface.server.get_command_status()
                        if visual_params.gui_enabled and interface.server.is_testing():
                            self._debug(batch_size, chunk_batches, max_factor)
                        #if(np.isnan(cost_ij)):
                        #    print('cost IS NAN')
                        #==== EVAULATE ====
                        if (iter + 1) % validation_frequency == 0:
                            #==== CURRENT PERFORMANCE ====
                            validation_score = self._get_validation_score(batch_size, epoch, minibatch_index)
                            test_score = self._get_test_score(batch_size)
                            train_score = self._get_training_score(chunk_batches) #No other purpose than charting
                            #==== UPDATE GUI ====
                            if visual_params.gui_enabled:
                                interface.server.append_job_update(
                                    epoch,
                                    train_score,
                                    validation_score,
                                    test_score,
                                    learning_rate)
                            self.events.append({
                                "epoch": epoch,
                                "training_loss": train_score,
                                "validation_loss": validation_score,
                                "test_loss": test_score,
                                "training_rate": learning_rate
                            })
                            #==== EARLY STOPPING ====
                            if validation_score < best_validation_loss:
                                #improve patience if loss improvement is good enough
                                if validation_score < best_validation_loss * improvement_threshold:
                                    patience = max(patience, iter * patience_increase)
                                    print("---- New best validation loss. Patience increased to {}".format(patience))
                                # save best validation score and iteration number
                                best_validation_loss = validation_score
                                best_iter = iter
                        # Patience exhausted: stop training.
                        if patience <= iter:
                            done_looping = True
                            break
                        # GUI may request a stop; honoured at the next loop check.
                        if visual_params.gui_enabled and interface.server.stop:
                            done_looping = True
                        iter += 1 #Increment interation after each batch has been processed.
        except KeyboardInterrupt:
            self.set_result(best_iter, iter, best_validation_loss, test_score, nr_learning_adjustments, epoch)
            print("Inpterupted by user. Current model params will be saved now.")
        except Exception as e:
            print "Unexpected error:", sys.exc_info()[0]
            raise
        self.set_result(best_iter, iter, best_validation_loss, test_score, nr_learning_adjustments, epoch)
    def set_result(self, best_iter, iter, valid, test, nr_learning_adjustments, epoch):
        """Record final scores and timing in self.report and print a summary."""
        end_time = timeit.default_timer()
        # Duration in minutes since _train set self.start_time.
        duration = (end_time - self.start_time) / 60.
        valid_end_score = valid
        test_end_score = test
        print('Optimization complete.')
        print('Best validation score of %f obtained at iteration %i, '
              'with test performance %f' %
              (valid_end_score, best_iter + 1, test_end_score))
        print('The code ran for %.2fm' % (duration))
        self.report['evaluation'] = {
            'best_iteration': best_iter+1, 'iteration': iter, 'test_score': test_end_score, 'valid_score': valid_end_score,
            'learning_adjustments': nr_learning_adjustments, 'epoch': epoch, 'duration': duration
        }
        self.report['dataset'] = self.data.get_report()
def get_result(self):
return self.report | olavvatne/CNN | evaluator.py | Python | mit | 13,092 |
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
#os imports
import os
from sys import stdin,argv
import sys
from optparse import OptionParser
def name_set(in_names):
    """Return the set of entries in *in_names* that are not comment lines.

    Entries starting with '#' are treated as comments and skipped.
    (The original implementation shadowed the function's own name with its
    local accumulator; the comprehension avoids that.)

    Args:
        in_names: iterable of name strings.

    Returns:
        set of non-comment names.
    """
    return {name for name in in_names if not name.startswith("#")}
def reformat_as_fasta(filename, outfile):
    """Re-write *filename* as an ungapped fasta file at *outfile*.

    Every record's sequence has all '-' gap characters removed before
    writing. The output handle is now managed with a context manager so it
    is closed even if parsing raises (previously it leaked on error).

    Args:
        filename: input fasta (possibly aligned/gapped) path.
        outfile: output fasta path.

    Returns:
        True on completion.
    """
    with open(outfile, 'w') as f:
        for seq_record in SeqIO.parse(filename, "fasta"):
            seq_record.seq = str(seq_record.seq).replace("-", "")
            SeqIO.write(seq_record, f, "fasta")
    return True
def reformat_as_fasta_nobio(filename, outfile):
    """Re-write *filename* as an ungapped fasta file without Biopython.

    Header lines ('>' prefix) are truncated at the first '/'; sequence
    lines have '-' gap characters removed.

    Fix: the input handle *fh* was previously never closed (leaked); it is
    now closed alongside the output handle. (Python 2 print syntax kept,
    matching the rest of this script.)

    Returns:
        True on completion.
    """
    f = open(outfile, 'w')
    fh = open(filename, 'r')
    for line in fh:
        if line.startswith(">"):
            title = line.split("/")[0]
            print >> f, title.rstrip()
        else:
            seq = line.replace("-", "")
            print >> f, seq.rstrip()
    fh.close()
    f.close()
    return True
# ---- Command-line entry point (Python 2 script) ----
# Quick version flag handled before option parsing.
if "-v" in sys.argv or "--version" in sys.argv:
    print "v0.0.1"
    sys.exit(0)

usage = """Use as follows:
converts
$ python rewrite_as_fasta.py -i in.fasta -l min_length_of_seq (default(3)) --not_wanted --wanted -o out.fasta
script either reformats badly formated fasta file. Within reason. Ie. Word documents will still break it.
if lenght of seq is longer than -l default 3 - writes to file.
can filter fasta by giving it a list of --not_wanted or --wanted names.
"""

parser = OptionParser(usage=usage)
parser.add_option("-i", dest="in_file", default=None,
                  help="current fasta you want to reformat")
parser.add_option("-o", "--out", dest="out", default=None,
                  help="Output filename",
                  metavar="FILE")

(options, args) = parser.parse_args()
in_file = options.in_file
out = options.out

# The Biopython variant is kept for reference; the no-Bio path is used.
#reformat_as_fasta(in_file,out)
reformat_as_fasta_nobio(in_file, out)
print 'done'
| widdowquinn/THAPBI | Phyt_ITS_identifying_pipeline/database_files/ITS_only_working/rewrite_as_fasta.py | Python | mit | 2,075 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""adding verbose_name to druid column
Revision ID: b318dfe5fb6c
Revises: d6db5a5cdb5d
Create Date: 2017-03-08 11:48:10.835741
"""
# revision identifiers, used by Alembic.
revision = 'b318dfe5fb6c'       # this migration
down_revision = 'd6db5a5cdb5d'  # migration this one applies on top of
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable ``verbose_name`` column to the ``columns`` table."""
    verbose_name = sa.Column('verbose_name', sa.String(length=1024),
                             nullable=True)
    op.add_column('columns', verbose_name)
def downgrade():
    """Drop the ``verbose_name`` column added by ``upgrade``."""
    op.drop_column('columns', 'verbose_name')
| airbnb/caravel | superset/migrations/versions/b318dfe5fb6c_adding_verbose_name_to_druid_column.py | Python | apache-2.0 | 1,242 |
from datetime import datetime
from unittest.mock import PropertyMock, patch
from django.test import SimpleTestCase
from ws import models
from ws.utils import model_dates as utils
class MissedLectureTests(SimpleTestCase):
    """ Test the logic that checks if a participant has missed lectures. """

    def test_legacy_years(self):
        """ Participants are not marked as missing lectures in first years. """
        # We lack records for these early years, so we just assume presence
        participant = None  # Won't access the object anyway
        self.assertFalse(utils.missed_lectures(participant, 2014))
        self.assertFalse(utils.missed_lectures(participant, 2015))

    @patch('ws.utils.model_dates.ws_lectures_complete')
    @patch('ws.utils.dates.ws_year')
    def test_lectures_incomplete(self, ws_year, ws_lectures_complete):
        """ If this year's lectures haven't completed, nobody can be absent. """
        ws_lectures_complete.return_value = False
        participant = None  # Won't access the object anyway
        ws_year.return_value = current_year = 2525
        self.assertFalse(utils.missed_lectures(participant, current_year))

    @patch('ws.models.Participant.lectureattendance_set', new_callable=PropertyMock)
    @patch('ws.utils.model_dates.ws_lectures_complete')
    @patch('ws.utils.dates.ws_year')
    def test_current_year(self, ws_year, ws_lectures_complete, lecture_attendance):
        """ Check attendance in current year, after lectures complete.

        We're in a year where attendance is recorded, and we're asking about the current
        year. Did the participant attend?
        """
        participant = models.Participant()
        # We're asking about the current WS season, when lectures have occurred
        ws_year.return_value = current_year = 2020
        ws_lectures_complete.return_value = True
        # Mocked queryset: .filter(...).exists() drives missed_lectures
        attendance_exists = lecture_attendance.return_value.filter.return_value.exists
        # When participant has no attendance recorded, they've missed lectures
        attendance_exists.return_value = False
        self.assertTrue(utils.missed_lectures(participant, current_year))
        # When the participant attended, they did not miss lectures
        attendance_exists.return_value = True
        self.assertFalse(utils.missed_lectures(participant, current_year))
class LecturesCompleteTests(SimpleTestCase):
    """ Test the method that tries to infer when lectures are over. """

    @patch('ws.utils.model_dates.ws_trips_this_year')
    def test_no_trips_yet(self, ws_trips):
        """ When there are no trips (past or planned), lectures aren't complete.

        (The first trips are posted on Thursday of the first lecture week - without
        these trips, we can reasonably infer that lectures are still ongoing)
        """
        # Any filtering on trips returns an empty list (mocking an empty QuerySet)
        ws_trips.return_value.filter.return_value = []
        self.assertFalse(utils.ws_lectures_complete())

    @patch('ws.utils.model_dates.ws_trips_this_year')
    def test_past_trips(self, ws_trips):
        """ When trips have already completed, lectures are definitely over. """
        def past_only(**kwargs):
            # Only the 'past trips' filter (trip_date__lt) yields results
            if 'trip_date__lt' in kwargs:
                return [models.Trip(name="Some past trip")]
            return []
        ws_trips.return_value.filter.side_effect = past_only
        self.assertTrue(utils.ws_lectures_complete())

    @patch('ws.utils.dates.local_now')
    @patch('ws.utils.model_dates.ws_trips_this_year')
    def test_future_trips(self, ws_trips, local_now):
        """ When there are no past trips, but there are upcoming trips. """
        def future_only(**kwargs):
            """ There are no trips in the past, but there are some upcoming. """
            if 'trip_date__lt' in kwargs:
                return []
            if 'trip_date__gte' in kwargs:
                return [models.Trip(name="Some upcoming trip")]
            return []
        ws_trips.return_value.filter.side_effect = future_only
        # Test calling this function at various times of day
        time_fmt = '%a %Y-%m-%d %H:%M'
        expectations = {
            # There are future trips, but it's not yet Thursday night
            # (Explanation: Some leaders got ansty and created trips early)
            'Wed 2018-01-03 12:00': False,
            # It's evening, but lectures have just started
            'Thu 2018-01-04 19:00': False,
            # Leaders created trips, and it's after 9 pm, so we infer lectures are over
            'Thu 2018-01-04 21:15': True,
            # It's Friday, with upcoming trips. Lectures are definitely over.
            'Fri 2018-01-05 10:23': True,
        }
        for time_string, lectures_over in expectations.items():
            local_now.return_value = datetime.strptime(time_string, time_fmt)
            self.assertEqual(utils.ws_lectures_complete(), lectures_over)
| DavidCain/WinterSchool | ws/tests/utils/test_model_dates.py | Python | gpl-3.0 | 4,964 |
import cPickle as pickle
import urllib2 as web
import re
import os
from time import sleep
class TenWeek(object):
    def __init__(self):
        """Load persisted state, merge Moodle deadlines, and persist again."""
        self.file = 'tenw.PICKLE'
        # {iso_date: {'TODO': [text, ...],
        #             'DEADLINE': [(text, submitted_bool), ...]}}
        self.tenw = dict()
        # (date, deadline_index, submitted) triples for upcoming deadlines
        self.todayNotice = list()
        self.load()
        self.moodle()
        self.dump()
    def load(self):
        """Load the pickled table; create an empty pickle file on first run.

        The file stores a one-element list [tenw]. On first run the fresh
        empty dict from __init__ is written out and kept in memory.
        """
        if not os.path.exists(self.file):
            with open(self.file, 'wb') as f:
                pickle.dump([self.tenw],f,protocol=-1)
        else:
            with open(self.file, 'rb') as f:
                self.tenw = pickle.load(f)[0]
def dump(self):
with open(self.file, 'wb') as f:
pickle.dump([self.tenw],f,protocol=-1)
    def moodle(self, console = False):
        """Fetch the Moodle calendar (iCal) and merge its events as deadlines.

        Reads the calendar URL from the local 'calenda' file, retries the
        download every 5 seconds until HTTP 200 is received, then parses the
        iCal VEVENT records into self.tenw keyed by ISO date. Events already
        present (submitted or not) are not duplicated.

        Args:
            console: if True, echo each parsed event to stdout.
        """
        with open('calenda','r') as f:
            calenda = f.read()
        success = False
        # Retry the download indefinitely until we get a 200 response.
        while success is False:
            try:
                calenda = web.urlopen(calenda)
                if calenda.getcode() == 200:
                    success = True
                    print "revived moodle information"
            except Exception, e:
                print e
                sleep(5)
                print "error retrieving moodle deadlines, trying again..."
        events = calenda.read().split('\n')
        # Sweeping variables
        dls = dict()
        smr = str()   # event summary/description
        tag = str()   # category (course) tag, first 5 chars
        dln = str()   # ISO deadline date derived from DTSTART
        for event in events:
            # Normalise the raw iCal line before splitting on ':'.
            event = event.strip()
            event = re.sub(r"\\","",event)
            event = re.sub(r"\r","",event)
            event = re.sub(r"\t","",event)
            event = re.sub(r"\s{2,}"," ",event)
            event = event.split(':')
            if event[0] == 'SUMMARY':
                smr = ''.join(event[1:])
            if event[0] == 'DESCRIPTION':
                patch = ''.join(event[1:])
                # Only append the description when it adds information.
                if patch != smr:
                    smr += ': ' + patch
            if event[0] == 'CATEGORIES':
                tag = event[1][:5]
            if event[0] == 'DTSTART':
                # DTSTART value starts with YYYYMMDD; convert to YYYY-MM-DD.
                date = event[1][:8]
                year = date[:4]
                month = date[4:6]
                day = date[6:8]
                dln = '-'.join([year, month, day])
            if event == ['END','VEVENT']:
                content = '>> '.join([tag,smr])
                if console:
                    print dln, content
                if dln in self.tenw:
                    avail = self.tenw[dln]['DEADLINE']
                    # Skip events already recorded, submitted or not.
                    if (content, True) in avail or (content, False) in avail:
                        pass
                    else:
                        self.tenw[dln]['DEADLINE'].append((content, False))
                else:
                    self.tenw[dln] = {'TODO':[], 'DEADLINE': [(content, False)]}
    def collect(self, timeSig):
        """Rebuild self.todayNotice with unsubmitted deadlines on/after timeSig.

        Each notice is a (date, deadline_index, False) triple referring into
        self.tenw. NOTE(review): relies on Python 2 dict.keys() returning a
        sortable list.
        """
        marks = self.tenw.keys()
        marks.sort()
        i = 0
        # Skip dates strictly before timeSig (ISO strings sort chronologically).
        while i < len(marks) and marks[i] < timeSig:
            i += 1
        marks = marks[i:]
        self.todayNotice = list()
        for mark in marks:
            dlns = self.tenw[mark]['DEADLINE']
            for i in range(0,len(dlns)):
                dln = dlns[i]
                # Only unsubmitted deadlines become notices.
                if not dln[1]:
                    self.todayNotice.append((mark, i, False))
    def revive(self, timeSig):
        """Refresh Moodle deadlines, recompute today's notices and persist."""
        self.moodle()
        self.collect(timeSig)
        self.dump()
    def migrate(self, time, plan, order):
        """Move a todo item from the daily plan to tomorrow's ten-week slot.

        Args:
            time: time helper exposing tmrSig (tomorrow's ISO date).
            plan: plan object exposing newestPlanList and keys.
            order: one element  -> 1-based index into the plan's TODO list;
                   two elements -> timed-slot key (letter 'a'.. maps to
                   plan.keys index, or an hour number) plus 1-based index.

        Returns:
            dict of human-readable status messages ('plan' and 'tenw').
        """
        mail = dict()
        if len(order) == 1:
            todo_i = int(order[0]) - 1
            content = plan.newestPlanList['TODO'][todo_i]
            del plan.newestPlanList['TODO'][todo_i]
            mail['plan'] = "todo #{}: '{}' is migrated".format(
                todo_i + 1, content)
        if len(order) == 2:
            if len(order[0]) == 1:
                # A single letter addresses the i-th timed key (a=0, b=1, ...).
                time_i = ord(order[0])-97
                key = plan.keys[time_i]
            else:
                key = int(order[0])
            content = plan.newestPlanList[key][int(order[1])-1]
            del plan.newestPlanList[key][int(order[1])-1]
            if len(plan.newestPlanList[key]) == 0:
                # Slot emptied: drop it and refresh the sorted key list.
                del plan.newestPlanList[key]
                plan.keys = plan.newestPlanList.keys()
                plan.keys.sort()
            mail['plan'] = "timed {}#{}: '{}' is migrated".format(
                key, order[1], content)
        if time.tmrSig in self.tenw:
            self.tenw[time.tmrSig]['TODO'].append(content)
        else:
            self.tenw[time.tmrSig] = {'TODO': [content], 'DEADLINE': []}
        mail['tenw'] = "{} received todo '{}'".format(
            time.tmrSig, content)
        self.dump()
        return mail
def dateRegul(self, date, tdSig):
dates = date.split('/')
day = dates[0]
month = dates[1]
if len(dates) == 2:
year = tdSig.split('-')[0]
else:
year = dates[2]
if len(day) == 1:
day = '0'+ day
if len(month) == 1:
month = '0'+ month
date = '-'.join([year, month, day])
return date
def pin(self, time, order):
    """Attach a TODO to tomorrow (one arg) or to an explicit date (two args).

    ``order`` is ``[content]`` or ``[date, content]`` with the date in
    D/M[/Y] form. Returns a status-mail dict and persists state.
    """
    mail = dict()
    if len(order) == 1:
        date, content = time.tmrSig, order[0]
    if len(order) == 2:
        date = self.dateRegul(order[0], time.tdSig)
        content = order[1]
    entry = self.tenw.setdefault(date, {'TODO': [], 'DEADLINE': []})
    entry['TODO'].append(content)
    mail['tenw'] = "{} received todo '{}'".format(date, content)
    self.dump()
    return mail
def deadline(self, tdSig, order):
    """Register a deadline on a date given as D/M[/Y].

    ``order`` is ``[date_string, content]``. A deadline landing on today
    flags ``plan_changed`` so the caller refreshes the daily plan. The
    notice list is rebuilt so the new deadline shows up immediately.

    Fix: removed the dead local ``index`` bookkeeping (assigned on both
    branches, never read).
    """
    mail = dict()
    date = self.dateRegul(order[0], tdSig)
    if date == tdSig:
        mail['plan_changed'] = True
    content = order[1]
    if date in self.tenw:
        # Deadlines are (text, submitted_flag) tuples.
        self.tenw[date]['DEADLINE'].append((content, False))
    else:
        self.tenw[date] = {'TODO': [], 'DEADLINE': [(content, False)]}
    # Insert to today's notice list
    self.collect(tdSig)
    mail['tenw'] = "{} received deadline '{}'".format(date, content)
    self.dump()
    return mail
def submitted(self, tdSig, order):
    """Mark the numbered entries of todayNotice as submitted.

    ``order`` holds 1-based indices into ``self.todayNotice``; each
    referenced deadline tuple is rewritten with its flag set to True,
    both in the notice list and in the backing ``self.tenw`` store.
    """
    mail = dict()
    for i in range(0, len(order)):
        order_i = int(order[i]) - 1
        content = self.todayNotice[order_i]  # (date, deadline_index, flag)
        if content[0] == tdSig:
            # A deadline due today affects what the daily plan displays.
            mail['plan_changed'] = True
        self.todayNotice[order_i] = (content[0],content[1],True)
        # Tuples are immutable, so rebuild the stored (text, flag) pair.
        temp = self.tenw[content[0]]['DEADLINE'][content[1]][0]
        self.tenw[content[0]]['DEADLINE'][content[1]] = (temp, True)
    mail['tenw'] = "submitted {} deadlines".format(
        len(order))
    self.dump()
    return mail
def todayDlMailFormat(self, time, dayend):
    """Format today's pending deadlines as a mail dict.

    Produces a title, one detail line per notice ("date (Nd): text"),
    and an ASCII progress chart where each '1 ' marks a deadline and
    '- ' marks a free day before it.
    """
    mail = dict()
    # Past the day-end cutoff, notices are attributed to tomorrow.
    if time.timeStamp >= dayend:
        date = time.tmrSig
    else:
        date = time.tdSig
    mail['title'] = 'deadlines notices of {}'.format(date)
    noticeList = list()
    progress = str()
    for dl in self.todayNotice:  # dl is (date, deadline_index, submitted)
        dist = time.substract(dl[0],date)  # days until the deadline
        content = "{} ({}d): ".format(dl[0], dist)
        content += "{}".format(
            self.tenw[dl[0]]['DEADLINE'][dl[1]][0])
        content += int(dl[2])*' [SUBMITTED]'
        noticeList.append(content)
        # Each chart cell is two chars wide; freedays is how many empty
        # cells remain before this deadline's slot.
        # NOTE(review): '/' is integer division only on Python 2; on
        # Python 3 freedays would be a float and '- ' * freedays raises.
        # This module appears to target Python 2 -- confirm.
        freedays = dist - len(progress)/2
        if freedays < 0 and progress != str():
            # Deadline shares a cell with the previous one: bump its count.
            progress = progress[:-2] + str(int(progress[-2:-1])+1) + ' '
        elif freedays >= 0:
            progress += '- ' * freedays + '1 '
    mail['details'] = noticeList
    mail['chart'] = ['today > ' + progress]
    return mail
def delete(self, tdSig, order):
    """Delete a whole date, one kind ('td'/other), or a single entry.

    ``order`` is ``[date]``, ``[date, kind]`` or ``[date, kind, number]``
    with a 1-based number. Sets ``plan_changed`` when today's deadlines
    are affected and rebuilds the notice list accordingly.
    """
    mail = dict()
    date = self.dateRegul(order[0], tdSig)
    if date not in self.tenw:
        mail['tenw'] = '{} is empty, nothing to be deleted'.format(
            date)
        self.dump()
        return mail
    # True if this date currently holds deadlines; their removal
    # invalidates today's notice list (hence collect() below).
    dln = self.tenw[date]['DEADLINE'] != []
    if len(order) == 1:
        # Delete the entire date.
        if date == tdSig and dln:
            mail['plan_changed'] = True
        del self.tenw[date]
        mail['tenw'] = 'deleted whole {}'.format(date)
        if dln: self.collect(tdSig)
        self.dump()
        return mail
    # 'td' selects TODOs; anything else selects deadlines.
    if order[1].lower() == 'td':
        kind = 'TODO'
    else:
        kind = 'DEADLINE'
    if len(order) == 2:
        # Empty one kind; drop the date entirely if both lists are empty.
        if date == tdSig and dln:
            mail['plan_changed'] = True
        self.tenw[date][kind] = []
        mail['tenw'] = "{} of {} is emptied".format(
            kind, date)
        if self.tenw[date] == {'TODO':[], 'DEADLINE':[]}:
            del self.tenw[date]
            mail['tenw'] += ", turn out {} is now empty, deleted it.".format(date)
        if dln: self.collect(tdSig)
        self.dump()
        return mail
    # Three args: delete one numbered entry.
    num = int(order[2]) - 1
    if not num < len(self.tenw[date][kind]):
        mail['tenw'] = "{} of {} does not have at least {} elements".format(
            kind, date, num + 1)
        return mail
    if date == tdSig and kind == 'DEADLINE':
        mail['plan_changed'] = True
    del self.tenw[date][kind][num]
    mail['tenw'] = "{}#{} of {} is removed".format(
        kind, num+1, date)
    if self.tenw[date] == {'TODO':[], 'DEADLINE':[]}:
        del self.tenw[date]
        mail['tenw'] += ", turn out {} is now empty, deleted it.".format(date)
    if kind == 'DEADLINE': self.collect(tdSig)
    self.dump()
    return mail
def query(self, time, order):
    """List calendar events for one date, a date range, or ('ten') all.

    Returns a mail dict; long listings additionally set 'transfer' so
    the caller can route them differently.
    """
    mail = dict()
    if len(order) == 1:
        if order[0].lower() == 'ten':
            return self.calendar()
        else:
            # Single date: query a one-day range.
            from_str = to_str = self.dateRegul(order[0],time.tdSig)
    if len(order) == 2:
        from_str = self.dateRegul(order[0],time.tdSig)
        to_str = self.dateRegul(order[1],time.tdSig)
    dayList = time.daySeri(from_str, to_str)  # inclusive day series
    content = ['calendar events from {} to {}:'.format(
        from_str, to_str)]
    for day in dayList:
        if day in self.tenw:
            content.append('\n[[{}]]:'.format(day))
            todos = self.tenw[day]['TODO']
            if todos != list():
                content.append(' (todos)')
                for todo in todos:
                    content.append('. {}'.format(todo))
            dlns = self.tenw[day]['DEADLINE']
            if dlns != list():
                content.append(' (deadlines)')
                for dln in dlns:
                    # int(flag) * suffix appends the tag only when submitted.
                    content.append('. {}'.format(
                        dln[0])+int(dln[1])*' [SUBMITTED]')
    if len(content) == 1: content.append('nothing')
    if len(content) >= 15:
        # Long listing: flag it for transfer with just the header line.
        mail['transfer'] = content[0]
    mail['tenw'] = '\n\t'.join(content)
    return mail
def todos(self, dateSig):
    """Return a copy of the TODO list stored for ``dateSig`` (or [])."""
    if dateSig in self.tenw:
        return list(self.tenw[dateSig]['TODO'])
    return []
def dlns(self, dateSig):
    """Deadline strings for ``dateSig``, tagged with submission status."""
    if dateSig not in self.tenw:
        return []
    out = []
    for text, done in self.tenw[dateSig]['DEADLINE']:
        tag = ' [SUBMITTED]' if done else ' [NOT YET SUBMITTED]'
        out.append('{}{}'.format(text, tag))
    return out
def calendar(self):
    """Ten-week calendar overview (stub -- not yet implemented)."""
    return {
        'transfer': 'next ten weeks calendar',
        'message': 'not yet implemented',
    }
import logging
import os
import os.path
import shutil
import subprocess
import tempfile
import time
from six.moves import urllib
import uuid
from six.moves.urllib.parse import urlparse # pylint: disable=E0611,F0401
from test.service import ExternalService, SpawnedService
from test.testutil import get_open_port
log = logging.getLogger(__name__)
class Fixture(object):
    """Base class for external-service test fixtures.

    Resolves Kafka/Scala versions and install locations from the
    environment (with defaults) and provides helpers shared by the
    Zookeeper and Kafka fixtures.
    """

    kafka_version = os.environ.get('KAFKA_VERSION', '0.8.0')
    scala_version = os.environ.get("SCALA_VERSION", '2.8.0')
    project_root = os.environ.get('PROJECT_ROOT', os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
    kafka_root = os.environ.get("KAFKA_ROOT", os.path.join(project_root, 'servers', kafka_version, "kafka-bin"))
    ivy_root = os.environ.get('IVY_ROOT', os.path.expanduser("~/.ivy2/cache"))

    @classmethod
    def download_official_distribution(cls,
                                       kafka_version=None,
                                       scala_version=None,
                                       output_dir=None):
        """Download the official Kafka tarball; return the local path.

        Skips the download if the file already exists. Newer releases
        use a .tgz suffix, older ones .tar.gz, so retry on HTTP error.
        """
        if not kafka_version:
            kafka_version = cls.kafka_version
        if not scala_version:
            scala_version = cls.scala_version
        if not output_dir:
            output_dir = os.path.join(cls.project_root, 'servers', 'dist')

        distfile = 'kafka_%s-%s' % (scala_version, kafka_version,)
        url_base = 'https://archive.apache.org/dist/kafka/%s/' % (kafka_version,)
        output_file = os.path.join(output_dir, distfile + '.tgz')

        if os.path.isfile(output_file):
            log.info("Found file already on disk: %s", output_file)
            return output_file

        # New tarballs are .tgz, older ones are sometimes .tar.gz
        try:
            url = url_base + distfile + '.tgz'
            log.info("Attempting to download %s", url)
            response = urllib.request.urlopen(url)
        except urllib.error.HTTPError:
            log.exception("HTTP Error")
            url = url_base + distfile + '.tar.gz'
            log.info("Attempting to download %s", url)
            response = urllib.request.urlopen(url)

        log.info("Saving distribution file to %s", output_file)
        # Bug fix: the tarball is binary data -- write in binary mode
        # ('wb'); text mode corrupts it on Windows and fails on Python 3.
        with open(output_file, 'wb') as output_file_fd:
            output_file_fd.write(response.read())

        return output_file

    @classmethod
    def test_resource(cls, filename):
        """Absolute path of a per-version test resource file."""
        return os.path.join(cls.project_root, "servers", cls.kafka_version, "resources", filename)

    @classmethod
    def kafka_run_class_args(cls, *args):
        """Command line invoking a Java class via kafka-run-class.sh."""
        result = [os.path.join(cls.kafka_root, 'bin', 'kafka-run-class.sh')]
        result.extend(args)
        return result

    @classmethod
    def kafka_run_class_env(cls):
        """Copy of os.environ with log4j pointed at the test config."""
        env = os.environ.copy()
        env['KAFKA_LOG4J_OPTS'] = "-Dlog4j.configuration=file:%s" % cls.test_resource("log4j.properties")
        return env

    @classmethod
    def render_template(cls, source_file, target_file, binding):
        """Render source_file (a str.format template) into target_file."""
        with open(source_file, "r") as handle:
            template = handle.read()
        with open(target_file, "w") as handle:
            handle.write(template.format(**binding))
class ZookeeperFixture(Fixture):
    """Runs a local Zookeeper process, or wraps one named by ZOOKEEPER_URI."""

    @classmethod
    def instance(cls):
        """Return a running fixture: external if ZOOKEEPER_URI is set."""
        if "ZOOKEEPER_URI" in os.environ:
            parse = urlparse(os.environ["ZOOKEEPER_URI"])
            (host, port) = (parse.hostname, parse.port)
            fixture = ExternalService(host, port)
        else:
            (host, port) = ("127.0.0.1", get_open_port())
            fixture = cls(host, port)
            fixture.open()
        return fixture

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.tmp_dir = None  # scratch dir, created in open(), removed in close()
        self.child = None    # the spawned Zookeeper process

    def out(self, message):
        # Prefixed logging helper so interleaved fixture output is readable.
        log.info("*** Zookeeper [%s:%d]: %s", self.host, self.port, message)

    def open(self):
        """Render config, spawn Zookeeper, and wait until it binds its port."""
        self.tmp_dir = tempfile.mkdtemp()
        self.out("Running local instance...")
        log.info(" host = %s", self.host)
        log.info(" port = %s", self.port)
        log.info(" tmp_dir = %s", self.tmp_dir)

        # Generate configs
        template = self.test_resource("zookeeper.properties")
        properties = os.path.join(self.tmp_dir, "zookeeper.properties")
        self.render_template(template, properties, vars(self))

        # Configure Zookeeper child process
        args = self.kafka_run_class_args("org.apache.zookeeper.server.quorum.QuorumPeerMain", properties)
        env = self.kafka_run_class_env()

        # Party!
        self.out("Starting...")
        timeout = 5
        max_timeout = 30
        backoff = 1
        # Retry with a doubling timeout until the server reports binding.
        while True:
            self.child = SpawnedService(args, env)
            self.child.start()
            timeout = min(timeout, max_timeout)
            if self.child.wait_for(r"binding to port", timeout=timeout):
                break
            self.child.stop()
            timeout *= 2
            time.sleep(backoff)
        self.out("Done!")

    def close(self):
        """Stop the child process and delete the scratch directory."""
        self.out("Stopping...")
        self.child.stop()
        self.child = None
        self.out("Done!")
        shutil.rmtree(self.tmp_dir)
class KafkaFixture(Fixture):
    """Runs a local Kafka broker chrooted under a given Zookeeper node."""

    @classmethod
    def instance(cls, broker_id, zk_host, zk_port, zk_chroot=None, replicas=1, partitions=2):
        """Return a running fixture: external if KAFKA_URI is set."""
        if zk_chroot is None:
            # Random chroot keeps concurrent test runs isolated in ZK.
            zk_chroot = "kafka-python_" + str(uuid.uuid4()).replace("-", "_")
        if "KAFKA_URI" in os.environ:
            parse = urlparse(os.environ["KAFKA_URI"])
            (host, port) = (parse.hostname, parse.port)
            fixture = ExternalService(host, port)
        else:
            (host, port) = ("127.0.0.1", get_open_port())
            fixture = KafkaFixture(host, port, broker_id, zk_host, zk_port, zk_chroot, replicas, partitions)
            fixture.open()
        return fixture

    def __init__(self, host, port, broker_id, zk_host, zk_port, zk_chroot, replicas=1, partitions=2):
        self.host = host
        self.port = port

        self.broker_id = broker_id

        self.zk_host = zk_host
        self.zk_port = zk_port
        self.zk_chroot = zk_chroot

        self.replicas = replicas
        self.partitions = partitions

        self.tmp_dir = None   # scratch dir, created in open()
        self.child = None     # the spawned broker process
        self.running = False  # guards double open()/close()

    def out(self, message):
        # Prefixed logging helper so interleaved fixture output is readable.
        log.info("*** Kafka [%s:%d]: %s", self.host, self.port, message)

    def open(self):
        """Create the ZK chroot, render config, spawn and await the broker."""
        if self.running:
            self.out("Instance already running")
            return

        self.tmp_dir = tempfile.mkdtemp()
        self.out("Running local instance...")
        log.info(" host = %s", self.host)
        log.info(" port = %s", self.port)
        log.info(" broker_id = %s", self.broker_id)
        log.info(" zk_host = %s", self.zk_host)
        log.info(" zk_port = %s", self.zk_port)
        log.info(" zk_chroot = %s", self.zk_chroot)
        log.info(" replicas = %s", self.replicas)
        log.info(" partitions = %s", self.partitions)
        log.info(" tmp_dir = %s", self.tmp_dir)

        # Create directories
        os.mkdir(os.path.join(self.tmp_dir, "logs"))
        os.mkdir(os.path.join(self.tmp_dir, "data"))

        # Generate configs
        template = self.test_resource("kafka.properties")
        properties = os.path.join(self.tmp_dir, "kafka.properties")
        self.render_template(template, properties, vars(self))

        # Party!
        self.out("Creating Zookeeper chroot node...")
        args = self.kafka_run_class_args("org.apache.zookeeper.ZooKeeperMain",
                                         "-server", "%s:%d" % (self.zk_host, self.zk_port),
                                         "create",
                                         "/%s" % self.zk_chroot,
                                         "kafka-python")
        env = self.kafka_run_class_env()
        proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        if proc.wait() != 0:
            self.out("Failed to create Zookeeper chroot node")
            self.out(proc.stdout.read())
            self.out(proc.stderr.read())
            raise RuntimeError("Failed to create Zookeeper chroot node")
        self.out("Done!")

        self.out("Starting...")

        # Configure Kafka child process
        args = self.kafka_run_class_args("kafka.Kafka", properties)
        env = self.kafka_run_class_env()

        timeout = 5
        max_timeout = 30
        backoff = 1
        # Retry with a doubling timeout until the broker reports started.
        while True:
            self.child = SpawnedService(args, env)
            self.child.start()
            timeout = min(timeout, max_timeout)
            if self.child.wait_for(r"\[Kafka Server %d\], Started" %
                                   self.broker_id, timeout=timeout):
                break
            self.child.stop()
            timeout *= 2
            time.sleep(backoff)
        self.out("Done!")
        self.running = True

    def close(self):
        """Stop the broker and delete the scratch directory."""
        if not self.running:
            self.out("Instance already stopped")
            return

        self.out("Stopping...")
        self.child.stop()
        self.child = None
        self.out("Done!")
        shutil.rmtree(self.tmp_dir)
        self.running = False
| gamechanger/kafka-python | test/fixtures.py | Python | apache-2.0 | 9,197 |
from ldapcherry.roles import Roles
from ldapcherry.exceptions import DumplicateRoleKey, MissingKey, DumplicateRoleContent, MissingRolesFile
from ldapcherry.pyyamlwrapper import DumplicatedKey, RelationError
from yaml import load, dump
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
class CustomDumper(yaml.SafeDumper):
    "A custom YAML dumper that never emits aliases"
    def ignore_aliases(self, _data):
        # Claim every value needs no alias, so shared objects are
        # serialized inline instead of as '&anchor'/'*alias' references.
        return True
# Manual smoke test of the Roles API (Python 2 script: bare print statements).
inv = Roles('./conf/roles.yml')

print
print inv.dump_nest()

# Membership expected to resolve to only the base 'users' role.
groups = {
    'ad' : ['Domain Users', 'Domain Users 2'],
    'ldap': ['cn=users,ou=group,dc=example,dc=com']
}

print inv.get_roles(groups)

# Broader membership, including an unknown backend ('toto') which
# Roles is expected to tolerate/ignore -- TODO confirm.
groups = {
    'ad' : ['Domain Users', 'Domain Users 2'],
    'ldap': ['cn=users,ou=group,dc=example,dc=com',
        'cn=nagios admins,ou=group,dc=example,dc=com',
        'cn=developers,ou=group,dc=example,dc=com',
        ],
    'toto': ['not a group'],
}

print inv.get_roles(groups)

print inv.get_allroles()

print inv.get_backends()
| kakwa/ldapcherry | misc/debug_roles.py | Python | mit | 1,028 |
import os
import sys
from copy import copy
from watchdog.events import RegexMatchingEventHandler
if sys.platform == "darwin":
from watchdog.observers.polling import PollingObserver as Observer
else:
from watchdog.observers import Observer
from microproxy.log import ProxyLogger
logger = ProxyLogger.get_logger(__name__)
class PluginEventHandler(RegexMatchingEventHandler):
    """Watchdog handler that fires ``callback`` when one file is modified."""

    def __init__(self, filename, callback):
        # Match only the watched plugin file; ignore directory events.
        super(PluginEventHandler, self).__init__(ignore_directories=True,
                                                 regexes=['.*' + filename])
        self.callback = callback

    def on_modified(self, event):
        self.callback()
class Plugin(object):
    """A single user plugin file, executed in-process and hot-reloaded
    whenever it changes on disk."""

    # Hook names a plugin module may expose.
    PLUGIN_METHODS = ["on_request", "on_response"]

    def __init__(self, plugin_path):
        self.plugin_path = os.path.abspath(plugin_path)
        self.plugin_name = os.path.basename(self.plugin_path)
        self.plugin_dir = os.path.dirname(self.plugin_path)
        self.namespace = None  # dict the plugin file is exec'd into
        self._load_plugin()
        self._register_watcher()

    def _register_watcher(self):
        """Start a watchdog observer that reloads this file on change."""
        logger.debug("Register File Watcher for {0}".format(self.plugin_name))
        self.event_handler = PluginEventHandler(self.plugin_name,
                                                self._reload_plugin)
        self.observer = Observer()
        self.observer.schedule(self.event_handler, self.plugin_dir)
        self.observer.start()

    def _load_plugin(self):
        """Execute the plugin file into a fresh namespace dict.

        The plugin directory is temporarily put on sys.path so the
        plugin can import its own sibling modules.
        """
        sys.path.append(os.path.dirname(self.plugin_path))
        try:
            with open(self.plugin_path) as fp:
                self.namespace = {"__file__": self.plugin_path}
                code = compile(fp.read(), self.plugin_path, "exec")
                # NOTE(review): exec of arbitrary plugin code -- plugins are
                # fully trusted; never point this at untrusted files.
                exec (code, self.namespace, self.namespace)
        except Exception as e:
            # Keep the proxy alive even if the plugin fails to load.
            logger.exception(e)
        # NOTE(review): pop() assumes the plugin did not itself modify
        # sys.path; a targeted remove() would be safer -- confirm.
        sys.path.pop()
        logger.info("Load Plugin : {0}".format(self.plugin_name))

    def _reload_plugin(self):
        # Called by the file watcher; re-executes the file from scratch.
        logger.info("Reload Plugin : {0}".format(self.plugin_name))
        self._load_plugin()

    def __getattr__(self, attr):
        # Expose only the known hook names, resolved from the exec'd
        # namespace; everything else raises AttributeError.
        if attr not in self.PLUGIN_METHODS:
            raise AttributeError
        try:
            return self.namespace[attr]
        except KeyError:
            raise AttributeError
class PluginManager(object):
    """Loads interceptor plugins and chains contexts through their hooks.

    Refactor: ``exec_request``/``exec_response`` were copy-pasted; both
    now delegate to ``_run_hook``. The AttributeError handling is
    narrowed to the hook *lookup*, so an AttributeError raised inside a
    plugin's hook propagates instead of being silently swallowed.
    """

    def __init__(self, config):
        self.plugins = []
        self.load_plugins(config["plugins"])

    def load_plugins(self, plugin_paths):
        """Instantiate a Plugin (with its file watcher) for every path."""
        for plugin_path in plugin_paths:
            plugin = Plugin(plugin_path)
            self.plugins.append(plugin)

    def _run_hook(self, hook_name, plugin_context):
        """Pass a copy of plugin_context through each plugin's hook_name."""
        if len(self.plugins) == 0:
            return plugin_context
        current_context = copy(plugin_context)
        for plugin in self.plugins:
            try:
                hook = getattr(plugin, hook_name)
            except AttributeError:
                logger.debug(
                    "Plugin {0} does not have {1}".format(
                        plugin.namespace["__file__"].split("/")[-1],
                        hook_name))
                continue
            # Copy after each hook so plugins cannot mutate each other's
            # (or the caller's) context object in place.
            current_context = copy(hook(current_context))
        return current_context

    def exec_request(self, plugin_context):
        return self._run_hook("on_request", plugin_context)

    def exec_response(self, plugin_context):
        return self._run_hook("on_response", plugin_context)
| mike820324/microProxy | microproxy/interceptor/plugin_manager.py | Python | mit | 3,707 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import mock
from openstackclient.common import context
from openstackclient.tests import utils
class TestContext(utils.TestCase):
    """Tests for the log-level helpers in openstackclient.common.context."""

    def test_log_level_from_options(self):
        # verbose_level 0..3 maps to ERROR, WARNING, INFO, DEBUG.
        opts = mock.Mock()
        opts.verbose_level = 0
        self.assertEqual(logging.ERROR, context.log_level_from_options(opts))
        opts.verbose_level = 1
        self.assertEqual(logging.WARNING, context.log_level_from_options(opts))
        opts.verbose_level = 2
        self.assertEqual(logging.INFO, context.log_level_from_options(opts))
        opts.verbose_level = 3
        self.assertEqual(logging.DEBUG, context.log_level_from_options(opts))

    def test_log_level_from_config(self):
        # verbose_level alone follows the same 0..3 mapping.
        cfg = {'verbose_level': 0}
        self.assertEqual(logging.ERROR, context.log_level_from_config(cfg))
        cfg = {'verbose_level': 1}
        self.assertEqual(logging.WARNING, context.log_level_from_config(cfg))
        cfg = {'verbose_level': 2}
        self.assertEqual(logging.INFO, context.log_level_from_config(cfg))
        cfg = {'verbose_level': 3}
        self.assertEqual(logging.DEBUG, context.log_level_from_config(cfg))
        # An explicit log_level overrides verbose_level...
        cfg = {'verbose_level': 1, 'log_level': 'critical'}
        self.assertEqual(logging.CRITICAL, context.log_level_from_config(cfg))
        cfg = {'verbose_level': 1, 'log_level': 'error'}
        self.assertEqual(logging.ERROR, context.log_level_from_config(cfg))
        cfg = {'verbose_level': 1, 'log_level': 'warning'}
        self.assertEqual(logging.WARNING, context.log_level_from_config(cfg))
        cfg = {'verbose_level': 1, 'log_level': 'info'}
        self.assertEqual(logging.INFO, context.log_level_from_config(cfg))
        cfg = {'verbose_level': 1, 'log_level': 'debug'}
        self.assertEqual(logging.DEBUG, context.log_level_from_config(cfg))
        # ...unknown names fall back to WARNING, and debug=True wins overall.
        cfg = {'verbose_level': 1, 'log_level': 'bogus'}
        self.assertEqual(logging.WARNING, context.log_level_from_config(cfg))
        cfg = {'verbose_level': 1, 'log_level': 'info', 'debug': True}
        self.assertEqual(logging.DEBUG, context.log_level_from_config(cfg))

    @mock.patch('warnings.simplefilter')
    def test_set_warning_filter(self, simplefilter):
        # ERROR/WARNING suppress warnings; INFO shows each once.
        context.set_warning_filter(logging.ERROR)
        simplefilter.assert_called_with("ignore")
        context.set_warning_filter(logging.WARNING)
        simplefilter.assert_called_with("ignore")
        context.set_warning_filter(logging.INFO)
        simplefilter.assert_called_with("once")
class TestFileFormatter(utils.TestCase):
    """Tests for the log-file formatter's context-dependent format string."""

    def test_nothing(self):
        # Without options/config there is no [cloud user project] section.
        formatter = context._FileFormatter()
        self.assertEqual(('%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                          '%(name)s %(message)s'), formatter.fmt)

    def test_options(self):
        # Identity taken from parsed CLI options.
        class Opts(object):
            cloud = 'cloudy'
            os_project_name = 'projecty'
            username = 'usernamey'
        options = Opts()
        formatter = context._FileFormatter(options=options)
        self.assertEqual(('%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                          '%(name)s [cloudy usernamey projecty] %(message)s'),
                         formatter.fmt)

    def test_config(self):
        # Identity taken from a cloud-config object instead.
        config = mock.Mock()
        config.config = {'cloud': 'cloudy'}
        config.auth = {'project_name': 'projecty', 'username': 'usernamey'}
        formatter = context._FileFormatter(config=config)
        self.assertEqual(('%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                          '%(name)s [cloudy usernamey projecty] %(message)s'),
                         formatter.fmt)
class TestLogConfigurator(utils.TestCase):
    """Tests for LogConfigurator wiring of root/library loggers and handlers."""

    def setUp(self):
        super(TestLogConfigurator, self).setUp()
        self.options = mock.Mock()
        self.options.verbose_level = 1
        self.options.log_file = None
        self.options.debug = False
        # Mocks returned (in order) by successive logging.getLogger calls.
        self.root_logger = mock.Mock()
        self.root_logger.setLevel = mock.Mock()
        self.root_logger.addHandler = mock.Mock()
        self.requests_log = mock.Mock()
        self.requests_log.setLevel = mock.Mock()
        self.cliff_log = mock.Mock()
        self.cliff_log.setLevel = mock.Mock()
        self.stevedore_log = mock.Mock()
        self.stevedore_log.setLevel = mock.Mock()
        self.iso8601_log = mock.Mock()
        self.iso8601_log.setLevel = mock.Mock()
        self.loggers = [
            self.root_logger,
            self.requests_log,
            self.cliff_log,
            self.stevedore_log,
            self.iso8601_log]

    @mock.patch('logging.StreamHandler')
    @mock.patch('logging.getLogger')
    @mock.patch('openstackclient.common.context.set_warning_filter')
    def test_init(self, warning_filter, getLogger, handle):
        # Default init: console handler on root, noisy libs clamped to ERROR.
        getLogger.side_effect = self.loggers
        console_logger = mock.Mock()
        console_logger.setFormatter = mock.Mock()
        console_logger.setLevel = mock.Mock()
        handle.return_value = console_logger

        configurator = context.LogConfigurator(self.options)

        getLogger.assert_called_with('iso8601') # last call
        warning_filter.assert_called_with(logging.WARNING)
        self.root_logger.setLevel.assert_called_with(logging.DEBUG)
        self.root_logger.addHandler.assert_called_with(console_logger)
        self.requests_log.setLevel.assert_called_with(logging.ERROR)
        self.cliff_log.setLevel.assert_called_with(logging.ERROR)
        self.stevedore_log.setLevel.assert_called_with(logging.ERROR)
        self.iso8601_log.setLevel.assert_called_with(logging.ERROR)
        self.assertEqual(False, configurator.dump_trace)

    @mock.patch('logging.getLogger')
    @mock.patch('openstackclient.common.context.set_warning_filter')
    def test_init_no_debug(self, warning_filter, getLogger):
        # debug=True lifts the clamp and enables traceback dumping.
        getLogger.side_effect = self.loggers
        self.options.debug = True

        configurator = context.LogConfigurator(self.options)

        warning_filter.assert_called_with(logging.DEBUG)
        self.requests_log.setLevel.assert_called_with(logging.DEBUG)
        self.assertEqual(True, configurator.dump_trace)

    @mock.patch('logging.FileHandler')
    @mock.patch('logging.getLogger')
    @mock.patch('openstackclient.common.context.set_warning_filter')
    @mock.patch('openstackclient.common.context._FileFormatter')
    def test_init_log_file(self, formatter, warning_filter, getLogger, handle):
        # --log-file adds a formatted FileHandler at WARNING level.
        getLogger.side_effect = self.loggers
        self.options.log_file = '/tmp/log_file'
        file_logger = mock.Mock()
        file_logger.setFormatter = mock.Mock()
        file_logger.setLevel = mock.Mock()
        handle.return_value = file_logger
        mock_formatter = mock.Mock()
        formatter.return_value = mock_formatter

        context.LogConfigurator(self.options)

        handle.assert_called_with(filename=self.options.log_file)
        self.root_logger.addHandler.assert_called_with(file_logger)
        file_logger.setFormatter.assert_called_with(mock_formatter)
        file_logger.setLevel.assert_called_with(logging.WARNING)

    @mock.patch('logging.FileHandler')
    @mock.patch('logging.getLogger')
    @mock.patch('openstackclient.common.context.set_warning_filter')
    @mock.patch('openstackclient.common.context._FileFormatter')
    def test_configure(self, formatter, warning_filter, getLogger, handle):
        # configure() applies a cloud-config's log file and level later on.
        getLogger.side_effect = self.loggers
        configurator = context.LogConfigurator(self.options)
        cloud_config = mock.Mock()
        config_log = '/tmp/config_log'
        cloud_config.config = {
            'log_file': config_log,
            'verbose_level': 1,
            'log_level': 'info'}
        file_logger = mock.Mock()
        file_logger.setFormatter = mock.Mock()
        file_logger.setLevel = mock.Mock()
        handle.return_value = file_logger
        mock_formatter = mock.Mock()
        formatter.return_value = mock_formatter

        configurator.configure(cloud_config)

        warning_filter.assert_called_with(logging.INFO)
        handle.assert_called_with(filename=config_log)
        self.root_logger.addHandler.assert_called_with(file_logger)
        file_logger.setFormatter.assert_called_with(mock_formatter)
        file_logger.setLevel.assert_called_with(logging.INFO)
        self.assertEqual(False, configurator.dump_trace)
| sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/openstackclient/tests/common/test_context.py | Python | mit | 8,911 |
#This program will create random numbers for all of the different
# types of dice
#import necessary libraries
from random import randint
#defining the functions
def d4():
    """Roll a four-sided die: uniform integer in [1, 4]."""
    return randint(1, 4)

def d6():
    """Roll a six-sided die: uniform integer in [1, 6]."""
    return randint(1, 6)

def d8():
    """Roll an eight-sided die: uniform integer in [1, 8]."""
    return randint(1, 8)

def d12():
    """Roll a twelve-sided die: uniform integer in [1, 12]."""
    return randint(1, 12)

def d10():
    """Roll one percentile digit: uniform integer in [0, 9] (not 1-10)."""
    return randint(0, 9)
def d10p():
    """Percentile (d100) roll: uniform integer in [1, 100].

    Fix: the original converted a tens roll of 0 to 100 *before* adding
    the units digit, so it could return impossible values 100-109 and
    could never return 1-9. The standard convention is: tens (00-90)
    plus units (0-9), with the combined 00+0 read as 100.
    """
    tens = randint(0, 9) * 10   # 00, 10, ..., 90
    units = randint(0, 9)       # 0-9
    total = tens + units
    return 100 if total == 0 else total
def d20():
    """Roll a twenty-sided die: uniform integer in [1, 20]."""
    return randint(1, 20)
#############################################################################
#############################################################################
#Functions for multiple rolling
def d4_2():
    """Sum of two d4 rolls (2-8)."""
    return d4() + d4()

def d4_3():
    """Sum of three d4 rolls (3-12)."""
    return sum(d4() for _ in range(3))

def d4_4():
    """Sum of four d4 rolls (4-16)."""
    return sum(d4() for _ in range(4))

def d4_5():
    """Sum of five d4 rolls (5-20)."""
    return sum(d4() for _ in range(5))
def d6_2():
    """Sum of two d6 rolls (2-12)."""
    return d6() + d6()

def d6_3():
    """Sum of three d6 rolls (3-18)."""
    return sum(d6() for _ in range(3))

def d6_4():
    """Sum of four d6 rolls (4-24)."""
    return sum(d6() for _ in range(4))

def d6_5():
    """Sum of five d6 rolls (5-30)."""
    return sum(d6() for _ in range(5))
def d8_2():
    """Sum of two d8 rolls (2-16)."""
    return d8() + d8()

def d8_3():
    """Sum of three d8 rolls (3-24)."""
    return sum(d8() for _ in range(3))

def d8_4():
    """Sum of four d8 rolls (4-32)."""
    return sum(d8() for _ in range(4))
def d12_2():
    """Sum of two d12 rolls (2-24)."""
    return d12() + d12()

def d10_2():
    """Sum of two percentile digits (0-18)."""
    return d10() + d10()

def d10_3():
    """Sum of three percentile digits (0-27)."""
    return sum(d10() for _ in range(3))

def d10_4():
    """Sum of four percentile digits (0-36)."""
    return sum(d10() for _ in range(4))

def d10_5():
    """Sum of five percentile digits (0-45)."""
    return sum(d10() for _ in range(5))

def d10_6():
    """Sum of six percentile digits (0-54)."""
    return sum(d10() for _ in range(6))

def d10_7():
    """Sum of seven percentile digits (0-63)."""
    return sum(d10() for _ in range(7))

def d10_8():
    """Sum of eight percentile digits (0-72)."""
    return sum(d10() for _ in range(8))

def d10_9():
    """Sum of nine percentile digits (0-81)."""
    return sum(d10() for _ in range(9))

def d10_10():
    """Sum of ten percentile digits (0-90)."""
    return sum(d10() for _ in range(10))

def d20_2():
    """Sum of two d20 rolls (2-40)."""
    return d20() + d20()
#############################################################################
#############################################################################
| CelebornMcGinnis/Dungeons-and-Monsters | RandomDiceNumbers.py | Python | mit | 2,605 |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import cirq
from cirq import value
from cirq.transformers.heuristic_decompositions.two_qubit_gate_tabulation import (
two_qubit_gate_product_tabulation,
TwoQubitGateTabulation,
)
from cirq.transformers.heuristic_decompositions.gate_tabulation_math_utils import (
unitary_entanglement_fidelity,
)
from cirq.testing import random_special_unitary, assert_equivalent_repr
_rng = value.parse_random_state(11)  # for determinism

# Fixtures under test: tabulations for a Sycamore-like FSim(pi/2, pi/6)
# base gate (max infidelity 0.2) and a sqrt-iSWAP-like FSim(pi/4, pi/24)
# base gate (max infidelity 0.1).
sycamore_tabulation = two_qubit_gate_product_tabulation(
    cirq.unitary(cirq.FSimGate(np.pi / 2, np.pi / 6)), 0.2, random_state=_rng
)

sqrt_iswap_tabulation = two_qubit_gate_product_tabulation(
    cirq.unitary(cirq.FSimGate(np.pi / 4, np.pi / 24)), 0.1, random_state=_rng
)

# Haar-ish random SU(4) targets shared across the parametrized tests.
_random_2Q_unitaries = np.array([random_special_unitary(4, random_state=_rng) for _ in range(100)])


@pytest.mark.parametrize('tabulation', [sycamore_tabulation, sqrt_iswap_tabulation])
@pytest.mark.parametrize('target', _random_2Q_unitaries)
def test_gate_compilation_matches_expected_max_infidelity(tabulation, target):
    # Compiled result must stay within the tabulation's advertised bound.
    result = tabulation.compile_two_qubit_gate(target)

    assert result.success
    max_error = tabulation.max_expected_infidelity
    assert 1 - unitary_entanglement_fidelity(target, result.actual_gate) < max_error


@pytest.mark.parametrize('tabulation', [sycamore_tabulation, sqrt_iswap_tabulation])
def test_gate_compilation_on_base_gate_standard(tabulation):
    # Compiling the base gate itself should be (near-)exact.
    base_gate = tabulation.base_gate

    result = tabulation.compile_two_qubit_gate(base_gate)
    assert len(result.local_unitaries) == 2
    assert result.success
    fidelity = unitary_entanglement_fidelity(result.actual_gate, base_gate)
    assert fidelity > 0.99999


def test_gate_compilation_on_base_gate_identity():
    # Degenerate case: identity base gate still compiles to itself.
    tabulation = two_qubit_gate_product_tabulation(np.eye(4), 0.25)
    base_gate = tabulation.base_gate

    result = tabulation.compile_two_qubit_gate(base_gate)
    assert len(result.local_unitaries) == 2
    assert result.success
    fidelity = unitary_entanglement_fidelity(result.actual_gate, base_gate)
    assert fidelity > 0.99999


def test_gate_compilation_missing_points_raises_error():
    # Identity base gate with a tight tolerance cannot cover the Weyl
    # chamber; without allow_missed_points this must raise.
    with pytest.raises(ValueError, match='Failed to tabulate a'):
        two_qubit_gate_product_tabulation(
            np.eye(4), 0.4, allow_missed_points=False, random_state=_rng
        )
@pytest.mark.parametrize('seed', [0, 1])
def test_sycamore_gate_tabulation(seed):
    # Tabulation with reduced sampling still compiles its own base gate.
    base_gate = cirq.unitary(cirq.FSimGate(np.pi / 2, np.pi / 6))
    tab = two_qubit_gate_product_tabulation(
        base_gate, 0.1, sample_scaling=2, random_state=np.random.RandomState(seed)
    )
    result = tab.compile_two_qubit_gate(base_gate)
    assert result.success


def test_sycamore_gate_tabulation_repr():
    # repr() of a minimal tabulation must round-trip through eval.
    simple_tabulation = TwoQubitGateTabulation(
        np.array([[(1 + 0j), 0j, 0j, 0j]], dtype=np.complex128),
        np.array([[(1 + 0j), 0j, 0j, 0j]], dtype=np.complex128),
        [[]],
        0.49,
        'Sample string',
        (),
    )
    assert_equivalent_repr(simple_tabulation)


def test_sycamore_gate_tabulation_eq():
    # Equality is by value; unrelated types compare unequal.
    assert sycamore_tabulation == sycamore_tabulation
    assert sycamore_tabulation != sqrt_iswap_tabulation
    assert sycamore_tabulation != 1
| quantumlib/Cirq | cirq-core/cirq/transformers/heuristic_decompositions/two_qubit_gate_tabulation_test.py | Python | apache-2.0 | 3,830 |
import json
import mimetypes
import os
from flask import send_file, request, flash, redirect, jsonify
from utils import random_string
ALLOWED_EXTENSIONS = set(['png'])
def allowed_file(filename):
    """Return True if *filename* has a whitelisted extension.

    Fix: compare the extension case-insensitively so uploads like
    'IMAGE.PNG' are accepted (the whitelist is stored lowercase).
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def add_routes(app):
    """Register the blob download/upload endpoints on the Flask app."""

    @app.route('/api/blobs/<blob_id>', methods=['GET'])
    def get_file(blob_id):
        # Each blob has a '<id>.json' sidecar holding its original
        # filename, used here to guess the MIME type.
        filename = os.path.join(app.config['UPLOAD_FOLDER'], blob_id)
        # NOTE(review): the handle is never closed explicitly (relies on
        # CPython refcounting), and blob_id flows into a path join --
        # confirm routing prevents path traversal.
        meta_file_content = open(filename + '.json').read()
        mime_type = mimetypes.guess_type(json.loads(meta_file_content)['filename'])
        return send_file(filename, mimetype=mime_type[0])

    @app.route('/api/blobs', methods=['POST'])
    def upload_file():
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if the user does not select a file, some browsers submit an
        # empty part without a filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # Store the payload under a random id plus a JSON sidecar
            # remembering the original filename.
            blob_id = random_string(20)
            file.save(blob_file_path(blob_id))
            meta_file = open(os.path.join(app.config['UPLOAD_FOLDER'], blob_id + '.json'), 'w')
            meta_file.write(json.dumps({
                "filename": file.filename
            }))
            meta_file.close()
            return jsonify({
                'blobId': blob_id
            })
        # NOTE(review): a disallowed extension falls through and returns
        # None (HTTP 500 in Flask) -- probably unintended.

    def blob_file_path(blob_id):
        # Absolute path of the stored blob payload.
        return os.path.join(app.config['UPLOAD_FOLDER'], blob_id)
| boehlke/openslides-multiinstance-backend | python/multiinstance/upload.py | Python | mit | 1,700 |
# -*- python -*-
# Copyright (C) 2009-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
# Install paths baked in when GCC was built; the relative path between
# them is recomputed below so the toolchain tree stays relocatable.
pythondir = '/Users/build/work/GCC-7-build/install-native/share/gcc-arm-none-eabi'
libdir = '/Users/build/work/GCC-7-build/install-native/arm-none-eabi/lib/thumb/v7-m'
# This file might be loaded when there is no current objfile.  This
# can happen if the user loads it manually.  In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
    # Update module path.  We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.
    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)
    prefix = os.path.commonprefix ([libdir, pythondir])
    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.
    if prefix[-1] != '/':
        prefix = os.path.dirname (prefix) + '/'
    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]
    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
    # Apply the relative path next to the objfile this script accompanies.
    objfile = gdb.current_objfile ().filename
    dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
    if not dir_ in sys.path:
        sys.path.insert(0, dir_)
    # Call a function as a plain import would not execute body of the included file
    # on repeated reloads of this object file.
    from libstdcxx.v6 import register_libstdcxx_printers
    register_libstdcxx_printers(gdb.current_objfile())
| jocelynmass/nrf51 | toolchain/arm_cm0/arm-none-eabi/lib/thumb/v7-m/libstdc++.a-gdb.py | Python | gpl-2.0 | 2,482 |
from flask import Blueprint
from flask import current_app
from flask import request
from flask import jsonify
from flask import abort
from flask import render_template
from flask import redirect
from flask import url_for
from flask import flash
from werkzeug.exceptions import NotFound
from printus.web.models import Report
from printus.web.models import User
from printus.web.forms import UserForm
from printus.web.forms import ReportForm
from printus.web.forms import LoginForm
from printus.web.forms import SignupForm
from printus.web.forms import ContactForm
from printus.web.extensions import login_manager
from flask.ext.login import login_required, current_user, login_user, logout_user
# Blueprint collecting the app's general views (reports, auth, profile).
bp = Blueprint('general', __name__, template_folder='templates')
@bp.route('/')
@login_required
def index():
    """Paginated list of the current user's reports, newest first."""
    # int() suffices here (it auto-promotes on Python 2); catch only the
    # errors a malformed ?page= value can actually raise instead of a
    # blanket Exception.
    try:
        page = int(request.args.get('page', 1))
    except (TypeError, ValueError):
        page = 1
    try:
        pagination = current_user.reports.order_by('created_at desc').paginate(page, 10)
    except NotFound:
        # Requested page is out of range; fall back to the first page.
        page = 1
        pagination = current_user.reports.order_by('created_at desc').paginate(page, 10)
    return render_template('reports.index.html', pagination=pagination)
@bp.route('/reports/new', methods=['GET', 'POST'])
@login_required
def reports_new():
    """Show the new-report form; on a valid POST, confirm and go home."""
    form = ReportForm()
    if not form.validate_on_submit():
        # GET request or failed validation: (re-)render the form.
        return render_template('reports.new.html', form=form)
    flash('Report created')
    return redirect(url_for('general.index'))
@bp.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
    """Display and update the logged-in user's profile."""
    form = UserForm(obj=current_user)
    if form.validate_on_submit():
        form.populate_obj(current_user)
        # NOTE(review): ``db`` is not among this module's imports — confirm
        # it is available at runtime (e.g. via an extensions module),
        # otherwise a valid submit raises NameError here.
        db.session.add(current_user)
        db.session.commit()
    return render_template('profile.html', form=form)
@bp.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user, then redirect to ?next= or the index page."""
    form = LoginForm()
    signupForm = SignupForm()
    if form.validate_on_submit():
        # SECURITY(review): the submitted password is compared directly
        # against the stored column, which implies plaintext password
        # storage — this should be a salted-hash comparison.
        user = User.query.filter_by(username=form.username.data, password=form.password.data).first()
        if not user:
            # Bad credentials: re-render the login page with both forms.
            return render_template("login.html", form=form, signupForm=signupForm)
        else:
            login_user(user)
            # NOTE(review): redirecting to a caller-supplied "next" is an
            # open redirect unless the target is validated elsewhere.
            return redirect(request.args.get("next") or url_for("general.index"))
    return render_template("login.html", form=form, signupForm=signupForm)
@bp.route('/signup', methods=['GET', 'POST'])
def signup():
    """Show the signup form and redirect after a valid submission.

    NOTE(review): a valid submit only redirects — no User row is created
    here; confirm account creation happens elsewhere (e.g. in the form).
    """
    form = SignupForm()
    if form.validate_on_submit():
        return redirect(request.args.get('next') or url_for('general.index'))
    return render_template("signup.html", form=form)
@bp.route('/logout')
@login_required
def logout():
    """End the current session and return to the index page."""
    logout_user()
    flash('Logged out.')
    return redirect(url_for('general.index'))
@bp.route('/contact_us', methods=['GET', 'POST'])
@login_required
def contact_us():
    """Contact form; on a valid POST, return to the index page.

    The route previously accepted only GET, so ``validate_on_submit()``
    (which requires a POST) could never succeed and the form was
    effectively dead; POST is now allowed.
    """
    form = ContactForm()
    if form.validate_on_submit():
        return redirect(url_for('general.index'))
    return render_template('contact_us.html', form=form)
| matrixise/printus | old_code/printus/web/views/general/__init__.py | Python | mit | 2,782 |
# LADITools - Linux Audio Desktop Integration Tools
# Copyright (C) 2007-2010, Marc-Olivier Barre <marco@marcochapeau.org>
# Copyright (C) 2007-2009, Nedko Arnaudov <nedko@arnaudov.name>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import dbus
from dbus.mainloop.glib import DBusGMainLoop
name_base = 'org.jackaudio'
controller_interface_name = name_base + '.Configure'
service_name = name_base + '.service'
def dbus_type_to_python_type (dbus_value):
    """Unwrap a D-Bus typed value into the matching plain Python type."""
    value_type = type (dbus_value)
    if value_type == dbus.Boolean:
        return bool (dbus_value)
    if value_type in (dbus.Int32, dbus.UInt32):
        return int (dbus_value)
    if value_type in (dbus.String, dbus.Byte):
        return str (dbus_value)
    # Anything we do not recognise passes through untouched.
    return dbus_value
class jack_configure:
    """Thin wrapper around the org.jackaudio Configure D-Bus interface.

    Parameter "paths" are lists of strings, e.g. ['engine', 'driver'],
    as accepted by the underlying D-Bus methods.
    """
    def __init__ (self):
        # Connect to the bus
        self.bus = dbus.SessionBus ()
        self.controller = self.bus.get_object (service_name, "/org/jackaudio/Controller")
        self.iface = dbus.Interface (self.controller, controller_interface_name)
        # self.bus.add_signal_receiver (self.name_owner_changed, dbus_interface = controller_interface_name, signal_name = "NameOwnerChanged")
    def name_owner_changed (name = None, old_owner = None, new_owner = None):
        # NOTE(review): no ``self`` parameter — only usable as the
        # (currently commented-out) D-Bus signal callback above, not as a
        # bound method.  Uses the Python 2 print statement.
        print "Name changed : %r" % name
    def get_selected_driver (self):
        """Return the value of the engine/driver parameter."""
        isset, default, value = self.iface.GetParameterValue (['engine', 'driver'])
        return value
    def read_container (self, path):
        """Return child names of a container node, or [] for a leaf."""
        is_leaf, children = self.iface.ReadContainer (path)
        if is_leaf:
            return []
        return children
    def get_param_names (self, path):
        """Return parameter names of a leaf node, or [] for a container."""
        is_leaf, children = self.iface.ReadContainer (path)
        if not is_leaf:
            return []
        return children
    def get_param_short_description (self, path):
        """Return the parameter's one-line description."""
        type_char, name, short_descr, long_descr = self.iface.GetParameterInfo (path)
        return short_descr
    def get_param_long_description (self, path):
        """Return the parameter's full description."""
        type_char, name, short_descr, long_descr = self.iface.GetParameterInfo (path)
        return long_descr
    def get_param_type (self, path):
        """Return the parameter's D-Bus type signature character."""
        type_char, name, short_descr, long_descr = self.iface.GetParameterInfo (path)
        return str (type_char)
    def get_param_value (self, path):
        """Return (isset, default, value) with D-Bus types unwrapped."""
        isset, default, value = self.iface.GetParameterValue (path)
        isset = bool (isset)
        default = dbus_type_to_python_type (default)
        value = dbus_type_to_python_type (value)
        return isset, default, value
    def set_param_value (self, path, value):
        """Wrap *value* in the parameter's declared D-Bus type and set it."""
        typestr = self.get_param_type (path)
        if typestr == "b":
            value = dbus.Boolean (value)
        elif typestr == "y":
            value = dbus.Byte (value)
        elif typestr == "i":
            value = dbus.Int32 (value)
        elif typestr == "u":
            value = dbus.UInt32 (value)
        self.iface.SetParameterValue (path, value)
    def reset_param_value (self, path):
        """Restore the parameter to its default value."""
        self.iface.ResetParameterValue (path)
    def param_has_range (self, path):
        """True if the parameter's constraint is a (min, max) range."""
        is_range, is_strict, is_fake_value, values = self.iface.GetParameterConstraint (path)
        return bool (is_range)
    def param_get_range (self, path):
        """Return (min, max) for a range constraint, or (-1, -1) if none."""
        is_range, is_strict, is_fake_value, values = self.iface.GetParameterConstraint (path)
        if not is_range or len (values) != 2:
            return -1, -1
        return dbus_type_to_python_type (values[0][0]), dbus_type_to_python_type (values[1][0])
    def param_has_enum (self, path):
        """True if the constraint is a non-empty enumeration (not a range)."""
        is_range, is_strict, is_fake_value, values = self.iface.GetParameterConstraint (path)
        return not is_range and len (values) != 0
    def param_is_strict_enum (self, path):
        """True if only the enumerated values are accepted."""
        is_range, is_strict, is_fake_value, values = self.iface.GetParameterConstraint (path)
        return is_strict
    def param_is_fake_value (self, path):
        """Return the constraint's fake-value flag as reported by JACK."""
        is_range, is_strict, is_fake_value, values = self.iface.GetParameterConstraint (path)
        return is_fake_value
    def param_get_enum_values (self, path):
        """Return [[value, description], ...] pairs with types unwrapped."""
        is_range, is_strict, is_fake_value, dbus_values = self.iface.GetParameterConstraint (path)
        values = []
        if not is_range and len (dbus_values) != 0:
            for dbus_value in dbus_values:
                values.append ([dbus_type_to_python_type (dbus_value[0]), dbus_type_to_python_type (dbus_value[1])])
        return values
| LADI/laditools | laditools/jack_configure.py | Python | gpl-3.0 | 5,047 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover the adwords module."""
__author__ = 'Joseph DiLallo'
import io
import sys
import tempfile
import unittest
import urllib
import urllib2
import mock
import googleads.adwords
import googleads.common
import googleads.errors
# True when running under Python 2; selects str/bytes handling below.
PYTHON2 = sys.version_info[0] == 2
# urllib2 became urllib.request in Python 3, so mock patch targets differ.
URL_REQUEST_PATH = ('urllib2' if PYTHON2 else 'urllib.request')
# Most recent AdWords API version known to the library under test.
CURRENT_VERSION = sorted(googleads.adwords._SERVICE_MAP.keys())[-1]
class AdWordsHeaderHandlerTest(unittest.TestCase):
  """Tests for the googleads.adwords._AdWordsHeaderHandler class."""
  def setUp(self):
    # A mocked client suffices: the handler only reads attributes from it.
    self.adwords_client = mock.Mock()
    self.header_handler = googleads.adwords._AdWordsHeaderHandler(
        self.adwords_client, CURRENT_VERSION)
  def testSetHeaders(self):
    """SOAP and HTTP headers are assembled from the client's attributes."""
    suds_client = mock.Mock()
    ccid = 'client customer id'
    dev_token = 'developer token'
    user_agent = 'user agent!'
    validate_only = True
    partial_failure = False
    oauth_header = {'oauth': 'header'}
    self.adwords_client.client_customer_id = ccid
    self.adwords_client.developer_token = dev_token
    self.adwords_client.user_agent = user_agent
    self.adwords_client.validate_only = validate_only
    self.adwords_client.partial_failure = partial_failure
    self.adwords_client.oauth2_client.CreateHttpHeader.return_value = (
        oauth_header)
    self.header_handler.SetHeaders(suds_client)
    # Check that the SOAP header has the correct values.
    suds_client.factory.create.assert_called_once_with(
        '{https://adwords.google.com/api/adwords/cm/%s}SoapHeader' %
        CURRENT_VERSION)
    soap_header = suds_client.factory.create.return_value
    self.assertEqual(ccid, soap_header.clientCustomerId)
    self.assertEqual(dev_token, soap_header.developerToken)
    self.assertEqual(
        ''.join([user_agent, googleads.adwords._AdWordsHeaderHandler._LIB_SIG]),
        soap_header.userAgent)
    self.assertEqual(validate_only, soap_header.validateOnly)
    self.assertEqual(partial_failure, soap_header.partialFailure)
    # Check that the suds client has the correct values.
    suds_client.set_options.assert_any_call(
        soapheaders=soap_header, headers=oauth_header)
  def testGetReportDownloadHeaders(self):
    """Report-download HTTP headers carry auth, tokens and the gzip tag."""
    ccid = 'client customer id'
    dev_token = 'developer token'
    user_agent = 'user agent!'
    oauth_header = {'Authorization': 'header'}
    self.adwords_client.client_customer_id = ccid
    self.adwords_client.developer_token = dev_token
    self.adwords_client.user_agent = user_agent
    self.adwords_client.oauth2_client.CreateHttpHeader.return_value = dict(
        oauth_header)
    expected_return_value = {
        'Content-type': 'application/x-www-form-urlencoded',
        'developerToken': dev_token,
        'clientCustomerId': ccid,
        'Authorization': 'header',
        'User-Agent': ''.join([
            user_agent, googleads.adwords._AdWordsHeaderHandler._LIB_SIG,
            ',gzip'])
    }
    self.adwords_client.oauth2_client.CreateHttpHeader.return_value = dict(
        oauth_header)
    self.assertEqual(expected_return_value,
                     self.header_handler.GetReportDownloadHeaders(
                         skip_report_header=False, skip_report_summary=False))
class AdWordsClientTest(unittest.TestCase):
  """Tests for the googleads.adwords.AdWordsClient class."""
  def setUp(self):
    oauth_header = {'Authorization': 'header'}
    self.cache = None
    self.client_customer_id = 'client customer id'
    self.dev_token = 'developers developers developers'
    self.user_agent = 'users users user'
    self.oauth2_client = mock.Mock()
    self.oauth2_client.CreateHttpHeader.return_value = dict(oauth_header)
    self.https_proxy = 'myproxy:443'
    self.adwords_client = googleads.adwords.AdWordsClient(
        self.dev_token, self.oauth2_client, self.user_agent,
        client_customer_id=self.client_customer_id,
        https_proxy=self.https_proxy, cache=self.cache)
    self.header_handler = googleads.adwords._AdWordsHeaderHandler(
        self.adwords_client, CURRENT_VERSION)
  def testLoadFromStorage(self):
    """LoadFromStorage builds a client from stored configuration."""
    with mock.patch('googleads.common.LoadFromStorage') as mock_load:
      mock_load.return_value = {
          'developer_token': 'abcdEFghIjkLMOpqRs',
          'oauth2_client': True,
          'user_agent': 'unit testing'
      }
      self.assertIsInstance(googleads.adwords.AdWordsClient.LoadFromStorage(),
                            googleads.adwords.AdWordsClient)
  def testGetService_success(self):
    """GetService builds a suds proxy for custom and default servers."""
    version = CURRENT_VERSION
    # NOTE(review): ``.keys()[0]`` only works on Python 2, where keys()
    # returns a list — confirm this suite is not expected to run on 3.
    service = googleads.adwords._SERVICE_MAP[version].keys()[0]
    namespace = googleads.adwords._SERVICE_MAP[version][service]
    # Use a custom server. Also test what happens if the server ends with a
    # trailing slash
    server = 'https://testing.test.com/'
    https_proxy = {'https': self.https_proxy}
    with mock.patch('suds.client.Client') as mock_client:
      suds_service = self.adwords_client.GetService(service, version, server)
      mock_client.assert_called_once_with(
          'https://testing.test.com/api/adwords/%s/%s/%s?wsdl'
          % (namespace, version, service), proxy=https_proxy, cache=self.cache,
          timeout=3600)
      self.assertIsInstance(suds_service, googleads.common.SudsServiceProxy)
    # Use the default server and https_proxy.
    self.adwords_client.https_proxy = None
    with mock.patch('suds.client.Client') as mock_client:
      suds_service = self.adwords_client.GetService(service, version)
      mock_client.assert_called_once_with(
          'https://adwords.google.com/api/adwords/%s/%s/%s?wsdl'
          % (namespace, version, service), proxy=None, cache=self.cache,
          timeout=3600)
      self.assertFalse(mock_client.return_value.set_options.called)
      self.assertIsInstance(suds_service, googleads.common.SudsServiceProxy)
  def testGetService_badService(self):
    """An unknown service name raises GoogleAdsValueError."""
    version = CURRENT_VERSION
    self.assertRaises(
        googleads.errors.GoogleAdsValueError, self.adwords_client.GetService,
        'GYIVyievfyiovslf', version)
  def testGetService_badVersion(self):
    """An unknown API version raises GoogleAdsValueError."""
    self.assertRaises(
        googleads.errors.GoogleAdsValueError, self.adwords_client.GetService,
        'CampaignService', '11111')
  def testGetReportDownloader(self):
    """GetReportDownloader delegates to the ReportDownloader constructor."""
    with mock.patch('googleads.adwords.ReportDownloader') as mock_downloader:
      self.assertEqual(
          mock_downloader.return_value,
          self.adwords_client.GetReportDownloader('version', 'server'))
      mock_downloader.assert_called_once_with(
          self.adwords_client, 'version', 'server')
  def testSetClientCustomerId(self):
    """SetClientCustomerId is reflected in subsequent SOAP headers."""
    suds_client = mock.Mock()
    ccid = 'modified'
    # Check that the SOAP header has the modified client customer id.
    self.adwords_client.SetClientCustomerId(ccid)
    self.header_handler.SetHeaders(suds_client)
    soap_header = suds_client.factory.create.return_value
    self.assertEqual(ccid, soap_header.clientCustomerId)
class ReportDownloaderTest(unittest.TestCase):
  """Tests for the googleads.adwords.ReportDownloader class.

  Two format-check tests below were previously broken:
  testDownloadReportCheckFormat_GZIPPEDBinaryFileSuccess duplicated the
  plain CSV/StringIO case (so the binary-file path was never exercised)
  and ..._GZIPPEDBytesIOSuccess used a temp file instead of io.BytesIO.
  Both now match what their names advertise.
  """

  def setUp(self):
    # Patch out suds, the header handler and the urllib opener so the
    # downloader can be constructed without any network or WSDL access.
    self.version = CURRENT_VERSION
    self.marshaller = mock.Mock()
    self.header_handler = mock.Mock()
    self.adwords_client = mock.Mock()
    self.opener = mock.Mock()
    self.adwords_client.https_proxy = 'my.proxy.gov:443'
    with mock.patch('suds.client.Client'):
      with mock.patch('suds.xsd.doctor'):
        with mock.patch('suds.mx.literal.Literal') as mock_literal:
          with mock.patch(
              'googleads.adwords._AdWordsHeaderHandler') as mock_handler:
            with mock.patch(
                URL_REQUEST_PATH + '.OpenerDirector') as mock_opener:
              mock_literal.return_value = self.marshaller
              mock_handler.return_value = self.header_handler
              mock_opener.return_value = self.opener
              self.report_downloader = googleads.adwords.ReportDownloader(
                  self.adwords_client, self.version)

  def testDownloadReport(self):
    """A report definition is marshalled, POSTed and streamed to the file."""
    output_file = io.StringIO()
    report_definition = {'table': 'campaigns',
                         'downloadFormat': 'CSV'}
    serialized_report = 'nuinbwuign'
    post_body = urllib.urlencode({'__rdxml': serialized_report})
    if not PYTHON2:
      post_body = bytes(post_body, 'utf-8')
    headers = {'Authorization': 'ya29.something'}
    self.header_handler.GetReportDownloadHeaders.return_value = headers
    # Non-ASCII content verifies the UTF-8 decoding path.
    content = u'CONTENT STRING 广告客户'
    fake_request = io.StringIO() if PYTHON2 else io.BytesIO()
    fake_request.write(content if PYTHON2 else bytes(content, 'utf-8'))
    fake_request.seek(0)
    self.marshaller.process.return_value = serialized_report
    with mock.patch('suds.mx.Content') as mock_content:
      with mock.patch(URL_REQUEST_PATH + '.Request') as mock_request:
        self.opener.open.return_value = fake_request
        self.report_downloader.DownloadReport(report_definition, output_file)
        mock_request.assert_called_once_with(
            ('https://adwords.google.com/api/adwords/reportdownload/%s'
             % self.version), post_body, headers)
        self.opener.open.assert_called_once_with(mock_request.return_value)
        self.marshaller.process.assert_called_once_with(
            mock_content.return_value)
    self.assertEqual(content, output_file.getvalue())
    self.header_handler.GetReportDownloadHeaders.assert_called_once_with(
        None, None)

  def testDownloadReportCheckFormat_CSVStringSuccess(self):
    """A text format into a text stream passes the format check."""
    output_file = io.StringIO()
    try:
      self.report_downloader._DownloadReportCheckFormat('CSV', output_file)
    except googleads.errors.GoogleAdsValueError:
      self.fail('_DownloadReportCheckFormat raised GoogleAdsValueError'
                'unexpectedly!')

  def testDownloadReportCheckFormat_GZIPPEDBinaryFileSuccess(self):
    """A GZIPPED format into a binary-mode file passes the format check."""
    # Fixed: previously this duplicated the CSV/StringIO case above.
    output_file = tempfile.TemporaryFile(mode='wb')
    try:
      self.report_downloader._DownloadReportCheckFormat('GZIPPED_CSV',
                                                        output_file)
    except googleads.errors.GoogleAdsValueError:
      self.fail('_DownloadReportCheckFormat raised GoogleAdsValueError'
                'unexpectedly!')

  def testDownloadReportCheckFormat_GZIPPEDBytesIOSuccess(self):
    """A GZIPPED format into an io.BytesIO buffer passes the format check."""
    # Fixed: previously this used a temp file rather than BytesIO.
    output_file = io.BytesIO()
    try:
      self.report_downloader._DownloadReportCheckFormat('GZIPPED_CSV',
                                                        output_file)
    except googleads.errors.GoogleAdsValueError:
      self.fail('_DownloadReportCheckFormat raised GoogleAdsValueError'
                'unexpectedly!')

  def testDownloadReportCheckFormat_GZIPPEDStringFailure(self):
    """A GZIPPED format into a text stream is rejected."""
    output_file = io.StringIO()
    self.assertRaises(googleads.errors.GoogleAdsValueError,
                      self.report_downloader._DownloadReportCheckFormat,
                      'GZIPPED_CSV', output_file)

  def testDownloadReport_failure(self):
    """An HTTP error surfaces as AdWordsReportError; nothing is written."""
    output_file = io.StringIO()
    report_definition = {'table': 'campaigns',
                         'downloadFormat': 'CSV'}
    serialized_report = 'hjuibnibguo'
    post_body = urllib.urlencode({'__rdxml': serialized_report})
    if not PYTHON2:
      post_body = bytes(post_body, 'utf-8')
    headers = {'Authorization': 'ya29.something'}
    self.header_handler.GetReportDownloadHeaders.return_value = headers
    content = u'Page not found. :-('
    fake_request = io.StringIO() if PYTHON2 else io.BytesIO()
    fake_request.write(content if PYTHON2 else bytes(content, 'utf-8'))
    fake_request.seek(0)
    error = urllib2.HTTPError('', 400, 'Bad Request', {}, fp=fake_request)
    self.marshaller.process.return_value = serialized_report
    with mock.patch('suds.mx.Content') as mock_content:
      with mock.patch(URL_REQUEST_PATH + '.Request') as mock_request:
        self.opener.open.side_effect = error
        self.assertRaises(
            googleads.errors.AdWordsReportError,
            self.report_downloader.DownloadReport, report_definition,
            output_file)
        mock_request.assert_called_once_with(
            ('https://adwords.google.com/api/adwords/reportdownload/%s'
             % self.version), post_body, headers)
        self.opener.open.assert_called_once_with(mock_request.return_value)
        self.marshaller.process.assert_called_once_with(
            mock_content.return_value)
    self.assertEqual('', output_file.getvalue())
    self.header_handler.GetReportDownloadHeaders.assert_called_once_with(
        None, None)

  def testDownloadReportWithAwql(self):
    """An AWQL query is POSTed with its format and streamed to the file."""
    output_file = io.StringIO()
    query = 'SELECT Id FROM Campaign WHERE NAME LIKE \'%Test%\''
    file_format = 'CSV'
    post_body = urllib.urlencode({'__fmt': file_format, '__rdquery': query})
    if not PYTHON2:
      post_body = bytes(post_body, 'utf-8')
    headers = {'Authorization': 'ya29.something'}
    self.header_handler.GetReportDownloadHeaders.return_value = headers
    content = u'CONTENT STRING'
    fake_request = io.StringIO() if PYTHON2 else io.BytesIO()
    fake_request.write(content if PYTHON2 else bytes(content, 'utf-8'))
    fake_request.seek(0)
    with mock.patch(URL_REQUEST_PATH + '.Request') as mock_request:
      self.opener.open.return_value = fake_request
      self.report_downloader.DownloadReportWithAwql(
          query, file_format, output_file)
      mock_request.assert_called_once_with(
          ('https://adwords.google.com/api/adwords/reportdownload/%s'
           % self.version), post_body, headers)
      self.opener.open.assert_called_once_with(mock_request.return_value)
    self.assertEqual(content, output_file.getvalue())
    self.header_handler.GetReportDownloadHeaders.assert_called_once_with(
        None, None)

  def testDownloadReportWithBytesIO(self):
    """A gzipped download can be written into an io.BytesIO buffer."""
    output_file = io.BytesIO()
    report_definition = {'table': 'campaigns',
                         'downloadFormat': 'GZIPPED_CSV'}
    serialized_report = 'nuinbwuign'
    post_body = urllib.urlencode({'__rdxml': serialized_report})
    if not PYTHON2:
      post_body = bytes(post_body, 'utf-8')
    headers = {'Authorization': 'ya29.something'}
    self.header_handler.GetReportDownloadHeaders.return_value = headers
    content = u'CONTENT STRING 广告客户'
    fake_request = io.BytesIO()
    fake_request.write(content.encode('utf-8') if PYTHON2
                       else bytes(content, 'utf-8'))
    fake_request.seek(0)
    self.marshaller.process.return_value = serialized_report
    with mock.patch('suds.mx.Content') as mock_content:
      with mock.patch(URL_REQUEST_PATH + '.Request') as mock_request:
        self.opener.open.return_value = fake_request
        self.report_downloader.DownloadReport(report_definition, output_file)
        mock_request.assert_called_once_with(
            ('https://adwords.google.com/api/adwords/reportdownload/%s'
             % self.version), post_body, headers)
        self.opener.open.assert_called_once_with(mock_request.return_value)
        self.marshaller.process.assert_called_once_with(
            mock_content.return_value)
    self.assertEqual(content, output_file.getvalue().decode('utf-8'))
    self.header_handler.GetReportDownloadHeaders.assert_called_once_with(
        None, None)

  def testExtractError_badRequest(self):
    """A well-formed XML error body becomes AdWordsReportBadRequestError."""
    response = mock.Mock()
    response.code = 400
    type_ = 'ReportDownloadError.INVALID_REPORT_DEFINITION_XML'
    trigger = 'Invalid enumeration.'
    field_path = 'Criteria.Type'
    content_template = (
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
        '<reportDownloadError><ApiError><type>%s</type><trigger>%s</trigger>'
        '<fieldPath>%s</fieldPath></ApiError></reportDownloadError>')
    content = content_template % (type_, trigger, field_path)
    response.read.return_value = (content if PYTHON2
                                  else bytes(content, 'utf-8'))
    rval = self.report_downloader._ExtractError(response)
    self.assertEqual(type_, rval.type)
    self.assertEqual(trigger, rval.trigger)
    self.assertEqual(field_path, rval.field_path)
    self.assertEqual(response.code, rval.code)
    self.assertEqual(response, rval.error)
    self.assertEqual(content, rval.content)
    self.assertIsInstance(rval, googleads.errors.AdWordsReportBadRequestError)
    # Check that if the XML fields are empty, this still functions.
    content = content_template % ('', '', '')
    response.read.return_value = (content if PYTHON2
                                  else bytes(content, 'utf-8'))
    rval = self.report_downloader._ExtractError(response)
    self.assertEqual(None, rval.type)
    self.assertEqual(None, rval.trigger)
    self.assertEqual(None, rval.field_path)

  def testExtractError_malformedBadRequest(self):
    """Malformed XML falls back to the generic AdWordsReportError."""
    response = mock.Mock()
    response.code = 400
    content = ('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
               '<reportDownloadError><ApiError><type>1234</type><trigger>5678'
               '</trigger></ApiError></ExtraElement></reportDownloadError>')
    response.read.return_value = (content if PYTHON2
                                  else bytes(content, 'utf-8'))
    rval = self.report_downloader._ExtractError(response)
    self.assertEqual(response.code, rval.code)
    self.assertEqual(response, rval.error)
    self.assertEqual(content, rval.content)
    self.assertIsInstance(rval, googleads.errors.AdWordsReportError)

  def testExtractError_notBadRequest(self):
    """Non-XML error bodies also produce the generic AdWordsReportError."""
    response = mock.Mock()
    response.code = 400
    content = 'Page not found!'
    response.read.return_value = (content if PYTHON2
                                  else bytes(content, 'utf-8'))
    rval = self.report_downloader._ExtractError(response)
    self.assertEqual(response.code, rval.code)
    self.assertEqual(response, rval.error)
    self.assertEqual(content, rval.content)
    self.assertIsInstance(rval, googleads.errors.AdWordsReportError)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| dietrichc/streamline-ppc-reports | tests/adwords_test.py | Python | apache-2.0 | 18,546 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cross-language tests for JWT validation.
These tests test the non-cryptographic JWT validation. The tokens are MACed
with the same key and the MAC is always valid. We test how the validation
handles weird headers or payloads.
"""
import base64
import datetime
from absl.testing import absltest
from absl.testing import parameterized
import tink
from tink import cleartext_keyset_handle
from tink import jwt
from tink import mac
from tink.proto import common_pb2
from tink.proto import hmac_pb2
from tink.proto import jwt_hmac_pb2
from tink.proto import tink_pb2
from util import testing_servers
# Languages for which a JWT testing server implementation exists.
SUPPORTED_LANGUAGES = testing_servers.SUPPORTED_LANGUAGES_BY_PRIMITIVE['jwt']
# Example from https://tools.ietf.org/html/rfc7519#section-3.1
EXAMPLE_TOKEN = ('eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.'
                 'eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQo'
                 'gImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.'
                 'dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk')
# URL-safe base64 encoding of the shared HMAC key used by every token here.
KEY_VALUE = (b'AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-'
             b'1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow==')
# Populated in setUpModule() once the primitives are registered.
KEYSET = None
MAC = None
# Validator that only requires the token to be well formed (no expiry).
EMPTY_VALIDATOR = jwt.new_validator(allow_missing_expiration=True)
def _base64_encode(data: bytes) -> bytes:
"""Does a URL-safe base64 encoding without padding."""
return base64.urlsafe_b64encode(data).rstrip(b'=')
def _keyset() -> bytes:
  """Build a serialized single-key JwtHmac keyset using KEY_VALUE."""
  jwt_hmac_key = jwt_hmac_pb2.JwtHmacKey(
      version=0,
      algorithm=jwt_hmac_pb2.HS256,
      key_value=base64.urlsafe_b64decode(KEY_VALUE))
  keyset = tink_pb2.Keyset()
  key = keyset.key.add()
  key.key_data.type_url = ('type.googleapis.com/google.crypto.tink.JwtHmacKey')
  key.key_data.value = jwt_hmac_key.SerializeToString()
  key.key_data.key_material_type = tink_pb2.KeyData.SYMMETRIC
  key.status = tink_pb2.ENABLED
  key.key_id = 123
  # RAW output prefix: tags carry no Tink key-id prefix.
  key.output_prefix_type = tink_pb2.RAW
  keyset.primary_key_id = 123
  return keyset.SerializeToString()
def _mac() -> mac.Mac:
  """Build a plain HMAC-SHA256 Mac primitive over the same KEY_VALUE.

  Used to forge tokens with valid tags independently of the JWT layer.
  """
  hmac_key = hmac_pb2.HmacKey(
      version=0, key_value=base64.urlsafe_b64decode(KEY_VALUE))
  hmac_key.params.hash = common_pb2.SHA256
  hmac_key.params.tag_size = 32
  keyset = tink_pb2.Keyset()
  key = keyset.key.add()
  key.key_data.type_url = ('type.googleapis.com/google.crypto.tink.HmacKey')
  key.key_data.value = hmac_key.SerializeToString()
  key.key_data.key_material_type = tink_pb2.KeyData.SYMMETRIC
  key.status = tink_pb2.ENABLED
  key.key_id = 123
  # RAW output prefix so the tag matches a bare HMAC over the input.
  key.output_prefix_type = tink_pb2.RAW
  keyset.primary_key_id = 123
  keyset_handle = cleartext_keyset_handle.from_keyset(keyset)
  return keyset_handle.primitive(mac.Mac)
def setUpModule():
  """Register primitives, start the testing servers and init globals."""
  global KEYSET, MAC
  jwt.register_jwt_mac()
  mac.register()
  testing_servers.start('jwt')
  KEYSET = _keyset()
  MAC = _mac()
def tearDownModule():
  """Shut down the cross-language testing servers."""
  testing_servers.stop()
def generate_token_from_bytes(header: bytes, payload: bytes) -> str:
  """Generates tokens from bytes with valid MACs."""
  unsigned = _base64_encode(header) + b'.' + _base64_encode(payload)
  tag = MAC.compute_mac(unsigned)
  signed = unsigned + b'.' + _base64_encode(tag)
  return signed.decode('utf8')
def generate_token(header: str, payload: str) -> str:
  """Generates tokens with valid MACs."""
  header_bytes = header.encode('utf8')
  payload_bytes = payload.encode('utf8')
  return generate_token_from_bytes(header_bytes, payload_bytes)
class JwtTest(parameterized.TestCase):
def test_genenerate_token_generates_example(self):
token = generate_token(
'{"typ":"JWT",\r\n "alg":"HS256"}',
'{"iss":"joe",\r\n "exp":1300819380,\r\n '
'"http://example.com/is_root":true}')
self.assertEqual(token, EXAMPLE_TOKEN)
@parameterized.parameters(SUPPORTED_LANGUAGES)
def test_verify_valid(self, lang):
token = generate_token('{"alg":"HS256"}', '{"jti":"123"}')
jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
verified_jwt = jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
self.assertEqual(verified_jwt.jwt_id(), '123')
@parameterized.parameters(SUPPORTED_LANGUAGES)
def test_verify_unknown_header_valid(self, lang):
token = generate_token('{"alg":"HS256", "unknown":{"a":"b"}}',
'{"jti":"123"}')
jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
verified_jwt = jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
self.assertEqual(verified_jwt.jwt_id(), '123')
@parameterized.parameters(SUPPORTED_LANGUAGES)
def test_verify_empty_crit_header_invalid(self, lang):
# See https://tools.ietf.org/html/rfc7515#section-4.1.11
token = generate_token('{"alg":"HS256", "crit":[]}', '{"jti":"123"}')
jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
with self.assertRaises(tink.TinkError):
jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
@parameterized.parameters(SUPPORTED_LANGUAGES)
def test_verify_nonempty_crit_header_invalid(self, lang):
# See https://tools.ietf.org/html/rfc7515#section-4.1.11
token = generate_token(
'{"alg":"HS256","crit":["http://example.invalid/UNDEFINED"],'
'"http://example.invalid/UNDEFINED":true}', '{"jti":"123"}')
jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
with self.assertRaises(tink.TinkError):
jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
@parameterized.parameters(SUPPORTED_LANGUAGES)
def test_verify_typ_header(self, lang):
token = generate_token(
'{"typ":"typeHeader", "alg":"HS256"}', '{"jti":"123"}')
jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
validator_with_correct_type_header = jwt.new_validator(
expected_type_header='typeHeader', allow_missing_expiration=True)
jwt_mac.verify_mac_and_decode(token, validator_with_correct_type_header)
validator_with_missing_type_header = jwt.new_validator(
allow_missing_expiration=True)
with self.assertRaises(tink.TinkError):
jwt_mac.verify_mac_and_decode(token, validator_with_missing_type_header)
validator_that_ignores_type_header = jwt.new_validator(
ignore_type_header=True, allow_missing_expiration=True)
jwt_mac.verify_mac_and_decode(token, validator_that_ignores_type_header)
validator_with_wrong_type_header = jwt.new_validator(
expected_type_header='typeHeader', allow_missing_expiration=True)
jwt_mac.verify_mac_and_decode(token, validator_with_wrong_type_header)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_expiration(self, lang):
    """Validates the "exp" claim against fixed_now, with and without skew."""
    token = generate_token('{"alg":"HS256"}', '{"jti":"123", "exp":1234}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    # same time is expired.
    validator_with_same_time = jwt.new_validator(
        fixed_now=datetime.datetime.fromtimestamp(1234, datetime.timezone.utc))
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validator_with_same_time)
    # a second before is fine
    validator_before = jwt.new_validator(
        fixed_now=datetime.datetime.fromtimestamp(1233,
                                                  datetime.timezone.utc))
    jwt_mac.verify_mac_and_decode(token, validator_before)
    # 3 seconds too late with 3 seconds clock skew is expired.
    validator_too_late_with_clockskew = jwt.new_validator(
        fixed_now=datetime.datetime.fromtimestamp(1237, datetime.timezone.utc),
        clock_skew=datetime.timedelta(seconds=3))
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validator_too_late_with_clockskew)
    # 2 seconds too late with 3 seconds clock skew is fine.
    validator_still_ok_with_clockskew = jwt.new_validator(
        fixed_now=datetime.datetime.fromtimestamp(1236, datetime.timezone.utc),
        clock_skew=datetime.timedelta(seconds=3))
    jwt_mac.verify_mac_and_decode(token, validator_still_ok_with_clockskew)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_float_expiration(self, lang):
    """Non-integer (float) "exp" timestamps are honored."""
    token = generate_token('{"alg":"HS256"}', '{"jti":"123", "exp":1234.5}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    # one second after expiration fails.
    validate_after = jwt.new_validator(
        fixed_now=datetime.datetime.fromtimestamp(1235.5,
                                                  datetime.timezone.utc))
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validate_after)
    # one second before expiration succeeds.
    validate_before = jwt.new_validator(
        fixed_now=datetime.datetime.fromtimestamp(1233.5,
                                                  datetime.timezone.utc))
    jwt_mac.verify_mac_and_decode(token, validate_before)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_exp_expiration_is_fine(self, lang):
    """An "exp" written in scientific notation (1e10) is accepted."""
    token = generate_token('{"alg":"HS256"}', '{"exp":1e10}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_large_expiration_is_fine(self, lang):
    """exp == 253402300799 (9999-12-31T23:59:59Z) is still accepted."""
    token = generate_token('{"alg":"HS256"}', '{"exp":253402300799}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_too_large_expiration_is_invalid(self, lang):
    """exp one second beyond 253402300799 must be rejected."""
    token = generate_token('{"alg":"HS256"}', '{"exp":253402300800}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_way_too_large_expiration_is_invalid(self, lang):
    """An absurdly large exp (1e30) must be rejected."""
    token = generate_token('{"alg":"HS256"}', '{"exp":1e30}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_infinity_expiration_is_invalid(self, lang):
    """An "exp" of Infinity (a non-standard JSON literal) must be rejected."""
    token = generate_token('{"alg":"HS256"}', '{"jti":"123", "exp":Infinity}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_not_before(self, lang):
    """Validates the "nbf" (not before) claim, with and without clock skew."""
    token = generate_token('{"alg":"HS256"}', '{"jti":"123", "nbf":1234}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    # same time as not-before fine.
    validator_same_time = jwt.new_validator(
        allow_missing_expiration=True,
        fixed_now=datetime.datetime.fromtimestamp(1234, datetime.timezone.utc))
    jwt_mac.verify_mac_and_decode(token, validator_same_time)
    # one second before is not yet valid
    validator_before = jwt.new_validator(
        allow_missing_expiration=True,
        fixed_now=datetime.datetime.fromtimestamp(1233, datetime.timezone.utc))
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validator_before)
    # 3 seconds too early with 3 seconds clock skew is fine
    validator_ok_with_clockskew = jwt.new_validator(
        allow_missing_expiration=True,
        fixed_now=datetime.datetime.fromtimestamp(1231, datetime.timezone.utc),
        clock_skew=datetime.timedelta(seconds=3))
    jwt_mac.verify_mac_and_decode(token, validator_ok_with_clockskew)
    # 3 seconds too early with 2 seconds clock skew is not yet valid.
    validator_too_early_with_clockskew = jwt.new_validator(
        allow_missing_expiration=True,
        fixed_now=datetime.datetime.fromtimestamp(1231, datetime.timezone.utc),
        clock_skew=datetime.timedelta(seconds=2))
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validator_too_early_with_clockskew)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_float_not_before(self, lang):
    """Non-integer (float) "nbf" timestamps are honored."""
    token = generate_token('{"alg":"HS256"}', '{"jti":"123", "nbf":1234.5}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    # one second before the nbf time is not yet valid.
    validator_before = jwt.new_validator(
        allow_missing_expiration=True,
        fixed_now=datetime.datetime.fromtimestamp(1233.5,
                                                  datetime.timezone.utc))
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validator_before)
    # one second after the nbf time is valid.
    validator_after = jwt.new_validator(
        allow_missing_expiration=True,
        fixed_now=datetime.datetime.fromtimestamp(1235.5,
                                                  datetime.timezone.utc))
    jwt_mac.verify_mac_and_decode(token, validator_after)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_issued_at(self, lang):
    """Validates "iat" when expect_issued_in_the_past is set."""
    token = generate_token('{"alg":"HS256"}', '{"jti":"123", "iat":1234}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    # same time as issued-at fine.
    validator_same_time = jwt.new_validator(
        expect_issued_in_the_past=True,
        allow_missing_expiration=True,
        fixed_now=datetime.datetime.fromtimestamp(1234, datetime.timezone.utc))
    jwt_mac.verify_mac_and_decode(token, validator_same_time)
    # one second before is not yet valid
    validator_before = jwt.new_validator(
        expect_issued_in_the_past=True,
        allow_missing_expiration=True,
        fixed_now=datetime.datetime.fromtimestamp(1233, datetime.timezone.utc))
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validator_before)
    # ten second before but without expect_issued_in_the_past is fine
    validator_without_iat_validation = jwt.new_validator(
        allow_missing_expiration=True,
        fixed_now=datetime.datetime.fromtimestamp(1224, datetime.timezone.utc))
    jwt_mac.verify_mac_and_decode(token, validator_without_iat_validation)
    # 3 seconds too early with 3 seconds clock skew is fine
    validator_ok_with_clockskew = jwt.new_validator(
        expect_issued_in_the_past=True,
        allow_missing_expiration=True,
        fixed_now=datetime.datetime.fromtimestamp(1231, datetime.timezone.utc),
        clock_skew=datetime.timedelta(seconds=3))
    jwt_mac.verify_mac_and_decode(token, validator_ok_with_clockskew)
    # 3 seconds too early with 2 seconds clock skew is not yet valid.
    validator_too_early_with_clockskew = jwt.new_validator(
        expect_issued_in_the_past=True,
        allow_missing_expiration=True,
        fixed_now=datetime.datetime.fromtimestamp(1231, datetime.timezone.utc),
        clock_skew=datetime.timedelta(seconds=2))
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validator_too_early_with_clockskew)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_issuer(self, lang):
    """Validates the "iss" claim against the expected issuer."""
    token = generate_token('{"alg":"HS256"}', '{"iss":"joe"}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    validator_with_correct_issuer = jwt.new_validator(
        expected_issuer='joe', allow_missing_expiration=True)
    jwt_mac.verify_mac_and_decode(token, validator_with_correct_issuer)
    # a validator that does not expect an issuer rejects a token that has one.
    validator_without_issuer = jwt.new_validator(allow_missing_expiration=True)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validator_without_issuer)
    validator_that_ignores_issuer = jwt.new_validator(
        ignore_issuer=True, allow_missing_expiration=True)
    jwt_mac.verify_mac_and_decode(token, validator_that_ignores_issuer)
    # issuer comparison is case-sensitive.
    validator_with_wrong_issuer = jwt.new_validator(
        expected_issuer='Joe', allow_missing_expiration=True)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validator_with_wrong_issuer)
    # trailing whitespace matters: the comparison is an exact string match.
    val4 = jwt.new_validator(
        expected_issuer='joe ', allow_missing_expiration=True)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, val4)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_empty_string_issuer(self, lang):
    """An empty-string issuer is a legal value that can be matched exactly."""
    token = generate_token('{"alg":"HS256"}', '{"iss":""}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    jwt_mac.verify_mac_and_decode(
        token,
        jwt.new_validator(expected_issuer='', allow_missing_expiration=True))
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_issuer_with_wrong_type(self, lang):
    """A non-string "iss" claim (here a number) must make verification fail."""
    token = generate_token('{"alg":"HS256"}', '{"iss":123}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_invalid_utf8_in_header(self, lang):
    """A header that is not valid UTF-8 must be rejected."""
    # b'\xc2' is a UTF-8 lead byte with no continuation byte.
    token = generate_token_from_bytes(b'{"alg":"HS256", "a":"\xc2"}',
                                      b'{"iss":"joe"}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_invalid_utf8_in_payload(self, lang):
    """A payload that is not valid UTF-8 must be rejected."""
    # b'\xc2' is a UTF-8 lead byte with no continuation byte.
    token = generate_token_from_bytes(b'{"alg":"HS256"}', b'{"jti":"joe\xc2"}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_with_utf16_surrogate_in_payload(self, lang):
    """Characters outside the BMP encoded directly in UTF-8 are accepted."""
    # The JSON string contains the G clef character (U+1D11E) in UTF8.
    token = generate_token_from_bytes(b'{"alg":"HS256"}',
                                      b'{"jti":"\xF0\x9D\x84\x9E"}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    token = jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
    self.assertEqual(token.jwt_id(), u'\U0001d11e')
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_with_json_escaped_utf16_surrogate_in_payload(self, lang):
    """A valid JSON-escaped UTF-16 surrogate pair decodes to one character."""
    # The JSON string contains "\uD834\uDD1E", which should decode to
    # the G clef character (U+1D11E).
    token = generate_token('{"alg":"HS256"}', '{"jti":"\\uD834\\uDD1E"}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    token = jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
    self.assertEqual(token.jwt_id(), u'\U0001d11e')
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_with_invalid_json_escaped_utf16_in_payload(self, lang):
    """An unpaired JSON-escaped UTF-16 surrogate must be rejected."""
    # The JSON string contains "\uD834", which gets decoded into an invalid
    # UTF16 character.
    token = generate_token('{"alg":"HS256"}', '{"jti":"\\uD834"}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_audience(self, lang):
    """Validates the "aud" claim when given as a list of audiences."""
    token = generate_token('{"alg":"HS256"}', '{"aud":["joe", "jane"]}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    # matching either entry of the audience list is sufficient.
    validator_with_correct_audience = jwt.new_validator(
        expected_audience='joe', allow_missing_expiration=True)
    jwt_mac.verify_mac_and_decode(token, validator_with_correct_audience)
    validator_with_correct_audience2 = jwt.new_validator(
        expected_audience='jane', allow_missing_expiration=True)
    jwt_mac.verify_mac_and_decode(token, validator_with_correct_audience2)
    # a validator without expected audience rejects a token that has one.
    validator_without_audience = jwt.new_validator(
        allow_missing_expiration=True)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validator_without_audience)
    validator_that_ignores_audience = jwt.new_validator(
        ignore_audiences=True, allow_missing_expiration=True)
    jwt_mac.verify_mac_and_decode(token, validator_that_ignores_audience)
    # audience comparison is case-sensitive.
    validator_with_wrong_audience = jwt.new_validator(
        expected_audience='Joe', allow_missing_expiration=True)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, validator_with_wrong_audience)
    # trailing whitespace matters: the comparison is an exact string match.
    val5 = jwt.new_validator(
        expected_audience='jane ', allow_missing_expiration=True)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, val5)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_audience_string(self, lang):
    """Validates the "aud" claim when given as a single string."""
    token = generate_token('{"alg":"HS256"}', '{"aud":"joe"}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    val1 = jwt.new_validator(
        expected_audience='joe', allow_missing_expiration=True)
    jwt_mac.verify_mac_and_decode(token, val1)
    # a validator without expected audience rejects a token that has one.
    val3 = EMPTY_VALIDATOR
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, val3)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_audiences_with_wrong_type(self, lang):
    """An "aud" list containing a non-string entry must be rejected."""
    token = generate_token('{"alg":"HS256"}', '{"aud":["joe", 123]}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_token_with_empty_audiences(self, lang):
    """An empty "aud" list must make verification fail."""
    token = generate_token('{"alg":"HS256"}', '{"aud":[]}')
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_token_with_many_recursions(self, lang):
    """Moderately nested JSON payloads (depth 10) must parse successfully."""
    num_recursions = 10
    payload = ('{"a":' * num_recursions) + '""' + ('}' * num_recursions)
    token = generate_token('{"alg":"HS256"}', payload)
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_verify_token_with_too_many_recursions_fails(self, lang):
    """Deeply nested JSON must fail with a parsing error, not hang or crash."""
    # num_recursions has been chosen such that parsing of this token fails
    # in all languages. We want to make sure that the algorithm does not
    # hang or crash in this case, but only returns a parsing error.
    num_recursions = 10000
    payload = ('{"a":' * num_recursions) + '""' + ('}' * num_recursions)
    token = generate_token('{"alg":"HS256"}', payload)
    jwt_mac = testing_servers.jwt_mac(lang, KEYSET)
    with self.assertRaises(tink.TinkError):
      jwt_mac.verify_mac_and_decode(token, EMPTY_VALIDATOR)
# Run all parameterized cross-language test cases via absltest.
if __name__ == '__main__':
  absltest.main()
| google/tink | testing/cross_language/jwt_validation_test.py | Python | apache-2.0 | 22,698 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from konfera.models.countries import COUNTRIES
from konfera.models.abstract import KonferaModel
class Speaker(KonferaModel):
    """A person giving a talk, optionally associated with a sponsor."""

    # Stored keys for the honorific title choices.
    TITLE_UNSET = 'none'
    TITLE_MR = 'mr'
    TITLE_MS = 'ms'
    TITLE_MX = 'mx'
    COUNTRY_DEFAULT = 'SK'

    TITLE_CHOICES = (
        (TITLE_UNSET, ''),
        (TITLE_MR, _('Mr.')),
        (TITLE_MS, _('Ms.')),
        (TITLE_MX, _('Mx.')),
    )

    first_name = models.CharField(max_length=128)
    last_name = models.CharField(max_length=128)
    title = models.CharField(max_length=4, choices=TITLE_CHOICES, blank=True, default=TITLE_UNSET)
    email = models.EmailField(max_length=255)
    phone = models.CharField(max_length=64, blank=True)
    bio = models.TextField(blank=True)
    url = models.URLField(blank=True)
    social_url = models.URLField(blank=True)
    country = models.CharField(max_length=2, choices=COUNTRIES, default=COUNTRY_DEFAULT)
    sponsor = models.ForeignKey('Sponsor', blank=True, null=True, related_name='sponsored_speakers')
    image = models.ImageField(upload_to='speaker/', blank=True)

    def __str__(self):
        """Full name; stripped so a missing name part leaves no stray space."""
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
| pyconsk/django-konfera | konfera/models/speaker.py | Python | mit | 1,385 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Process one artificial daily ('D') series of length N=128 with a Lag1Trend
# trend, cycle length 7, 'Integration' transform, no noise (sigma=0.0),
# 20 exogenous variables and AR order 12, using seed 0.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Integration", sigma = 0.0, exog_count = 20, ar_order = 12);
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for video problems with artificially generated frames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensor2tensor.data_generators import video_utils
from tensor2tensor.layers import modalities
from tensor2tensor.utils import registry
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
except ImportError:
pass
@registry.register_problem
class VideoStochasticShapes10k(video_utils.VideoProblem):
  """Shapes moving in a stochastic way.

  Each generated video shows one randomly colored and sized shape (circle,
  rectangle or triangle) moving at fixed speed in a direction chosen at
  random per video.
  """

  @property
  def is_generate_per_split(self):
    """Whether we have a train/test split or just hold out data."""
    return False  # Just hold out some generated data for evals.

  @property
  def frame_height(self):
    return 64

  @property
  def frame_width(self):
    return 64

  @property
  def total_number_of_frames(self):
    # 10k videos
    return 10000 * self.video_length

  @property
  def video_length(self):
    return 5

  @property
  def random_skip(self):
    return False

  @property
  def only_keep_videos_from_0th_frame(self):
    return True

  @property
  def use_not_breaking_batching(self):
    return True

  def eval_metrics(self):
    return []

  @property
  def extra_reading_spec(self):
    """Additional data fields to store on disk and their decoders."""
    data_fields = {
        "frame_number": tf.FixedLenFeature([1], tf.int64),
    }
    decoders = {
        "frame_number": tf.contrib.slim.tfexample_decoder.Tensor(
            tensor_key="frame_number"),
    }
    return data_fields, decoders

  def hparams(self, defaults, unused_model_hparams):
    """Sets problem modalities and vocab sizes on the default hparams."""
    p = defaults
    p.modality = {
        "inputs": modalities.VideoModality,
        "targets": modalities.VideoModality,
    }
    p.vocab_size = {
        "inputs": 256,
        "targets": 256,
    }

  @staticmethod
  def get_circle(x, y, z, c, s):
    """Draws a circle with center(x, y), color c, size s and z-order of z."""
    cir = plt.Circle((x, y), s, fc=c, zorder=z)
    return cir

  @staticmethod
  def get_rectangle(x, y, z, c, s):
    """Draws a rectangle with center(x, y), color c, size s and z-order of z."""
    rec = plt.Rectangle((x-s, y-s), s*2.0, s*2.0, fc=c, zorder=z)
    return rec

  @staticmethod
  def get_triangle(x, y, z, c, s):
    """Draws a triangle with center (x, y), color c, size s and z-order of z."""
    points = np.array([[0, 0], [s, s*math.sqrt(3.0)], [s*2.0, 0]])
    tri = plt.Polygon(points + [x-s, y-s], fc=c, zorder=z)
    return tri

  def generate_stochastic_shape_instance(self):
    """Yields one video of a shape moving to a random direction.

    The size and color of the shapes are random but
    consistent in a single video. The speed is fixed.

    Raises:
      ValueError: The frame size is not square.
    """
    if self.frame_height != self.frame_width or self.frame_height % 2 != 0:
      raise ValueError("Generator only supports square frames with even size.")
    lim = 10.0
    # The eight axis-aligned and diagonal unit directions.
    direction = np.array([[+1.0, +1.0],
                          [+1.0, +0.0],
                          [+1.0, -1.0],
                          [+0.0, +1.0],
                          [+0.0, -1.0],
                          [-1.0, +1.0],
                          [-1.0, +0.0],
                          [-1.0, -1.0]
                         ])
    # Start at the center of the canvas.
    sp = np.array([lim/2.0, lim/2.0])
    rnd = np.random.randint(len(direction))
    di = direction[rnd]
    colors = ["b", "g", "r", "c", "m", "y"]
    color = np.random.choice(colors)
    shape = np.random.choice([
        VideoStochasticShapes10k.get_circle,
        VideoStochasticShapes10k.get_rectangle,
        VideoStochasticShapes10k.get_triangle])
    speed = 1.0
    size = np.random.uniform(0.5, 1.5)
    back_color = str(0.0)

    plt.ioff()
    xy = np.array(sp)
    for _ in range(self.video_length):
      fig = plt.figure()
      fig.set_dpi(self.frame_height//2)
      fig.set_size_inches(2, 2)
      ax = plt.axes(xlim=(0, lim), ylim=(0, lim))
      # Background
      ax.add_patch(VideoStochasticShapes10k.get_rectangle(
          0.0, 0.0, -1.0, back_color, 25.0))
      # Foreground
      ax.add_patch(shape(xy[0], xy[1], 0.0, color, size))
      plt.axis("off")
      plt.tight_layout(pad=-2.0)
      fig.canvas.draw()
      # BUG FIX: np.fromstring(..., sep="") is deprecated for binary input;
      # np.frombuffer is the documented replacement and decodes the same
      # bytes. The explicit np.copy below already makes the array writable.
      image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
      image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
      image = np.copy(np.uint8(image))
      plt.close()
      xy += speed * di
      yield image

  def generate_samples(self, data_dir, tmp_dir, unused_dataset_split):
    """Yields frame dicts until total_number_of_frames are produced."""
    counter = 0
    done = False
    while not done:
      for frame_number, frame in enumerate(
          self.generate_stochastic_shape_instance()):
        if counter >= self.total_number_of_frames:
          done = True
          break
        yield {"frame": frame, "frame_number": [frame_number]}
        counter += 1
| mlperf/training_results_v0.5 | v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/data_generators/video_generated.py | Python | apache-2.0 | 5,705 |
import asyncio
from db_object import DBItem
from core import BotMiddleware, UsesMiddleware, FakeRAWMiddleware, FakeActionMiddleware, LoggerMiddleware, Log, UsesLogger
from actions import (
ReplyAction,
QueuedNotificationAction,
PlayOneSongAction,
ListQueueAction,
DumpQueue
)
from tinydb import TinyDB, where
from event import PlayEvent
from command import (
QueueCommand,
QueueFirstCommand,
SkipCommand,
DramaticSkipCommand, #for the my mys
ClearQueueCommand,
ListQueueCommand,
TestSkipCommand,
NeonLightShowCommand,
HelpCommand,
DumpQueueCommand
)
class UsesRaw(UsesMiddleware):
    """Mixin marking a middleware as a consumer of raw chat packets."""
    CONSUMES = FakeRAWMiddleware
class SendsActions(UsesMiddleware):
    """Mixin marking a middleware as a producer of bot actions."""
    PRODUCES = FakeActionMiddleware

    @asyncio.coroutine
    def send_action(self, action):
        """Forward `action` downstream, tagged for the action middleware."""
        yield from self.send(action, tag=FakeActionMiddleware.TAG)
class PacketMiddleware(BotMiddleware, UsesLogger, UsesRaw):
    """Parses raw chat packets into typed command/event objects.

    Scores each enabled message class against a packet's content via its
    `is_this` method, builds the best-matching object, persists it to the
    DB and forwards it downstream. Also emits control messages marking the
    start and end of a backlog (snapshot) replay.
    """
    TAG = 'tag_bot_message'
    CONTROL_TAG = 'tag_bot_message_control'
    TYPE = BotMiddleware.OUTPUT
    CONTROL_BACKLOG_START = 'bot_message_backlog_start'
    CONTROL_BACKLOG_END = 'bot_message_backlog_end'
    LOG_NAME = 'Packet'
    # Statically enabled message classes; more can be added at runtime
    # through request_support().
    ENABLED_MESSAGES = [
        NeonLightShowCommand,
        HelpCommand,
    ]

    def __init__(self):
        super(PacketMiddleware, self).__init__()
        self.enabled_messages = []
        self.enabled_messages.extend(self.ENABLED_MESSAGES)
        UsesRaw.set_handler(self, self.handle_event)

    def request_support(self, request):
        """Enable extra message classes.

        Returns True when all classes were accepted, otherwise the list of
        incompatible classes (those without an `is_this` scoring method).
        """
        incompatible_classes = []
        for _class in request:
            if hasattr(_class, 'is_this'):
                self.enabled_messages.append(_class)
            else:
                incompatible_classes.append(_class)
        if incompatible_classes:
            return incompatible_classes
        else:
            return True

    def message_id_exists(self, uid):
        """Return True if a message with this uid is already stored."""
        exists = self.db.search(where('uid') == uid)
        return len(exists) > 0

    def create_db_object(self, packet):
        """Build the best-scoring message object for `packet`, or None."""
        content = packet.data['content']
        possibles = [(event_class.is_this(content), event_class)
                     for event_class in self.enabled_messages]
        score, event_class = sorted(possibles, key=lambda x: x[0])[-1]
        if score > 0:
            try:
                return event_class(packet)
            except Exception:
                # BUG FIX: previously a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt and could mask shutdown.
                self.exception('failed to create: {}', packet.data)
                return None

    def save_to_db(self, db_item):
        """Persist `db_item` if it knows how to serialize itself."""
        if hasattr(db_item, 'to_db_dict'):
            db_dict = db_item.to_db_dict()
            self.db.insert(db_dict)

    @asyncio.coroutine
    def send_control_message(self, message):
        """Broadcast a backlog start/end marker on the control tag."""
        yield from self._send(self._output[self.TAG], self.CONTROL_TAG, message)

    @asyncio.coroutine
    def handle_event(self, packet):
        """Process one raw packet (or a snapshot batch of past messages)."""
        if packet.type == 'snapshot-event':
            yield from self.send_control_message(self.CONTROL_BACKLOG_START)
        for message in packet.messages():
            if self.message_id_exists(message.uid):
                # Live messages we have already seen are skipped; messages
                # from the past (backlog) fall through and are reprocessed.
                if not message.past:
                    self.verbose('ignoring {}', message.data['content'])
                    continue
            self.verbose('message: {}', message.data['content'])
            db_object = self.create_db_object(message)
            if db_object:
                self.debug('DB Object: {}', db_object)
                if not db_object.is_prepared():
                    try:
                        yield from db_object.prepare()
                    except Exception as e:
                        self.debug('Failed to process: {}; Exception: {}', db_object, e)
                # only record objects that are successfully prepared to the db
                if db_object.is_prepared():
                    self.save_to_db(db_object)
                    if db_object.DB_TAG == HelpCommand.DB_TAG:
                        db_object.set_commands(self.enabled_messages)
                    yield from self.send(db_object)
            if self.closing:
                break
        if packet.type == 'snapshot-event':
            yield from self.send_control_message(self.CONTROL_BACKLOG_END)
class UsesCommands(UsesMiddleware):
    """Mixin: consume parsed commands and track backlog-replay state."""
    CONSUMES = PacketMiddleware

    @classmethod
    def setup_self(cls, self):
        # `in_backlog` is True while historic (snapshot) messages are being
        # replayed; `backlog_processed` flips once replay has finished.
        self.in_backlog = False
        self.backlog_processed = False
        self._recv_functions[PacketMiddleware.CONTROL_TAG] = self.handle_control_message

    @asyncio.coroutine
    def handle_control_message(self, message):
        """Update backlog flags on start/end control markers."""
        if message == PacketMiddleware.CONTROL_BACKLOG_START:
            self.in_backlog = True
        elif message == PacketMiddleware.CONTROL_BACKLOG_END:
            self.in_backlog = False
            self.backlog_processed = True
class SimpleActionMiddleware(BotMiddleware, UsesCommands, UsesLogger, SendsActions):
    """Emits the actions attached to incoming commands, except during backlog."""
    TAG = 'tag_simple_action_middleware'
    TYPE = BotMiddleware.INPUT
    LOG_NAME = 'Action'

    @asyncio.coroutine
    def setup(self, db):
        """Run base setup, then subscribe to parsed command messages."""
        yield from super(SimpleActionMiddleware, self).setup(db)
        UsesCommands.set_handler(self, self.handle_event)

    @asyncio.coroutine
    def handle_event(self, command):
        """Send every action a command provides, unless replaying backlog."""
        if self.in_backlog or not hasattr(command, 'get_actions'):
            return
        for pending_action in command.get_actions():
            yield from self.send_action(pending_action)
class PlayQueuedSongsMiddleware(BotMiddleware, UsesCommands, UsesLogger, SendsActions):
    """Maintains the song queue and schedules playback of queued songs.

    Queue state (plus the last observed play event) is persisted to the DB
    so it survives restarts; play events observed in chat are reconciled
    against the queue.
    """
    TAG = 'tag_queue_events'
    TYPE = BotMiddleware.OUTPUT
    LOG_NAME = 'Queue'
    # Ask PacketMiddleware to parse these additional message classes.
    MIDDLEWARE_SUPPORT_REQUESTS = {
        PacketMiddleware.TAG: [
            QueueCommand, SkipCommand, ClearQueueCommand, ListQueueCommand, TestSkipCommand, DumpQueueCommand, PlayEvent, DramaticSkipCommand, QueueFirstCommand
        ]
    }

    def __init__(self):
        super(PlayQueuedSongsMiddleware, self).__init__()
        # NOTE(review): asyncio.JoinableQueue was removed in Python 3.9
        # (renamed to asyncio.Queue); this code targets an older asyncio.
        # `message_queue` is not referenced elsewhere in this class --
        # confirm it is still needed.
        self.message_queue = asyncio.JoinableQueue()
        self.song_queue = []
        self.current_song = None
        # Handle of the scheduled play_later() task, if any.
        self.play_callback = None
        # queued a song, waiting to see if it turns up
        self.expecting_song = False
        self.in_backlog = False
        UsesCommands.set_handler(self, self.handle_event)

    @asyncio.coroutine
    def start_close(self):
        """Cancel any pending playback task before shutting down."""
        if self.play_callback:
            self.play_callback.cancel()
        yield from super(PlayQueuedSongsMiddleware, self).start_close()

    def status_string(self):
        """Human-readable dump of the current song and queue (for logging)."""
        return '\n'.join([
            'QueueMiddleware: Current Song: {}({}s)'.format(self.current_song, self.current_song.remaining_duration() if self.current_song else 'NaN'),
            '\tCurrent Queue: {}'.format(self.song_queue)
        ])

    def load_state_from_db(self, db):
        """Restore queue and last play event from the DB, then resume play."""
        super(PlayQueuedSongsMiddleware, self).load_state_from_db(db)
        self.debug('load state from db')
        saved_state = db.search(where('type') == self.TAG)
        if saved_state:
            # Re-hydrate queued commands by uid, in saved order, then sort.
            queue = [db.search(where('uid') == uid)[0] for uid in saved_state[0]['queue']]
            self.song_queue = [DBItem.create_object_from_db_entry(song) for song in queue]
            self.song_queue.sort()
            self.debug('loaded queue: {}', self.song_queue)
        events = db.search(where('type') == PlayEvent.DB_TAG)
        if events:
            # The most recent play event becomes the current song.
            events = sorted(events, key=lambda x: x['timestamp'])
            if len(events):
                event = DBItem.create_object_from_db_entry(events[-1])
                self.current_song = event
                self.debug('loded current song: {}', self.current_song)
        if self.song_queue:
            self.play_song()

    def save_state_to_db(self, db):
        """Persist the ordered list of queued song uids under our TAG."""
        db_dict = {
            'type': self.TAG,
            'queue': [str(item.uid) for item in self.song_queue]
        }
        if db.search(where('type') == self.TAG):
            db.update(db_dict, where('type') == self.TAG)
        else:
            db.insert(db_dict)

    def get_next_songs(self):
        """Return the (first, second) queued songs; either may be None."""
        first = None
        # NOTE(review): `next` shadows the builtin of the same name.
        next = None
        if len(self.song_queue) > 0:
            first = self.song_queue[0]
        if len(self.song_queue) > 1:
            next = self.song_queue[1]
        return first, next

    @asyncio.coroutine
    def play_later(self, delay):
        """Wait `delay` seconds (and for backlog completion), then play."""
        song_one, song_two = self.get_next_songs()
        self.debug("Playing {} in {} seconds.", song_one, delay)
        yield from asyncio.sleep(delay)
        # Re-read the queue: it may have changed while we slept.
        song_one, song_two = self.get_next_songs()
        while not self.backlog_processed:
            self.verbose("Backlog not done, waiting")
            yield from asyncio.sleep(0.5)
        self.expecting_song = True
        yield from self.send_action(PlayOneSongAction(song_one, song_two))

    def play_song(self):
        """(Re)schedule playback of the next queued song."""
        if self.closing:
            return
        if self.play_callback:
            self.play_callback.cancel()
        delay = 0
        if self.current_song:
            delay = self.current_song.remaining_duration()
        # Extra grace period while we wait for a queued song's play event.
        if self.expecting_song:
            delay += 3
        self.play_callback = asyncio.get_event_loop().create_task(
            self.play_later(delay)
        )

    def handle_play_event(self, play):
        """Reconcile an observed play event with the queue."""
        self.current_song = play
        if self.song_queue and self.song_queue[0].youtube_info == play.youtube_info:
            self.debug('Song matches first song in queue, popping item: {}', self.song_queue[0])
            self.song_queue.pop(0)
            return
        # Otherwise, drop the first older queued command for the same video.
        for qcommand in self.song_queue:
            if play.timestamp > qcommand.timestamp\
                    and play.youtube_info == qcommand.youtube_info:
                self.debug('Play event can satisfy song in queue and so removing out of order queue event: {}', qcommand)
                self.song_queue.remove(qcommand)
                break

    @asyncio.coroutine
    def handle_event(self, message):
        """Dispatch one parsed command/event and update queue state."""
        if not message.is_prepared():
            return
        self.verbose('Got Message: {}', message.DB_TAG)
        reply_to = message.uid
        action = None
        # A fully elapsed current song no longer counts as playing.
        if self.current_song and self.current_song.remaining_duration() == 0:
            self.current_song = None
        if QueueCommand.DB_TAG == message.DB_TAG:
            self.song_queue.append(message)
            action = QueuedNotificationAction(self.song_queue, self.current_song, message.youtube_info, reply_to)
        elif QueueFirstCommand.DB_TAG == message.DB_TAG:
            self.song_queue.insert(0, message)
            # null queue as its first
            action = QueuedNotificationAction([], self.current_song, message.youtube_info, reply_to)
        elif PlayEvent.DB_TAG in message.DB_TAG:
            self.expecting_song = False
            self.handle_play_event(message)
        elif SkipCommand.DB_TAG in message.DB_TAG:
            self.current_song = None
            self.expecting_song = False
        elif DramaticSkipCommand.DB_TAG in message.DB_TAG:
            action = ReplyAction("!play https://www.youtube.com/watch?v=a1Y73sPHKxw")
        elif ClearQueueCommand.DB_TAG in message.DB_TAG:
            self.song_queue = []
        elif ListQueueCommand.DB_TAG in message.DB_TAG:
            action = ListQueueAction(self.song_queue, self.current_song, reply_to)
        elif DumpQueueCommand.DB_TAG in message.DB_TAG:
            action = DumpQueue(self.song_queue)
            self.song_queue = []
        if action:
            # Suppress side effects while the backlog is still replaying.
            if not self.backlog_processed:
                self.verbose('In backlog, would have sent: {}', action)
            else:
                yield from self.send_action(action)
        self.play_song()
        self.save_state_to_db(self.db)
        self.debug(self.status_string())
| aeturnum/NeonDJBot | middleware.py | Python | gpl-3.0 | 10,013 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
def _get_concat_variable(name, shape, dtype, num_shards):
  """Get a sharded variable concatenated into one tensor.

  Args:
    name: base variable name; shards are created as `name_<i>`.
    shape: full (unsharded) variable shape; sharding is along dim 0.
    dtype: variable dtype.
    num_shards: number of shards to split dimension 0 across.

  Returns:
    A single tensor covering the whole shape.
  """
  sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
  if len(sharded_variable) == 1:
    return sharded_variable[0]

  concat_name = name + "/concat"
  concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
  # Reuse a previously built concat op cached in the graph collection, so
  # repeated calls in the same scope return the same tensor.
  for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
    if value.name == concat_full_name:
      return value

  # NOTE: concat(axis, values) is the legacy TF argument order.
  concat_variable = array_ops.concat(0, sharded_variable, name=concat_name)
  ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
                        concat_variable)
  return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Create ``num_shards`` variables partitioning axis 0 of ``shape``.

  Rows are divided as evenly as possible: the first ``shape[0] % num_shards``
  shards each receive one extra row.

  Raises:
    ValueError: if there are more shards than rows to distribute.
  """
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  base_size, leftover = divmod(shape[0], num_shards)
  shards = []
  for index in range(num_shards):
    # The first `leftover` shards absorb the remainder, one row each.
    rows = base_size + (1 if index < leftover else 0)
    shards.append(vs.get_variable(name + "_%d" % index, [rows] + shape[1:],
                                  dtype=dtype))
  return shards
class TimeFreqLSTMCell(rnn_cell.RNNCell):
  """Time-Frequency Long short-term memory unit (LSTM) recurrent network cell.

  This implementation is based on:

    Tara N. Sainath and Bo Li
    "Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
    for LVCSR Tasks." submitted to INTERSPEECH, 2016.

  It uses peep-hole connections and optional cell clipping.  One set of LSTM
  weights is unrolled over overlapping frequency blocks of the input (see
  ``_make_tf_features``); each block additionally receives the previous
  block's output as a frequency-recurrent input.
  """

  def __init__(self, num_units, use_peepholes=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_unit_shards: int, How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
      forget_bias: float, Biases of the forget gate are initialized by default
        to 1 in order to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: int, The size of the input feature the LSTM spans over.
      frequency_skip: int, The amount the LSTM filter is shifted by in
        frequency.
    """
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_unit_shards = num_unit_shards
    self._forget_bias = forget_bias
    self._feature_size = feature_size
    self._frequency_skip = frequency_skip
    # NOTE(review): these sizes describe a single frequency block; __call__
    # actually consumes/produces state of size
    # 2 * num_units * num_frequency_blocks -- confirm callers allow for this.
    self._state_size = 2 * num_units
    self._output_size = num_units

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._state_size

  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: state Tensor, 2D, batch x state_size.
      scope: VariableScope for the created subgraph; defaults to
        "TimeFreqLSTMCell".

    Returns:
      A tuple containing:
      - A 2D, batch x output_dim, Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is num_units.
      - A 2D, batch x state_size, Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".
    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    # Chop the flat input into overlapping frequency blocks.
    freq_inputs = self._make_tf_features(inputs)
    dtype = inputs.dtype
    actual_input_size = freq_inputs[0].get_shape().as_list()[1]
    with vs.variable_scope(scope or type(self).__name__,
                           initializer=self._initializer):  # "TimeFreqLSTMCell"
      # One weight matrix shared by every frequency block: input rows cover
      # the block features plus the time- and frequency-recurrent outputs.
      concat_w = _get_concat_variable(
          "W", [actual_input_size + 2*self._num_units, 4 * self._num_units],
          dtype, self._num_unit_shards)
      b = vs.get_variable(
          "B", shape=[4 * self._num_units],
          initializer=array_ops.zeros_initializer, dtype=dtype)
      # Diagonal connections
      if self._use_peepholes:
        w_f_diag = vs.get_variable(
            "W_F_diag", shape=[self._num_units], dtype=dtype)
        w_i_diag = vs.get_variable(
            "W_I_diag", shape=[self._num_units], dtype=dtype)
        w_o_diag = vs.get_variable(
            "W_O_diag", shape=[self._num_units], dtype=dtype)
      # initialize the first freq state to be zero
      # (requires a statically known batch size).
      m_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]),
                                     self._num_units], dtype)
      for fq in range(len(freq_inputs)):
        # The incoming state packs one (c, m) pair per frequency block.
        c_prev = array_ops.slice(state, [0, 2*fq*self._num_units],
                                 [-1, self._num_units])
        m_prev = array_ops.slice(state, [0, (2*fq+1)*self._num_units],
                                 [-1, self._num_units])
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        cell_inputs = array_ops.concat(1, [freq_inputs[fq], m_prev,
                                           m_prev_freq])
        lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
        i, j, f, o = array_ops.split(1, 4, lstm_matrix)
        if self._use_peepholes:
          c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
               sigmoid(i + w_i_diag * c_prev) * tanh(j))
        else:
          c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))
        if self._cell_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
          # pylint: enable=invalid-unary-operand-type
        if self._use_peepholes:
          m = sigmoid(o + w_o_diag * c) * tanh(c)
        else:
          m = sigmoid(o) * tanh(c)
        # This block's output feeds the next frequency block.
        m_prev_freq = m
        if fq == 0:
          state_out = array_ops.concat(1, [c, m])
          m_out = m
        else:
          state_out = array_ops.concat(1, [state_out, c, m])
          m_out = array_ops.concat(1, [m_out, m])
    return m_out, state_out

  def _make_tf_features(self, input_feat):
    """Make the frequency features.

    The input is cut into windows of ``feature_size`` columns, each shifted by
    ``frequency_skip`` (overlapping when skip < feature_size).

    Args:
      input_feat: input Tensor, 2D, batch x num_units.

    Returns:
      A list of frequency features, with each element containing:
      - A 2D, batch x output_dim, Tensor representing the time-frequency feature
        for that frequency index. Here output_dim is feature_size.
    Raises:
      ValueError: if input_size cannot be inferred from static shape inference.
    """
    input_size = input_feat.get_shape().with_rank(2)[-1].value
    if input_size is None:
      raise ValueError("Cannot infer input_size from static shape inference.")
    num_feats = int((input_size - self._feature_size) / (
        self._frequency_skip)) + 1
    freq_inputs = []
    for f in range(num_feats):
      cur_input = array_ops.slice(input_feat, [0, f*self._frequency_skip],
                                  [-1, self._feature_size])
      freq_inputs.append(cur_input)
    return freq_inputs
class GridLSTMCell(rnn_cell.RNNCell):
  """Grid Long short-term memory unit (LSTM) recurrent network cell.

  The default is based on:

    Nal Kalchbrenner, Ivo Danihelka and Alex Graves
    "Grid Long Short-Term Memory," Proc. ICLR 2016.
    http://arxiv.org/abs/1507.01526

  When peephole connections are used, the implementation is based on:

    Tara N. Sainath and Bo Li
    "Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
    for LVCSR Tasks." submitted to INTERSPEECH, 2016.

  The code uses optional peephole connections, shared_weights and cell
  clipping.  A frequency LSTM (F-LSTM) and a time LSTM (T-LSTM) are unrolled
  jointly over overlapping frequency blocks of the input (see
  ``_make_tf_features``).
  """

  def __init__(self, num_units, use_peepholes=False,
               share_time_frequency_weights=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, default False. Set True to enable diagonal/peephole
        connections.
      share_time_frequency_weights: bool, default False. Set True to enable
        shared cell weights between time and frequency LSTMs.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_unit_shards: int, How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
      forget_bias: float, Biases of the forget gate are initialized by default
        to 1 in order to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: int, The size of the input feature the LSTM spans over.
      frequency_skip: int, The amount the LSTM filter is shifted by in
        frequency.
    """
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._share_time_frequency_weights = share_time_frequency_weights
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_unit_shards = num_unit_shards
    self._forget_bias = forget_bias
    self._feature_size = feature_size
    self._frequency_skip = frequency_skip
    # NOTE(review): these sizes describe a single frequency block; __call__
    # actually consumes state of size 2 * num_units * num_frequency_blocks and
    # emits an output of 2 * num_units per block -- confirm callers allow for
    # this.
    self._state_size = 2 * num_units
    self._output_size = num_units

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._state_size

  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: state Tensor, 2D, batch x state_size.
      scope: VariableScope for the created subgraph; defaults to "LSTMCell".

    Returns:
      A tuple containing:
      - A 2D, batch x output_dim, Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is num_units.
      - A 2D, batch x state_size, Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".
    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    # Chop the flat input into overlapping frequency blocks.
    freq_inputs = self._make_tf_features(inputs)
    dtype = inputs.dtype
    actual_input_size = freq_inputs[0].get_shape().as_list()[1]
    with vs.variable_scope(scope or type(self).__name__,
                           initializer=self._initializer):  # "GridLSTMCell"
      # F-LSTM weights; also reused by the T-LSTM when
      # share_time_frequency_weights is set.
      concat_w_f = _get_concat_variable(
          "W_f", [actual_input_size + 2*self._num_units, 4 * self._num_units],
          dtype, self._num_unit_shards)
      b_f = vs.get_variable(
          "B_f", shape=[4 * self._num_units],
          initializer=array_ops.zeros_initializer, dtype=dtype)
      if not self._share_time_frequency_weights:
        concat_w_t = _get_concat_variable(
            "W_t", [actual_input_size + 2*self._num_units, 4 * self._num_units],
            dtype, self._num_unit_shards)
        b_t = vs.get_variable(
            "B_t", shape=[4 * self._num_units],
            initializer=array_ops.zeros_initializer, dtype=dtype)
      if self._use_peepholes:
        # Diagonal (peephole) connections.  The *_freq* weights belong to the
        # F-LSTM; the *_time* weights are only needed when the T-LSTM has its
        # own parameters.
        w_f_diag_freqf = vs.get_variable(
            "W_F_diag_freqf", shape=[self._num_units], dtype=dtype)
        w_i_diag_freqf = vs.get_variable(
            "W_I_diag_freqf", shape=[self._num_units], dtype=dtype)
        w_o_diag_freqf = vs.get_variable(
            "W_O_diag_freqf", shape=[self._num_units], dtype=dtype)
        w_f_diag_freqt = vs.get_variable(
            "W_F_diag_freqt", shape=[self._num_units], dtype=dtype)
        w_i_diag_freqt = vs.get_variable(
            "W_I_diag_freqt", shape=[self._num_units], dtype=dtype)
        w_o_diag_freqt = vs.get_variable(
            "W_O_diag_freqt", shape=[self._num_units], dtype=dtype)
        if not self._share_time_frequency_weights:
          w_f_diag_timef = vs.get_variable(
              "W_F_diag_timef", shape=[self._num_units], dtype=dtype)
          w_i_diag_timef = vs.get_variable(
              "W_I_diag_timef", shape=[self._num_units], dtype=dtype)
          w_o_diag_timef = vs.get_variable(
              "W_O_diag_timef", shape=[self._num_units], dtype=dtype)
          w_f_diag_timet = vs.get_variable(
              "W_F_diag_timet", shape=[self._num_units], dtype=dtype)
          w_i_diag_timet = vs.get_variable(
              "W_I_diag_timet", shape=[self._num_units], dtype=dtype)
          w_o_diag_timet = vs.get_variable(
              "W_O_diag_timet", shape=[self._num_units], dtype=dtype)
      # initialize the first freq state to be zero
      # (requires a statically known batch size).
      m_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]),
                                     self._num_units], dtype)
      c_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]),
                                     self._num_units], dtype)
      for freq_index in range(len(freq_inputs)):
        # The incoming state packs one (c, m) pair per frequency block for
        # the time dimension.
        c_prev_time = array_ops.slice(state, [0, 2 * freq_index *
                                              self._num_units],
                                      [-1, self._num_units])
        m_prev_time = array_ops.slice(state, [0, (2 * freq_index + 1) *
                                              self._num_units],
                                      [-1, self._num_units])
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        cell_inputs = array_ops.concat(1, [freq_inputs[freq_index], m_prev_time,
                                           m_prev_freq])
        # F-LSTM
        lstm_matrix_freq = nn_ops.bias_add(math_ops.matmul(cell_inputs,
                                                           concat_w_f), b_f)
        i_freq, j_freq, f_freq, o_freq = array_ops.split(1, 4, lstm_matrix_freq)
        # T-LSTM
        if self._share_time_frequency_weights:
          i_time = i_freq
          j_time = j_freq
          f_time = f_freq
          o_time = o_freq
        else:
          lstm_matrix_time = nn_ops.bias_add(math_ops.matmul(cell_inputs,
                                                             concat_w_t), b_t)
          i_time, j_time, f_time, o_time = array_ops.split(1, 4,
                                                           lstm_matrix_time)
        # F-LSTM c_freq
        if self._use_peepholes:
          c_freq = (sigmoid(f_freq + self._forget_bias + w_f_diag_freqf * (
              c_prev_freq) + w_f_diag_freqt * c_prev_time) * c_prev_freq +
                    sigmoid(i_freq + w_i_diag_freqf * c_prev_freq + (
                        w_i_diag_freqt * c_prev_time)) * tanh(j_freq))
        else:
          c_freq = (sigmoid(f_freq + self._forget_bias) * c_prev_freq +
                    sigmoid(i_freq) * tanh(j_freq))
        if self._cell_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip,
                                          self._cell_clip)
          # pylint: enable=invalid-unary-operand-type
        # T-LSTM c_time
        if self._use_peepholes:
          if self._share_time_frequency_weights:
            # NOTE(review): the peephole terms here nest differently from the
            # F-LSTM above (w_f_diag_freqf multiplies the whole sum); verify
            # the parenthesization against the reference paper.
            c_time = sigmoid(f_time + self._forget_bias + w_f_diag_freqf * (
                c_prev_freq + w_f_diag_freqt * c_prev_time)) * c_prev_time + (
                    sigmoid(i_time + w_i_diag_freqf * c_prev_freq + (
                        w_i_diag_freqt * c_prev_time)) * tanh(j_time))
          else:
            # NOTE(review): w_f_diag_timef multiplies c_prev_time here, while
            # the analogous input-gate term applies the *_timef weight to
            # c_prev_freq -- confirm against the reference paper.
            c_time = sigmoid(f_time + self._forget_bias + w_f_diag_timef * (
                c_prev_time + w_f_diag_timet * c_prev_time)) * c_prev_time + (
                    sigmoid(i_time + w_i_diag_timef * c_prev_freq + (
                        w_i_diag_timet * c_prev_time)) * tanh(j_time))
        else:
          c_time = (sigmoid(f_time + self._forget_bias) * c_prev_time +
                    sigmoid(i_time) * tanh(j_time))
        if self._cell_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          # BUGFIX: this previously clipped c_freq, which discarded the
          # freshly computed c_time and corrupted the time-LSTM state.
          c_time = clip_ops.clip_by_value(c_time, -self._cell_clip,
                                          self._cell_clip)
          # pylint: enable=invalid-unary-operand-type
        # F-LSTM m_freq
        if self._use_peepholes:
          m_freq = sigmoid(o_freq + w_o_diag_freqf * c_freq +
                           w_o_diag_freqt * c_time) * tanh(c_freq)
        else:
          m_freq = sigmoid(o_freq) * tanh(c_freq)
        # T-LSTM m_time
        if self._use_peepholes:
          if self._share_time_frequency_weights:
            m_time = sigmoid(o_time + w_o_diag_freqf * c_freq +
                             w_o_diag_freqt * c_time) * tanh(c_time)
          else:
            m_time = sigmoid(o_time + w_o_diag_timef * c_freq +
                             w_o_diag_timet * c_time) * tanh(c_time)
        else:
          m_time = sigmoid(o_time) * tanh(c_time)
        # This block's F-LSTM output/state feed the next frequency block.
        m_prev_freq = m_freq
        c_prev_freq = c_freq
        # Concatenate the outputs for T-LSTM and F-LSTM for each shift
        if freq_index == 0:
          state_out = array_ops.concat(1, [c_time, m_time])
          m_out = array_ops.concat(1, [m_time, m_freq])
        else:
          state_out = array_ops.concat(1, [state_out, c_time, m_time])
          m_out = array_ops.concat(1, [m_out, m_time, m_freq])
    return m_out, state_out

  def _make_tf_features(self, input_feat):
    """Make the frequency features.

    The input is cut into windows of ``feature_size`` columns, each shifted by
    ``frequency_skip`` (overlapping when skip < feature_size).

    Args:
      input_feat: input Tensor, 2D, batch x num_units.

    Returns:
      A list of frequency features, with each element containing:
      - A 2D, batch x output_dim, Tensor representing the time-frequency feature
        for that frequency index. Here output_dim is feature_size.
    Raises:
      ValueError: if input_size cannot be inferred from static shape inference.
    """
    input_size = input_feat.get_shape().with_rank(2)[-1].value
    if input_size is None:
      raise ValueError("Cannot infer input_size from static shape inference.")
    num_feats = int((input_size - self._feature_size) / (
        self._frequency_skip)) + 1
    freq_inputs = []
    for f in range(num_feats):
      cur_input = array_ops.slice(input_feat, [0, f*self._frequency_skip],
                                  [-1, self._feature_size])
      freq_inputs.append(cur_input)
    return freq_inputs
| TakayukiSakai/tensorflow | tensorflow/contrib/rnn/python/ops/rnn_cell.py | Python | apache-2.0 | 20,382 |
import os, sys
def load_setup_modules(client_dir):
    """Import ``setup_modules`` from *client_dir* and return the module.

    The directory is pushed onto the front of ``sys.path`` only for the
    duration of the import; it is removed again even when the import fails.
    """
    sys.path.insert(0, client_dir)
    try:
        import setup_modules
    finally:
        sys.path.pop(0)
    return setup_modules
# Locate the autotest client library.  Three strategies, tried in order:
#   1. an installed `autotest` package already on the import path,
#   2. a source checkout three directories above this file,
#   3. the $AUTOTEST_PATH environment variable.
dirname = os.path.dirname(sys.modules[__name__].__file__)
virt_test_dir = os.path.abspath(os.path.join(dirname, ".."))
sys.path.insert(0, virt_test_dir)
try:
    import autotest.client.setup_modules as setup_modules
    client_dir = os.path.dirname(setup_modules.__file__)
    sm = setup_modules
except ImportError:
    try:
        client_dir = os.path.abspath(os.path.join(dirname, "..", "..", ".."))
        sm = load_setup_modules(client_dir)
    # NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
    # `except Exception` would be safer here.
    except:
        try:
            client_dir = os.path.join(os.environ['AUTOTEST_PATH'], 'client')
        except KeyError:
            print("Environment variable $AUTOTEST_PATH not set. "
                  "please set it to a path containing an autotest checkout")
            sys.exit(1)
        if not os.path.isdir(client_dir):
            print('Autotest client library directory was not found at: "%s"' %
                  client_dir)
            print('Please check if the environment variable "$AUTOTEST_PATH" '
                  'points to a valid location')
            sys.exit(1)
        sm = load_setup_modules(client_dir)
# Initialise autotest's import machinery so `autotest.client.*` resolves.
sm.setup(base_path=client_dir, root_module_name="autotest.client")
#!/usr/bin/env python3
# encoding: UTF-8
# This file is part of turberfield.
#
# Turberfield is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Turberfield is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with turberfield. If not, see <http://www.gnu.org/licenses/>.
import bisect
from collections import OrderedDict
from collections.abc import Mapping
import enum
import numbers
import operator
import unittest
from turberfield.dialogue.matcher import Matcher
from turberfield.dialogue.model import SceneScript
class Scale(enum.IntEnum):
    """Integer-valued fixture enum; the tests expect value ordering."""
    one = 1
    two = 2
class Surface(enum.Enum):
    """Fixture enum with complex values; tests compare members via abs()."""
    o = complex(0, 0)
    a = complex(3, 0)
    b = complex(3, 4)
class Discrete(enum.Enum):
    """Fixture enum with plain integer payloads (0/1) on a non-IntEnum."""
    stop = 0
    start = 1
class DictComparisonTests(unittest.TestCase):
    """Matcher.mapping_key must order plain dicts and OrderedDicts alike."""

    def test_dict_equality(self):
        plain = {"a": 1}
        ordered = OrderedDict([("a", 1)])
        self.assertEqual(plain, ordered)

    def test_subtraction(self):
        lhs = {"a": 1}
        rhs = OrderedDict([("a", 2)])
        with self.assertRaises(TypeError):
            lhs - rhs

    def test_bisect(self):
        candidates = ({"a": 1}, OrderedDict([("a", 2)]))
        keys = [Matcher.mapping_key(c) for c in candidates]
        probe = Matcher.mapping_key({"a": 1.5})
        self.assertEqual(1, bisect.bisect_left(keys, probe))

    def test_sort(self):
        high = {"a": 2}
        low = OrderedDict([("a", 1)])
        self.assertEqual([low, high], sorted((high, low), key=Matcher.mapping_key))

    def test_sort_none(self):
        valued = {"a": 2}
        none_valued = OrderedDict([("a", None)])
        # None sorts ahead of any concrete value.
        self.assertEqual([none_valued, valued],
                         sorted((valued, none_valued), key=Matcher.mapping_key))
class EnumComparisonTests(unittest.TestCase):
    """Matcher.mapping_key must order mappings whose values are enum members."""

    def _assert_orders(self, smaller, larger):
        # Shared assertion: `smaller` must sort ahead of `larger`.
        self.assertEqual([smaller, larger],
                         sorted((larger, smaller), key=Matcher.mapping_key))

    def test_bisect_complex_values(self):
        self.assertGreater(abs(Surface.b.value), abs(Surface.a.value))
        keys = [Matcher.mapping_key(m)
                for m in ({"a": Surface.o}, OrderedDict([("a", Surface.b)]))]
        probe = Matcher.mapping_key({"a": Surface.a})
        self.assertEqual(1, bisect.bisect_left(keys, probe))

    def test_sort_complex_values(self):
        self.assertGreater(abs(Surface.a.value), abs(Surface.o.value))
        self._assert_orders(OrderedDict([("a", Surface.o)]), {"a": Surface.a})

    def test_sort_nested_enums_with_complex_values(self):
        self.assertGreater(abs(Surface.a.value), abs(Surface.o.value))
        self._assert_orders({"a": {"b": Surface.o}}, {"a": {"b": Surface.a}})

    def test_intenum_types(self):
        self._assert_orders(OrderedDict([("a", Scale.one)]), {"a": Scale.two})

    def test_sort_discrete_enum_types(self):
        self._assert_orders(OrderedDict([("a", Discrete.stop)]),
                            {"a": Discrete.start})
class MatcherTests(unittest.TestCase):
    """Exercise Matcher.options() against folders keyed by a 'pos' metadata."""

    def setUp(self):
        # Deliberately unsorted so the Matcher has to order by metadata itself.
        self.folders = [
            SceneScript.Folder(
                "turberfield.dialogue.test", "Folder 2", {"pos": 1},
                ["two.rst"], None),
            SceneScript.Folder(
                "turberfield.dialogue.test", "Folder 1", {"pos": 0.5},
                ["one.rst"], None),
            SceneScript.Folder(
                "turberfield.dialogue.test", "Folder 4", {"pos": 3},
                ["four.rst"], None),
            SceneScript.Folder(
                "turberfield.dialogue.test", "Folder 3", {"pos": 2},
                ["three.rst"], None),
        ]

    def test_exact_match(self):
        # An exact metadata hit yields that single folder.
        matcher = Matcher(self.folders)
        self.assertEqual(4, len(matcher.keys))
        rv = list(matcher.options({"pos": 3}))
        self.assertEqual(1, len(rv), rv)
        self.assertIsInstance(rv[0], SceneScript.Folder)
        self.assertEqual({"pos": 3}, rv[0].metadata)

    def test_multi_match(self):
        # A value between two keys yields both neighbours.
        matcher = Matcher(self.folders)
        rv = list(matcher.options({"pos": 1.5}))
        self.assertEqual(2, len(rv))
        self.assertEqual({"pos": 1}, rv[0].metadata)
        self.assertEqual({"pos": 2}, rv[1].metadata)

    def test_single_match(self):
        # A value below the smallest key yields only the first folder.
        matcher = Matcher(self.folders)
        rv = list(matcher.options({"pos": 0}))
        self.assertEqual(1, len(rv), rv)
        self.assertEqual({"pos": 0.5}, rv[0].metadata)
| tundish/turberfield-dialogue | turberfield/dialogue/test/test_matcher.py | Python | gpl-3.0 | 4,798 |
# -*- coding: utf-8 -*-
import json
import logging
import requests
logger = logging.getLogger(__name__)
class OMDBApiClient(object):
    """Thin client for the OMDB HTTP API (http://www.omdbapi.com/)."""

    URL = "http://www.omdbapi.com/"

    def get_movie_info(self, title=None, id=None):
        """Fetch full details for one movie, by title ('t') or IMDB id ('i').

        At least one of *title* or *id* must be truthy.  Returns the decoded
        JSON payload as a dict.
        """
        # Kept as an assert for backward compatibility (callers may rely on
        # AssertionError); `id` shadows the builtin but is part of the API.
        assert(title or id)
        params = {}
        if title is not None:
            params['t'] = title
        if id is not None:
            params['i'] = id
        # NOTE(review): no timeout is set on the HTTP calls in this class --
        # consider requests.get(..., timeout=...) so a stalled API cannot
        # hang callers.
        text = requests.get(self.URL, params=params).text
        return json.loads(text)

    def search_for_movies(self, title):
        """Run an OMDB search ('s') and return the hit list (possibly empty)."""
        text = requests.get(self.URL, params={'s': title}).text
        data = json.loads(text)
        if 'Search' in data:
            return data['Search']
        return []

    def match_title(self, title):
        """Fuzzy-match *title* against OMDB's search endpoint.

        Tries, in order: the raw title, the title with dots replaced by
        spaces, then progressively shorter word prefixes.  Returns the first
        search hit, or None when nothing matches.
        """
        candidates = self.search_for_movies(title)
        logger.debug("---TITLE MATCHING---")
        logger.debug("Trying %s" % title)
        if candidates:
            return candidates[0]
        # Many release names use dots instead of spaces ("Some.Movie.2014").
        candidates = self.search_for_movies(" ".join(title.split(".")))
        logger.debug("Trying %s" % " ".join(title.split(".")))
        if candidates:
            return candidates[0]
        # Fall back to progressively shorter prefixes of the word list.
        parts = []
        for part in title.split("."):
            parts.extend(part.split(" "))
        for i in range(len(parts) - 1, 1, -1):
            candidates = self.search_for_movies(" ".join(parts[:i]))
            logger.debug("Trying %s" % " ".join(parts[:i]))
            if candidates:
                return candidates[0]
        return None

    def search_for_movie_info(self, title):
        """Best-effort lookup: fuzzy-match *title*, then fetch full details.

        Returns None when no candidate could be matched.
        """
        candidate = self.match_title(title)
        if candidate:
            return self.get_movie_info(id=candidate['imdbID'])
        return None
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Version information for Invenio-Search.
This file is imported by ``invenio_search.__init__``,
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = '1.0.0'
| tiborsimko/invenio-search | invenio_search/version.py | Python | mit | 444 |
# Copyright (c) 2015. Librato, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Librato, Inc. nor the names of project contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL LIBRATO, INC. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import unittest
from librato_python_web.instrumentor import telemetry
from librato_python_web.instrumentor.telemetry import TestTelemetryReporter
from librato_python_web.instrumentor.log.logging import LoggingInstrumentor
from librato_python_web.instrumentor.context import push_state, pop_state
# Patch the stdlib logging module before any logger is created below, so the
# instrumented counters observe every subsequent log call in this module.
LoggingInstrumentor().run()
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class LoggingTest(unittest.TestCase):
    """Exercise the logging instrumentor's per-severity telemetry counters."""

    def setUp(self):
        # Route telemetry into an in-memory reporter we can inspect.
        self.reporter = TestTelemetryReporter()
        telemetry.set_reporter(self.reporter)

    def tearDown(self):
        telemetry.set_reporter(None)

    def test_web_state(self):
        """
        Metrics should get reported in web state
        """
        push_state('web')
        try:
            logger.debug("This is a dummy debug message")
            logger.info("Logging test is running")
            logger.info("Here is another info message for the books")
            logger.warning("Ignore this dummy warning message")
            logger.error("Ignore this dummy error message as well")
            logger.critical("Ignore this dummy critical message")
            logger.critical("Ignore this dummy critical message as well")
            try:
                raise Exception("This is a dummy exception not a test failure")
            except Exception:
                logger.exception("Dummy exception:")
            # Only warning and above are counted; logger.exception() counts
            # as both an exception and an error.
            expected_counts = {
                'logging.exception.requests': 1,
                'logging.error.requests': 2,  # Exception counts as an error as well
                'logging.warning.requests': 1,
                'logging.critical.requests': 2,
            }
            self.assertEqual(self.reporter.counts, expected_counts)
            self.assertFalse(self.reporter.records)
        finally:
            # BUGFIX: the previous broad `except Exception` logged and
            # swallowed assertion failures, so this test could never fail.
            # Failures now propagate; the state is still popped on all paths.
            pop_state('web')

    def test_nostate(self):
        """
        Metrics shouldn't get reported outside a web state
        """
        logger.info("This info message shouldn't get counted")
        self.assertFalse(self.reporter.counts)
        self.assertFalse(self.reporter.records)
# Allow running this module directly: `python test_logging.py`.
if __name__ == '__main__':
    unittest.main()
| librato/librato-python-web | test/instrumentor_/test_logging.py | Python | bsd-3-clause | 3,795 |
import extra_parse
import data
import sys
# Call with Pyth program on STDIN.
# Parser state: in Pyth, J and K auto-assign on first use, so track whether
# each has been consumed yet.  `global` at module scope is a no-op but
# documents that make_tree() mutates these flags.
global J_used
global K_used
J_used = False
K_used = False
# Characters that may continue a numeric literal.
nums = '0123456789'
def make_tree(code):
    """Parse one expression from *code*.

    Returns a (tree, remaining_code) pair; the tree is a nested list whose
    head is the token text and whose tail holds child subtrees.  An empty
    tree ([]) signals a closer: ')' is consumed, while ';' is pushed back so
    every enclosing statement can terminate on it.
    """
    if code == '':
        return [], ''
    char, code = code[0], code[1:]
    # Two-character tokens starting with '.' (unless '.' begins a decimal).
    if char == '.' and code[0] not in nums:
        char += code[0]
        code = code[1:]
    # Numeric literal: consume all following digits.
    if char in '.' + nums:
        while code and code[0] in nums:
            char += code[0]
            code = code[1:]
        return [char], code
    # String literal: delegate to the dedicated parser, keep the raw text.
    if char == '"':
        _, new_code = extra_parse.str_parse(char, code)
        char += code[:len(code) - len(new_code)]
        code = new_code
        return [char], code
    # Embedded Python ('$...$'): likewise handled by extra_parse.
    if char == '$':
        _, new_code = extra_parse.python_parse(char, code)
        char += code[:len(code) - len(new_code)]
        code = new_code
        return [char], code
    # Escaped single character.
    if char == '\\':
        if code:
            char += '\\' + code[0]
            code = code[1:]
        return [char], code
    if char == ')':
        return [], code
    # ';' closes every open statement, so leave it in the stream for callers.
    if char == ';':
        return [], ';' + code
    if char in data.variables:
        return [char], code
    # J and K are plain leaves after their first (auto-assigning) use.
    global J_used
    if char == 'J':
        if J_used:
            return [char], code
        else:
            J_used = True
    global K_used
    if char == 'K':
        if K_used:
            return [char], code
        else:
            K_used = True
    # Statements: fixed initial arity, then greedy children until terminated.
    if char in data.c_to_s or char in 'V':
        if char in 'V':
            init_arity = 1
        else:
            init_arity = data.c_to_s[char][1]
        args = [char]
        while len(args) < init_arity + 1 and code:
            child, new_code = make_tree(code)
            code = new_code
            args.append(child)
        while args[-1] and args[-1][0] not in data.end_statement and code:
            child, new_code = make_tree(code)
            code = new_code
            args.append(child)
        # Drop a trailing empty tree produced by a ')' closer.
        if not args[-1]:
            args = args[:-1]
        return args, code
    # Functions / constants / replacements: look up the base arity.
    if char in data.c_to_f:
        arity = data.c_to_f[char][1]
    if char in data.c_to_i:
        arity = data.c_to_i[char][1]
    if char in data.replacements:
        # This may change!
        arity = 1
    if char in data.c_to_i or char in data.c_to_f or char in data.replacements:
        if arity == 0:
            return [char], code
        if not code:
            return [char, []], code
        # Meta-operators adjust the arity of the token they follow.
        elif code[0] in 'FMI':
            arity = 1
            char += code[0]
            code = code[1:]
        elif code[0] in 'LRV':
            arity = 2
            char += code[0]
            code = code[1:]
        elif code[0] in 'W':
            arity += 1
            char += code[0]
            code = code[1:]
        elif code[0] in 'B':
            char += code[0]
            code = code[1:]
        # NOTE(review): code[0] here can raise IndexError when the
        # meta-operator above consumed the last character -- confirm inputs
        # rule this out.
        while code[0] in 'M':
            arity = 1
            char += code[0]
            code = code[1:]
        args = [char]
        # Negative arity means variadic: consume children until the stream
        # ends or a closer empties it.
        while (arity < 0 or len(args) < arity + 1) and code:
            child, new_code = make_tree(code)
            code = new_code
            args.append(child)
        return args, code
    raise NameError("%s unimplemented" % char)
def assemble_trees(code):
    """Parse *code* into a list of top-level parse trees.

    Consumes one tree at a time; a ';' terminator left in the stream by
    make_tree is skipped between trees.
    """
    trees = []
    while code:
        tree, remainder = make_tree(code)
        code = remainder[1:] if remainder.startswith(';') else remainder
        trees.append(tree)
    return trees
def disp_tree(trees):
    """Render the parse trees with graphviz and open the resulting image.

    Relies on the module-level ``Digraph`` name, which the script imports
    only when a command-line argument is given.
    """
    graph = Digraph()

    def add_subtree(tree, next_id):
        # Emit a node for `tree` rooted at id `next_id`; return the highest
        # id used so far.
        if not tree:
            return next_id
        root_id = next_id
        graph.node(str(root_id), label=tree[0])
        for child in tree[1:]:
            if child:
                next_id += 1
                graph.edge(str(root_id), str(next_id))
                next_id = add_subtree(child, next_id)
        return next_id

    next_id = 0
    for tree in trees:
        next_id = add_subtree(tree, next_id) + 1
    graph.render('tree-rep.gv', view=True)
def text_tree(trees):
    """Render parse trees as indented plain text.

    A node with one child is shown inline ("head child"); a node with several
    children puts each child on its own line, indented past the head.
    """
    def render(tree):
        head = tree[0]
        children = tree[1:]
        if not children:
            return head
        prefix = head + ' '
        if len(children) == 1:
            body = render(children[0])
        else:
            body = ''.join('\n' + render(child) for child in children)
        # Shift every nested line right by the width of the head prefix.
        return prefix + body.replace('\n', '\n' + ' ' * len(prefix))

    return '\n'.join(render(tree) for tree in trees)
# Entry point: read one Pyth program from STDIN, parse it, and display the
# parse tree -- graphically (graphviz) when any CLI argument is given,
# otherwise as indented text on stdout.
code = input()
trees = assemble_trees(code)
if len(sys.argv) > 1:
    from graphviz import Digraph
    disp_tree(trees)
else:
    print(text_tree(trees))
| jakobkogler/pyth | tree.py | Python | mit | 4,496 |
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/DEM
"""
#General modules
import os
import sys
# Water Accounting modules
from wa.Collect.DEM.DataAccess import DownloadData
def main(Dir, latlim, lonlim, resolution = '3s', Waitbar = 1):
    """
    Downloads HydroSHED flow direction data from http://www.hydrosheds.org/download/

    this data includes a Digital Elevation Model Flow Direction
    The spatial resolution is 90m (3s) or 450m (15s)

    The following keyword arguments are needed:
    Dir -- 'C:/file/to/path/'
    latlim -- [ymin, ymax]
    lonlim -- [xmin, xmax]
    resolution -- '3s' (default) or '15s'
    Waitbar -- 1 (default) prints a console progress bar, 0 disables it
    """
    # Create directory if not exists for the output
    output_folder = os.path.join(Dir, 'HydroSHED', 'DIR')
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    # Define the output map and create this if not exists
    nameEnd = os.path.join(Dir, 'HydroSHED', 'DIR', 'DIR_HydroShed_-_%s.tif' %resolution)
    parameter = "dir_%s" %resolution

    # Skip the download entirely when the output file already exists.
    if not os.path.exists(nameEnd):

        # Create Waitbar
        if Waitbar == 1:
            print '\nDownload HydroSHED Drainage Direction map with a resolution of %s' %resolution
            import wa.Functions.Start.WaitbarConsole as WaitbarConsole
            total_amount = 1
            amount = 0
            WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)

        # Download and process the data
        DownloadData(output_folder, latlim, lonlim, parameter, resolution)

        if Waitbar == 1:
            amount = 1
            WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)

    else:
        if Waitbar == 1:
            print "\nHydroSHED Drainage direction (%s) already exists in output folder" %resolution

if __name__ == '__main__':
    # NOTE(review): sys.argv (a list) is passed as the `Dir` argument here,
    # which cannot be a valid path -- confirm the intended CLI entry point.
    main(sys.argv)
| wateraccounting/wa | Collect/DEM/HydroSHED_Dir.py | Python | apache-2.0 | 2,097 |
from FlaskBackend import *
import sys
import json
import datetime
import logging
app.debug = True  # NOTE(review): debug mode should be disabled in production
DATABASE = './db/directory.db'
# Set up logging redirection: timestamped records are appended to a local
# file via a plain (non-rotating) FileHandler.
logger = logging.getLogger('directory')
hdlr = logging.FileHandler('./directory.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
def debug(m):
    # Convenience helper: mirror a message to stderr (Python 2 print syntax).
    print >> sys.stderr, m
# Open up a DB connection before the request
@app.before_request
def before_request():
    # Attach a per-request SQLite connection to Flask's request context.
    g.db = connect_db(DATABASE)
# Close it after
@app.after_request
def after_request(response):
    # Close the request-scoped DB connection before the response is sent.
    g.db.close()
    return response
# Return the status of the directory
@app.route("/status", methods = ['GET'])
def get_status():
    # Liveness probe: always reports ONLINE while the process is serving.
    try:
        resp = jsonify({'status' : 'ONLINE'})
        return resp
    except Exception as e:
        print >> sys.stderr, str(e)
        abort(500)
# Handle heartbeat
@app.route("/heartbeat", methods = ['GET'])
def get_heartbeat():
    # Heartbeats must be POSTed; GET is rejected as a bad request.
    abort(400)
@app.route("/heartbeat", methods = ['POST'])
def post_heartbeat():
    # Record a heartbeat from a registered gateway, identified by its source
    # address.  Unknown addresses are logged and ignored; the endpoint still
    # returns 200 either way.
    assert request.path == '/heartbeat'
    assert request.method == 'POST'
    assert request.headers['Content-Type'] == 'application/json'
    print >> sys.stderr, "DEBUG: post_heartbeat"
    try:
        data = json.loads(request.data)
        addr = request.remote_addr
        # NOTE(review): SQL is built by string concatenation throughout this
        # file; parameterized queries would be safer if query_db supports them.
        match = query_db('select * from gateways where address = "' + str(addr) + '";')
        if (match != None and len(match) > 0):
            # BUGFIX: query_db returns a list of rows, so the row must be
            # indexed first (matches the access pattern in post_connect);
            # the previous match['gateway_id'] raised TypeError.
            gid = str(match[0]['gateway_id'])
            # NOTE(review): this UPDATE rewrites the address with itself; it
            # was presumably meant to refresh last_update -- confirm intent.
            query_db('update gateways set address = "' + str(addr) + '" where gateway_id = ' + gid + ';')
        else:
            print >> sys.stderr, "Received heartbeat from invalid gateway at " + str(addr)
        return jsonify(result = {"status" : 200})
    except Exception as e:
        print >> sys.stderr, str(e)
        abort(500)
# Add the requesting gateway to the directory
@app.route("/connect", methods = ['GET'])
def get_connect():
abort(400)
@app.route("/connect", methods = ['POST'])
def post_connect():
assert request.path == '/connect'
assert request.method == 'POST'
assert request.headers['Content-Type'] == 'application/json'
print >> sys.stderr, "DEBUG: post_connect"
try:
data = json.loads(request.data)
addr = request.remote_addr
# Insert the address into the database - don't overwrite if already there
match = query_db('select * from gateways where address = "' + str(addr) + '";')
if (match == None or len(match) == 0):
query_db('insert into gateways(address, last_update) values ("' + str(addr) + '", "' + str(datetime.datetime.now()) + '");')
else: # update the last_update time
debug(str(match))
time = str(datetime.datetime.now())
gid = str(match[0]['gateway_id'])
query_db('update gateways set last_update = "' + time + '" where gateway_id = ' + gid + ';')
# TOOD: authenticate the client in the future
return jsonify(result = {"status" : 200})
except Exception as e:
print >> sys.stderr, str(e)
abort(500)
# Return a JSON-formatted list of all gateway addresses
@app.route("/list-gateways", methods = ['GET'])
def get_list_gateways():
addresses = []
try:
for gateway in query_db('select * from gateways;'):
addresses.append(gateway['address'])
print gateway['address'], 'has the id', gateway['gateway_id']
list = {'gateways' : addresses}
debug(str(addresses))
debug(str(list))
return jsonify(list)
except Exception as e:
print >> sys.stderr, str(e)
abort(500)
if __name__ == "__main__":
app.run(host='0.0.0.0') | chris-wood/CCNSink | submission/code/GatewayDirectory.py | Python | mit | 3,429 |
import random
from collections import defaultdict
# Stance match-up table: an attacking stance that beats the defending stance
# grants +3 to the attack roll, the reversed pairing gives -3, and any
# pairing not listed (including mirror matches) scores 0 via defaultdict(int).
matching = defaultdict(int,{
    ('P','K'):3,
    ('K','G'):3,
    ('G','L'):3,
    ('L','P'):3,
    ('K','P'):-3,
    ('G','K'):-3,
    ('L','G'):-3,
    ('P','L'):-3
    })
def dieroll(n):
    """Return the total of rolling ``n`` fair six-sided dice (0 when n <= 0)."""
    # Idiom fix: a generator expression avoids materializing a throwaway list
    # just to sum it.
    return sum(random.randrange(6) + 1 for _ in range(n))
class Player(object):
    """A combatant defined by toughness, stamina, strength and counter stats."""

    def __init__(self, tough, sta, stre, coun):
        # Core stats, kept under the short attribute names Fight expects.
        self.t = tough
        self.stam = sta
        self.stre = stre
        self.coun = coun
        # Hit points start at full: toughness * 6 * 4.
        self.maxHP = tough * 6 * 4
        self.HP = self.maxHP
        # Momentum/initiative level; starts at 1.
        self.degree = 1

    def move(self):
        """Split a degree-scaled point pool randomly into (strike, defend) and pick a stance."""
        pool = 5 + 10 * self.degree
        strike = random.randrange(pool + 1)
        stance = random.choice(['P', 'K', 'L', 'G'])
        return strike, pool - strike, stance

    def status(self):
        """Wound penalty 0/2/4/6 as HP falls through quarters of max (None at HP <= 0)."""
        quarter = self.t * 6
        for penalty, level in ((0, 3), (2, 2), (4, 1), (6, 0)):
            if self.HP > quarter * level:
                return penalty
class Fight(object):
    """Run a randomized duel between two Players until one drops to 0 HP."""
    def __init__(self, p1,p2):
        # Threshold for the "underdog" rule: when the HP gap exceeds this,
        # the losing side's degree is pushed to the maximum.
        self.underdog = (p1.t + p2.t + p1.stam + p2.stam) * 6
        self.p1 = p1
        self.p2 = p2
    # NOTE(review): calcdmg appears to be dead/broken code — it references
    # s1, s2, t1, t2 and d1, which are not in scope here (they are locals of
    # fight()).  Calling it would raise NameError.
    def calcdmg(self,p1,p2):
        atk_ = dieroll(p1.stre) + s1 + matching[(t1,t2)]
        def_ = dieroll(p2.coun) + s2
        if atk_ > def_ :
            p2.HP -= d1
    #def dmg(pa,pd,sa,da,ta,sd,dd,td)
    def fight(self):
        """Simulate the duel turn by turn; return (winner_number, turn_count)."""
        ctr = 1
        p1 = self.p1
        p2 = self.p2
        while (p1.HP > 0 and p2.HP > 0):
            # Each side splits its pool into strike/defend and picks a stance.
            s1,d1,t1 = p1.move()
            s2,d2,t2 = p2.move()
            # 1's attack
            atk_ = dieroll(p1.stre) + s1 + matching[(t1,t2)]
            def_ = dieroll(p2.coun) + s2
            if atk_ > def_ :
                p2.HP -= d1 + dieroll(p1.stre)
            else:
                print "Counter 1!"
            # 2's attack
            atk_ = dieroll(p2.stre) + s2 + matching[(t2,t1)]
            def_ = dieroll(p1.coun) + s1
            if atk_ > def_ :
                p1.HP -= d2 + dieroll(p2.stre)
            else:
                print "Counter 2!"
            # Recovery
            # Stamina-based regeneration, capped at max HP.
            p1.HP += dieroll(p1.stam)
            if p1.HP > p1.maxHP: p1.HP = p1.maxHP
            p2.HP += dieroll(p2.stam)
            if p2.HP > p2.maxHP: p2.HP = p2.maxHP
            # Degree
            # Random degree drift: a high roll can raise it (harder when
            # wounded, via status()), a roll of 1 lowers it by 1 or 2.
            a = dieroll(1)
            if a > 4:
                if dieroll(1) > p1.status():
                    p1.degree += 1
            if a == 1:
                if dieroll(1) > 3:
                    p1.degree -= 1
                else:
                    p1.degree -= 2
            a = dieroll(1)
            if a > 4:
                if dieroll(1) > p2.status():
                    p2.degree += 1
            if a == 1:
                if dieroll(1) > 3:
                    p2.degree -= 1
                else:
                    p2.degree -= 2
            # Clamp degree into [0, 5].
            if p1.degree < 0: p1.degree = 0
            if p2.degree < 0: p2.degree = 0
            if p1.degree > 5: p1.degree = 5
            if p2.degree > 5: p2.degree = 5
            #print "h"
            # Underdog rule: a big HP gap maxes out the trailing side's degree.
            if p1.HP - p2.HP > self.underdog:
                print "Underdog"
                p2.degree = 5
            if p2.HP - p1.HP > self.underdog:
                print "Underdog"
                p1.degree = 5
            print "Turn {}: P1 sd = {}/{}; P2 sd = {}/{}".format(ctr,s1,d1, s2,d2)
            print "Turn {}: P1 HP = {}; P2 HP = {}".format(ctr,p1.HP, p2.HP)
            print "Turn {}: P1 dg = {}; P2 dg = {}".format(ctr,p1.degree, p2.degree)
            print
            ctr += 1
        print "Player {} Wins!".format(1 if p1.HP > p2.HP else 2)
        return 1 if p1.HP > p2.HP else 2, ctr
# Monte-Carlo driver: run N duels between two fixed builds and report the
# average winner number and average fight length.
ctr = 0
ctrc = 0
N = 100
for i in range(N):
    p1 = Player(4,2,2,2)
    p2 = Player(2,3,2,3)
    print "Starting conditions: P1 = {}, {} ; P2 = {}, {}".format(p1.HP,p1.degree,p2.HP,p2.degree)
    f=Fight(p1,p2)
    print "Underdog:",f.underdog
    print
    p,c = f.fight()
    ctr += p
    ctrc += c
# Mean winner index (between 1 and 2) and mean number of turns per fight.
print ctr * 1.0/N
print ctrc * 1.0/N
| Shashwat986/BattleSim | battlesim.py | Python | mit | 3,338 |
import numpy as np
# Minimal scikit-learn SVC smoke test: fit a two-class toy problem and
# classify one point near the first cluster (expected label: 1).
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
from sklearn.svm import SVC
clf = SVC()
clf.fit(X, y)
print(clf.predict([[-0.8, -1]]))
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
$Id$
"""
# Vendored Python 2 zc.buildout bootstrap script (uses urllib2 and the
# `exec ... in` statement); leave as-is unless upgrading buildout itself.
import os, shutil, sys, tempfile, urllib2
from optparse import OptionParser
# Scratch directory that receives the downloaded zc.buildout egg; removed
# at the end of the script.
tmpeggs = tempfile.mkdtemp()
is_jython = sys.platform.startswith('java')
# parsing arguments
parser = OptionParser(
    'This is a custom version of the zc.buildout %prog script. It is '
    'intended to meet a temporary need if you encounter problems with '
    'the zc.buildout 1.5 release.')
parser.add_option("-v", "--version", dest="version", default='1.4.4',
                  help='Use a specific zc.buildout version. *This '
                  'bootstrap script defaults to '
                  '1.4.4, unlike usual buildpout bootstrap scripts.*')
parser.add_option("-d", "--distribute",
                   action="store_true", dest="distribute", default=False,
                   help="Use Disribute rather than Setuptools.")
parser.add_option("-c", None, action="store", dest="config_file",
                   help=("Specify the path to the buildout configuration "
                         "file to be used."))
options, args = parser.parse_args()
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
    args += ['-c', options.config_file]
if options.version is not None:
    VERSION = '==%s' % options.version
else:
    VERSION = ''
USE_DISTRIBUTE = options.distribute
args = args + ['bootstrap']
to_reload = False
try:
    import pkg_resources
    if not hasattr(pkg_resources, '_distribute'):
        to_reload = True
        raise ImportError
except ImportError:
    # No (suitable) setuptools/distribute available: fetch and execute the
    # appropriate installer straight off the network (Python 2 exec-in-dict).
    ez = {}
    if USE_DISTRIBUTE:
        exec urllib2.urlopen('http://python-distribute.org/distribute_setup.py'
                             ).read() in ez
        ez['use_setuptools'](to_dir=tmpeggs, download_delay=0, no_fake=True)
    else:
        exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py'
                             ).read() in ez
        ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)
    if to_reload:
        reload(pkg_resources)
    else:
        import pkg_resources
if sys.platform == 'win32':
    def quote(c):
        if ' ' in c:
            return '"%s"' % c # work around spawn lamosity on windows
        else:
            return c
else:
    def quote (c):
        return c
ws = pkg_resources.working_set
if USE_DISTRIBUTE:
    requirement = 'distribute'
else:
    requirement = 'setuptools'
env = dict(os.environ,
           PYTHONPATH=
           ws.find(pkg_resources.Requirement.parse(requirement)).location
           )
# easy_install zc.buildout into the scratch egg directory using a child
# interpreter, so the running Python's site-packages stays untouched.
cmd = [quote(sys.executable),
       '-c',
       quote('from setuptools.command.easy_install import main; main()'),
       '-mqNxd',
       quote(tmpeggs)]
if 'bootstrap-testing-find-links' in os.environ:
    cmd.extend(['-f', os.environ['bootstrap-testing-find-links']])
cmd.append('zc.buildout' + VERSION)
if is_jython:
    import subprocess
    exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
    exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
assert exitcode == 0
ws.add_entry(tmpeggs)
ws.require('zc.buildout' + VERSION)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| cutoffthetop/zeitnow | bootstrap.py | Python | bsd-2-clause | 4,121 |
from time import sleep
import math
__author__ = 'sergio'
## @package clitellum.endpoints.channels.reconnectiontimers
# Este paquete contiene las clases para los temporizadores de reconexion
#
## Metodo factoria que crea una instancia de un temporizador
# instantaneo
def CreateInstantTimer():
    """Factory: build a reconnection timer that never waits between retries."""
    timer = InstantReconnectionTimer()
    return timer
## Metodo factoria que crea una instancia de un temporizador
# logaritmico
def CreateLogarithmicTimer():
    """Factory: build a reconnection timer whose delay grows logarithmically."""
    timer = LogarithmicReconnectionTimer()
    return timer
## Metodo factoria que crear una instancia de un temporizador de tiempo constante
def CreateConstantTimer(waiting_time=5):
    """Factory: build a timer that always waits ``waiting_time`` seconds."""
    timer = ConstantReconnectionTimer(waiting_time=waiting_time)
    return timer
## Crea una temporizador en funcion del tipo especificado
# @param type Tipo de temporizador "Instant", "Logarithmic"
def CreateTimerFormType(type):
    """Build a reconnection timer from its type name.

    "Instant" -> instant timer, "Constant" -> constant timer with the
    default delay, anything else -> logarithmic timer.
    """
    if type == "Instant":
        timer = CreateInstantTimer()
    elif type == 'Constant':
        # Keeps the original behavior: the class is constructed directly
        # rather than through CreateConstantTimer().
        timer = ConstantReconnectionTimer()
    else:
        timer = CreateLogarithmicTimer()
    return timer
## Crea un temporizador a partir de una configuracion
# { type :'Instant' }
# { type :'Constant', time : 10 }
# { type :'Logarithmic' }
def CreateTimerFormConfig(config):
    """Build a reconnection timer from a configuration dict.

    Supported shapes:
        { 'type': 'Instant' }
        { 'type': 'Constant', 'time': 10 }   # 'time' optional, defaults to 5
        { 'type': 'Logarithmic' }            # also the fallback for unknown types
    """
    if config['type'] == "Instant":
        return CreateInstantTimer()
    elif config['type'] == 'Constant':
        # Bug fix: the original subscripted the bound method
        # (config.get['time']), which raises TypeError; use dict.get() to
        # probe for the optional key.
        if config.get('time') is not None:
            return ConstantReconnectionTimer(config['time'])
        else:
            return ConstantReconnectionTimer()
    else:
        return CreateLogarithmicTimer()
## Clase base que proporciona la estructura basica de un temporizador de reconexion
class ReconnectionTimer:
    """Base class for reconnection back-off timers.

    Subclasses override wait() (block until the next retry is allowed)
    and reset() (discard accumulated back-off state).  Both are no-ops
    here, so the base class itself behaves like an instant timer.
    """
    def __init__(self):
        pass
    def wait(self):
        """Block until the next reconnection attempt may proceed (no-op)."""
        pass
    def reset(self):
        """Reset any accumulated back-off state (no-op)."""
        pass
## Clase que proporciona un temporizador de reconexion instantaneo,
# no hay tiempo de espera entre un ciclo y el siguiente
class InstantReconnectionTimer(ReconnectionTimer):
    """Reconnection timer with no delay at all between attempts."""
    def __init__(self):
        ReconnectionTimer.__init__(self)
    def __str__(self):
        """Human-readable description of the timer."""
        return "Instant Reconnection Timer"
## Define un temporizador de reconexion en el que el tiempo de espera entre un ciclo
# y el siguiente es logaritmico, .
class LogarithmicReconnectionTimer(ReconnectionTimer):
    """Timer whose delay between retries grows slowly with each attempt."""
    def __init__(self):
        ReconnectionTimer.__init__(self)
        # Attempt counter; drives the back-off formula.
        self.__seed = 1
    def wait(self):
        """Sleep for (1 + 1/n)**n * (1 + log10(n)) seconds, then advance n."""
        # Bug fix: the original used '^' (bitwise XOR) where exponentiation
        # was intended; under Python 3 the float operand 1/seed makes the XOR
        # raise TypeError on the first call.
        waitingTime = ((1 + (1.0 / self.__seed)) ** self.__seed) * (1 + math.log10(self.__seed))
        if waitingTime < 0:
            waitingTime = 0
        sleep(waitingTime)
        self.__seed += 1
    def reset(self):
        """Restart the back-off sequence from the first attempt."""
        self.__seed = 1
    ## Convierte la instancia a string
    def __str__(self):
        return "Logarithmic Reconnection Timer, seed: %s" % self.__seed
## Define un temporizador de reconexion en el que el tiempo de espera entre un ciclo
# y el siguiente es logaritmico, .
class ConstantReconnectionTimer(ReconnectionTimer):
    """Timer that sleeps a fixed number of seconds between attempts."""
    def __init__(self, waiting_time=5):
        ReconnectionTimer.__init__(self)
        # Fixed delay in seconds (name-mangled private attribute).
        self.__waiting_time = waiting_time
    def wait(self):
        """Sleep for the configured fixed delay."""
        sleep(self.__waiting_time)
    def reset(self):
        """Nothing to reset: the delay never changes."""
        pass
    def __str__(self):
        """Human-readable description of the timer."""
        return "Constant Reconnection Timer, seed: %s" % self.__waiting_time
#!/usr/bin/env python
# Python 2 example: print 102403 with thousands separators via str.format's
# ',' option.  (The "Formated" typo is in the runtime string; left untouched.)
print " Formated number:", "{:,}".format(102403)
# coding=utf-8
# Copyright 2020 The PI-SAC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics Utils."""
import collections
import tensorflow as tf
from tf_agents.drivers import dynamic_episode_driver
from tf_agents.metrics import tf_metric
from tf_agents.utils import common
class TFDeque(object):
  """Deque backed by tf.Variable storage.

  Implements a fixed-capacity ring buffer: `_head` counts total adds ever
  made, and writes wrap around via head mod max_len, so once full the
  buffer always holds the most recent `max_len` values.
  """
  def __init__(self, max_len, dtype, name='TFDeque'):
    shape = (max_len,)
    self._dtype = dtype
    self._max_len = tf.convert_to_tensor(max_len, dtype=tf.int32)
    # Backing storage, zero-initialized; only the first `length` slots are valid.
    self._buffer = common.create_variable(
        initial_value=0, dtype=dtype, shape=shape, name=name + 'Vars')
    # Monotonically increasing write counter (not an index).
    self._head = common.create_variable(
        initial_value=0, dtype=tf.int32, shape=(), name=name + 'Head')
  @property
  def data(self):
    # The currently valid portion of the buffer (in storage order, not
    # insertion order once the buffer has wrapped).
    return self._buffer[:self.length]
  @common.function(autograph=True)
  def extend(self, value):
    # Add each element in turn; autograph converts the Python loop.
    for v in value:
      self.add(v)
  @common.function(autograph=True)
  def add(self, value):
    # Wrap the write position around the fixed capacity.
    position = tf.math.mod(self._head, self._max_len)
    self._buffer.scatter_update(tf.IndexedSlices(value, position))
    self._head.assign_add(1)
  @property
  def length(self):
    # Number of valid entries: total adds, capped at capacity.
    return tf.minimum(self._head, self._max_len)
  @common.function
  def clear(self):
    self._head.assign(0)
    self._buffer.assign(tf.zeros_like(self._buffer))
  @common.function(autograph=True)
  def mean(self):
    # Empty buffer yields 0 rather than NaN.
    if tf.equal(self._head, 0):
      return tf.zeros((), dtype=self._dtype)
    return tf.math.reduce_mean(self._buffer[:self.length])
  @common.function(autograph=True)
  def stddev(self):
    # Empty buffer yields 0 rather than NaN.
    if tf.equal(self._head, 0):
      return tf.zeros((), dtype=self._dtype)
    return tf.math.reduce_std(self._buffer[:self.length])
class ReturnStddevMetric(tf_metric.TFStepMetric):
  """Metric to compute the return standard deviation.

  Accumulates per-episode returns batch-element-wise and keeps the last
  `buffer_size` completed episode returns in a TFDeque; result() is the
  standard deviation over that window.
  """
  def __init__(self,
               name='ReturnStddev',
               prefix='Metrics',
               dtype=tf.float32,
               batch_size=1,
               buffer_size=10):
    super(ReturnStddevMetric, self).__init__(name=name, prefix=prefix)
    self._buffer = TFDeque(buffer_size, dtype)
    self._dtype = dtype
    # One running return per batch element.
    self._return_accumulator = common.create_variable(
        initial_value=0, dtype=dtype, shape=(batch_size,), name='Accumulator')
  @common.function(autograph=True)
  def call(self, trajectory):
    # Zero out batch indices where a new episode is starting.
    self._return_accumulator.assign(
        tf.where(trajectory.is_first(), tf.zeros_like(self._return_accumulator),
                 self._return_accumulator))
    # Update accumulator with received rewards.
    self._return_accumulator.assign_add(trajectory.reward)
    # Add final returns to buffer.
    last_episode_indices = tf.squeeze(tf.where(trajectory.is_last()), axis=-1)
    for indx in last_episode_indices:
      self._buffer.add(self._return_accumulator[indx])
    return trajectory
  def result(self):
    # Standard deviation over the buffered episode returns (0 if empty).
    return self._buffer.stddev()
  @common.function
  def reset(self):
    self._buffer.clear()
    self._return_accumulator.assign(tf.zeros_like(self._return_accumulator))
class ReturnHistogram(tf_metric.TFHistogramStepMetric):
  """Histogram metric over the last `buffer_size` completed episode returns.

  (The original one-line summary mentioned action frequencies; the code
  actually buffers episode returns, mirroring ReturnStddevMetric.)
  """
  def __init__(self,
               name='ReturnHistogram',
               dtype=tf.float32,
               batch_size=1,
               buffer_size=10):
    super(ReturnHistogram, self).__init__(name=name)
    self._buffer = TFDeque(buffer_size, dtype)
    self._dtype = dtype
    # One running return per batch element.
    self._return_accumulator = common.create_variable(
        initial_value=0, dtype=dtype, shape=(batch_size,), name='Accumulator')
  @common.function(autograph=True)
  def call(self, trajectory):
    # Zero out batch indices where a new episode is starting.
    self._return_accumulator.assign(
        tf.where(trajectory.is_first(), tf.zeros_like(self._return_accumulator),
                 self._return_accumulator))
    # Update accumulator with received rewards.
    self._return_accumulator.assign_add(trajectory.reward)
    # Add final returns to buffer.
    last_episode_indices = tf.squeeze(tf.where(trajectory.is_last()), axis=-1)
    for indx in last_episode_indices:
      self._buffer.add(self._return_accumulator[indx])
    return trajectory
  # @common.function
  def result(self):
    # Raw buffered returns; the histogram summary is built by the base class.
    return self._buffer.data
  @common.function
  def reset(self):
    self._buffer.clear()
    self._return_accumulator.assign(tf.zeros_like(self._return_accumulator))
def eager_compute(metrics,
                  environment,
                  policy,
                  histograms=None,
                  num_episodes=1,
                  train_step=None,
                  summary_writer=None,
                  summary_prefix='',
                  use_function=True):
  """Compute metrics using `policy` on the `environment`.
  *NOTE*: Because placeholders are not compatible with Eager mode we can not use
  python policies. Because we use tf_policies we need the environment time_steps
  to be tensors making it easier to use a tf_env for evaluations. Otherwise this
  method mirrors `compute` directly.
  Args:
    metrics: List of metrics to compute.
    environment: tf_environment instance.
    policy: tf_policy instance used to step the environment.
    histograms: (Optional) List of histograms to compute.
    num_episodes: Number of episodes to compute the metrics over.
    train_step: An optional step to write summaries against.
    summary_writer: An optional writer for generating metric summaries.
    summary_prefix: An optional prefix scope for metric summaries.
    use_function: Option to enable use of `tf.function` when collecting the
      metrics.
  Returns:
    A dictionary of results {metric_name: metric_value}
  """
  if histograms is None:
    histograms = []
  # Start every metric/histogram from a clean state for this evaluation.
  for metric in metrics:
    metric.reset()
  for histogram in histograms:
    histogram.reset()
  time_step = environment.reset()
  policy_state = policy.get_initial_state(environment.batch_size)
  # The driver steps the environment and feeds every trajectory through the
  # metric/histogram observers.
  driver = dynamic_episode_driver.DynamicEpisodeDriver(
      environment,
      policy,
      observers=metrics+histograms,
      num_episodes=num_episodes)
  if use_function:
    # Wrap the run in tf.function for graph-mode speed.
    common.function(driver.run)(time_step, policy_state)
  else:
    driver.run(time_step, policy_state)
  results = [(metric.name, metric.result()) for metric in metrics]
  # Optionally emit scalar/histogram summaries under `summary_prefix`.
  if train_step is not None and summary_writer:
    with summary_writer.as_default():
      for m in metrics:
        tag = common.join_scope(summary_prefix, m.name)
        tf.compat.v2.summary.scalar(name=tag, data=m.result(), step=train_step)
      for h in histograms:
        tag = common.join_scope(summary_prefix, h.name)
        tf.compat.v2.summary.histogram(
            name=tag, data=h.result(), step=train_step)
  return collections.OrderedDict(results)
| google-research/pisac | pisac/metric_utils.py | Python | apache-2.0 | 7,332 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'Kegbot Server'
copyright = u'2020, Kegbot Project'
author = u'Kegbot Project'
# The short X.Y version
# NOTE(review): version/release are intentionally(?) left blank — confirm
# whether they should track the package version.
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx_issues',
]
# Extensions config
# Repository used by sphinx-issues to link :issue:/:pr: roles.
issues_github_path = 'kegbot/kegbot-server'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'KegbotServerdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'KegbotServer.tex', u'Kegbot Server Documentation',
     u'Kegbot Project', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'kegbotserver', u'Kegbot Server Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'KegbotServer', u'Kegbot Server Documentation',
     author, 'KegbotServer', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| Kegbot/kegbot-server | docs/source/conf.py | Python | gpl-2.0 | 5,642 |
# -------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Stefan
#
# Created: 11.07.2017
# Copyright: (c) Stefan 2017
# Licence: <your licence>
# -------------------------------------------------------------------------------
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from scrape_interface import ScrapeProcessor
import re
from datetime import datetime
# number of entity for which we download
# Server-side id of the municipality (Bucharest Sector 3) whose documents
# this scraper collects.
_ENTITY = 4420465
# Manual corrections for malformed document titles on the site, keyed by
# "<year><original title>"; values are the normalized titles to use instead.
_titleOverride = {
    "2016pv 31 03": "pv 31.03.2016",
    "2016PV 27.06. 2016": "PV 27.06.2016",
    "2015Pv 20.08": "Pv 20.08.2015",
    "2014Pv 22.05": "pv 22.05.2014",
    "2014Pv 29.04": "Pv 29.04.2014",
    "2014Pv 20.08": "pv 20.08.2014",
    "2014Pv 28.07": "Pv 28.07.2014",
    "2014PV 30 Septembrie 1": "PV 30.09.2014",
    "2014Pv 31.10": "Pv 31.10.2014",
    "2014Pv 24.10": "Pv 24.10.2014",
    "2014PV 10.12 Sed Indata": "PV 10.12.2014",
    "2014PV 6.10": "pv 06.10.2014",
    "2014Pv 10.11.": "pv 10.11.2014",
    "2014Pv 20.10": "pv 20.10.2014"
}
def extractdata(sp):
    """Scrape both document sections for the entity: council decisions
    (hotarari) and session minutes (procese verbale)."""
    print "Start processing entity " + str(_ENTITY)
    _process_main(sp, "http://www.primarie3.ro/consiliu-local/hotarari-de-consiliu/", False)
    _process_main(sp, "http://www.primarie3.ro/consiliu-local/procese-verbale-de-sedinta/", True)
    print "End processing entity " + str(_ENTITY)
# main processing - take all years and process until no more pages
def _process_main(sp, configaddress, ispvpage):
    """Process the current-year page at configaddress, then (unless running
    in a DELTA mode) follow every archive-year link and process it too.

    ispvpage: True when scraping session minutes (procese verbale)."""
    html = ScrapeProcessor.download_page(configaddress)
    _process_year(sp, html, ispvpage)
    # Delta runs only look at the current year; skip the archive.
    if sp.get_processmode() in (ScrapeProcessor.ProcessMode.DELTA, ScrapeProcessor.ProcessMode.DELTA_DOWNLOAD):
        return
    soup = BeautifulSoup(html, 'html.parser')
    # The archive box lists one link per previous year.
    arhiva = soup.find("div", {"class": "list_buget list_buget_arhiva MB30"}).find("ul")
    if not arhiva:
        print "ERROR: can't find div with class list_buget"
        return
    for li in arhiva.find_all("li"):
        alink = li.find("a")
        if not alink.has_attr("href"):
            print "ERROR: link was expected to have href"
            continue
        link = alink["href"]
        html2 = ScrapeProcessor.download_page(link)
        _process_year(sp, html2, ispvpage)
# process a page with a year from the site
# html = page contents in string
def _process_year(sp, html, ispvpage):
    """Parse one year's listing page and hand every document to _process_doc.

    The year is taken from the page's H2 title; each <li> holds one document
    link whose title encodes the decision number and (sometimes) its date."""
    soup = BeautifulSoup(html, 'html.parser')
    pagetitle = soup.find("h2", {"class": "MT0"})
    if pagetitle is None:
        print "ERROR: no H2 title found"
        return
    match = re.search("(20[0-9]{2})", pagetitle.string)
    if not match:
        print "ERROR: H2 title was expected to contain a year" + pagetitle.string
        return
    year = match.group(1)
    lista = soup.find("ul", {"class": "list_buget_p"})
    for li in lista.find_all("li"):
        alink = li.a
        href = alink["href"]
        if not href.startswith("http"):
            href = urljoin("http://www.primarie3.ro", href)
        title = alink.string
        # Apply manual title corrections for known-broken entries.
        if (str(year) + title) in _titleOverride:
            title = _titleOverride[str(year) + title]
        if ispvpage:
            # Minutes have no decision number; it is derived from the date later.
            number = 0
        else:
            # Decision titles look like "hc ... 123"; ranged titles like
            # "hc ... 12-15" cover several consecutive decisions.
            match = re.search("hc.*?[ .](\d+)($|\D)", title, re.IGNORECASE)
            if not match:
                match = re.search("hc.*?[ .](\d+)-(\d+)($|\D)", title, re.IGNORECASE)
                if not match:
                    print "ERROR| Titlul nu incepe cu hc: " + title
                    continue
                number1 = int(match.group(1))
                number2 = int(match.group(2))
                # Sanity-check the range width before expanding it.
                if (number2 - number1) < 0 or (number2 - number1) > 10:
                    print "ERROR|gama invalida: " + title
                    continue
                for n in range(number1, number2 + 1):
                    _process_doc(sp, n, year, title, href, "", ispvpage)
                # NOTE(review): this `return` aborts the whole page after the
                # first ranged title; `continue` looks like the intent — confirm.
                return
            number = match.group(1)
        datetext = ""
        if ispvpage:
            datetext = ScrapeProcessor.finddate(title)
            if datetext == "":
                print "ERROR|PV should have a date: " + title
                continue
        else:
            # Decisions may embed their date as "din dd.mm.yyyy".
            match = re.search("din (\d+\.\d+\.\d+)", title, re.IGNORECASE)
            if match:
                datetext = match.group(1)
                date = datetime.strptime(datetext, '%d.%m.%Y')
                datetext = date.strftime("%Y-%m-%d")
                # Drop dates that disagree with the page's year.
                if datetext[:4] != str(year):
                    print "WARNING| date mismatch " + datetext + " vs year " + str(year)
                    datetext = ""
        # process the found document
        code, result = _process_doc(sp, number, year, title, href, datetext, ispvpage)
        if code == "ERROR":
            print "ERROR|" + title + "|" + result
# decision info should come in docInfo with the following tags:
# date, link, number, year, title
def _process_doc(sp, number, year, title, link, date, ispvpage):
    """Register one document with the server, download it, OCR it, and post
    the resulting text.

    Returns the (code, result) pair from the last server call; code "ERROR"
    means the step named in result failed."""
    annex = 0
    doctype = "MAIN"
    #analyse type and post decision
    if ispvpage:
        # Minutes (PRVB) are numbered by day-of-year of their session date.
        number = ScrapeProcessor.dayinyear(date)
        code, result = sp.post_decision("PRVB", number, year, _ENTITY, date, title)
        if code == "ERROR":
            return code, result
        decisionid = result
    else:
        # "anexaN" / "anexa" in the title marks the file as annex N of an
        # already-posted decision rather than the decision body itself.
        match = re.search("anexa(\d+)", title, re.IGNORECASE)
        if match:
            # NOTE(review): group(1) is a string here while the bare-"anexa"
            # case uses int 1 — presumably post_document accepts both.
            annex = match.group(1)
        else:
            match = re.search("anexa", title, re.IGNORECASE)
            if match:
                annex = 1
        if annex:
            # Annexes attach to the existing decision record.
            code, result = sp.get_decision("HOTA", number, year, _ENTITY)
            if code == "ERROR":
                return code, result
            decisionid = result
            doctype = "ANEX"
        else:
            # add the decision to server
            code, result = sp.post_decision("HOTA", number, year, _ENTITY, date, title)
            if code == "ERROR":
                return code, result
            decisionid = result
    # download page
    # On failure a placeholder document is posted so the gap is visible.
    code, result = sp.download_file(link)
    if code == "ERROR":
        sp.post_document(doctype, decisionid, annex, "ERROR_DOWNLOAD", "", link)
        return code, result
    fname = result
    code, result, filetype = sp.ocr_document(fname)
    if code == "ERROR":
        sp.post_document(doctype, decisionid, annex, "ERROR_OCR", "", link)
        return code, result
    ocrfname = result
    # Convert the OCR output to HTML + CSS and post it.
    outstr, cssstr = ScrapeProcessor.preparehtml(ocrfname, filetype)
    return sp.post_document(doctype, decisionid, annex, outstr, cssstr, link)
# Standalone entry point for local runs.
# NOTE(review): server address and credentials are hard-coded here; move
# them to configuration/environment before sharing or deploying.
if __name__ == '__main__':
    localsp = ScrapeProcessor("http://192.168.56.10", "stefan_cioc", "parola1234")
    localsp.set_folders("X:/hot/S3I", "X:/hot/S3O")
    localsp.set_processmode(ScrapeProcessor.ProcessMode.FULL)
    extractdata(localsp)
| stcioc/localdocindex | python/scrape_ps3.py | Python | mit | 7,074 |
#!/usr/bin/env ipython
# Interactive debugging shell for marvinbot: configures logging, storage,
# cache, the Telegram adapter and the scheduler, then loads all plugins.
# Intended to be launched with ipython (see shebang); the wildcard imports
# are deliberate so everything is at hand in the interactive session.
import os
import readline
from pprint import pprint
from marvinbot.log import configure_logging
from marvinbot import *
from marvinbot.models import *
from marvinbot.plugins import load_plugins
from marvinbot.scheduler import configure_scheduler
from marvinbot.utils import get_config, configure_mongoengine
from marvinbot.cache import configure_cache
import logging
# Drop into interactive mode after the script body finishes.
os.environ['PYTHONINSPECT'] = 'True'
config = get_config()
configure_logging(config)
configure_mongoengine(config)
configure_cache(config)
# The adapter must be configured before importing modules that use it.
from marvinbot.core import get_adapter, configure_adapter
configure_adapter(config)
adapter = get_adapter()
from marvinbot.tasks import *
from marvinbot.net import *
configure_downloader(config)
configure_scheduler(config, adapter)
load_plugins(config, adapter)
| BotDevGroup/marvin | marvinbot/shell.py | Python | mit | 795 |
"""
The module sweepers exports the definition of sweep homotopies and
the tracking of solution paths defined by sweep homotopies.
A sweep homotopy is a polynomial system where some of the variables
are considered as parameters. Given solutions for some parameters
and new values for the parameters, we can track the solution paths
starting at the given solutions and ending at the new solutions for
the new values of the parameters.
The sweep is controlled by a convex linear combination between the
list of start and target values for the parameters.
We distinguish between a complex and a real sweep.
In a complex sweep, with a randomly generated gamma we avoid singularities
along the solution paths, in a complex convex combination between the
start and target values for the parameters. This complex sweep is
applicable only when the parameter space is convex.
The algorithms applied in this module are described in the paper by
Kathy Piret and Jan Verschelde: Sweeping Algebraic Curves for Singular
Solutions. Journal of Computational and Applied Mathematics,
volume 234, number 4, pages 1228-1237, 2010.
"""
def standard_complex_sweep(pols, sols, nvar, pars, start, target):
    r"""
    For the polynomials in the list of strings *pols*
    and the solutions in *sols* for the values in the list *start*,
    a sweep through the parameter space will be performed
    in standard double precision to the target values of
    the parameters in the list *target*.
    The number of variables in the polynomials and the solutions
    must be the same and be equal to the value of *nvar*.
    The list of symbols in *pars* contains the names of the variables
    in the polynomials *pols* that serve as parameters.
    The size of the lists *pars*, *start*, and *target* must be same.
    Returns the list of solutions at the target parameter values.
    """
    from phcpy.interface import store_standard_solutions as storesols
    from phcpy.interface import store_standard_system as storesys
    storesys(pols, nbvar=nvar)
    storesols(nvar, sols)
    from phcpy.interface import load_standard_solutions as loadsols
    from phcpy.phcpy2c2 \
        import py2c_sweep_define_parameters_symbolically as define
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_standard_start as set_start
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_standard_target as set_target
    from phcpy.phcpy2c2 import py2c_sweep_standard_complex_run as run
    (nbq, nbp) = (len(pols), len(pars))
    parnames = ' '.join(pars)
    nbc = len(parnames)
    define(nbq, nvar, nbp, nbc, parnames)
    # Parenthesized single-argument print is identical in Python 2 and
    # also valid Python 3 syntax (the original print statement was not).
    print('setting the start and the target ...')
    set_start(nbp, str(start))
    set_target(nbp, str(target))
    print('calling run in standard double precision ...')
    # (0, 0.0, 0.0) requests a randomly generated complex gamma constant.
    run(0, 0.0, 0.0)
    result = loadsols()
    return result
def dobldobl_complex_sweep(pols, sols, nvar, pars, start, target):
    r"""
    For the polynomials in the list of strings *pols*
    and the solutions in *sols* for the values in the list *start*,
    a sweep through the parameter space will be performed
    in double double precision to the target values of
    the parameters in the list *target*.
    The number of variables in the polynomials and the solutions
    must be the same and be equal to the value of *nvar*.
    The list of symbols in *pars* contains the names of the variables
    in the polynomials *pols* that serve as parameters.
    The size of the lists *pars*, *start*, and *target* must be same.
    Returns the list of solutions at the target parameter values.
    """
    from phcpy.interface import store_dobldobl_solutions as storesols
    from phcpy.interface import store_dobldobl_system as storesys
    storesys(pols, nbvar=nvar)
    storesols(nvar, sols)
    from phcpy.interface import load_dobldobl_solutions as loadsols
    from phcpy.phcpy2c2 \
        import py2c_sweep_define_parameters_symbolically as define
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_dobldobl_start as set_start
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_dobldobl_target as set_target
    from phcpy.phcpy2c2 import py2c_sweep_dobldobl_complex_run as run
    (nbq, nbp) = (len(pols), len(pars))
    parnames = ' '.join(pars)
    nbc = len(parnames)
    define(nbq, nvar, nbp, nbc, parnames)
    # Parenthesized single-argument print is identical in Python 2 and
    # also valid Python 3 syntax (the original print statement was not).
    print('setting the start and the target ...')
    set_start(nbp, str(start))
    set_target(nbp, str(target))
    print('calling run in double double precision ...')
    # (0, 0.0, 0.0) requests a randomly generated complex gamma constant.
    run(0, 0.0, 0.0)
    result = loadsols()
    return result
def quaddobl_complex_sweep(pols, sols, nvar, pars, start, target):
    r"""
    For the polynomials in the list of strings *pols*
    and the solutions in *sols* for the values in the list *start*,
    a sweep through the parameter space will be performed
    in quad double precision to the target values of
    the parameters in the list *target*.
    The number of variables in the polynomials and the solutions
    must be the same and be equal to the value of *nvar*.
    The list of symbols in *pars* contains the names of the variables
    in the polynomials *pols* that serve as parameters.
    The size of the lists *pars*, *start*, and *target* must be same.
    Returns the list of solutions at the target parameter values.
    """
    from phcpy.interface import store_quaddobl_solutions as storesols
    from phcpy.interface import store_quaddobl_system as storesys
    storesys(pols, nbvar=nvar)
    storesols(nvar, sols)
    from phcpy.interface import load_quaddobl_solutions as loadsols
    from phcpy.phcpy2c2 \
        import py2c_sweep_define_parameters_symbolically as define
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_quaddobl_start as set_start
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_quaddobl_target as set_target
    from phcpy.phcpy2c2 import py2c_sweep_quaddobl_complex_run as run
    (nbq, nbp) = (len(pols), len(pars))
    parnames = ' '.join(pars)
    nbc = len(parnames)
    define(nbq, nvar, nbp, nbc, parnames)
    # Parenthesized single-argument print is identical in Python 2 and
    # also valid Python 3 syntax (the original print statement was not).
    print('setting the start and the target ...')
    set_start(nbp, str(start))
    set_target(nbp, str(target))
    print('calling run in quad double precision ...')
    # (0, 0.0, 0.0) requests a randomly generated complex gamma constant.
    run(0, 0.0, 0.0)
    result = loadsols()
    return result
def standard_real_sweep(pols, sols, par='s', start=0.0, target=1.0):
    r"""
    Runs a real sweep in standard double precision.
    A real sweep homotopy is a family of n equations in n+1 variables,
    one of which is the artificial parameter *par* which moves from
    *start* (default 0.0) to *target* (default 1.0).
    The last equation can then be of the form
    (1 - s)*(lambda - L[0]) + s*(lambda - L[1]) = 0 so that,
    at s = 0, the natural parameter lambda has the value L[0], and
    at s = 1, the natural parameter lambda has the value L[1].
    Thus: as s moves from 0 to 1, lambda goes from L[0] to L[1].
    Every solution in *sols* must then carry the value L[0] for lambda.
    The sweep stops when the target value for s is reached,
    or earlier when a singular solution is encountered.
    """
    from phcpy.interface import store_standard_system as storesys
    from phcpy.interface import store_standard_solutions as storesols
    from phcpy.interface import load_standard_solutions as loadsols
    from phcpy.phcpy2c2 \
        import py2c_sweep_define_parameters_symbolically as define
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_standard_start as set_start
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_standard_target as set_target
    from phcpy.phcpy2c2 import py2c_sweep_standard_real_run as run
    dim = len(pols) + 1  # the sweep parameter is an extra variable
    storesys(pols, nbvar=dim)
    storesols(dim, sols)
    # A single sweep parameter: its name string is passed as the symbol list.
    define(len(pols), dim, 1, len(par), par)
    set_start(1, str([start, 0.0]))   # real and imaginary part
    set_target(1, str([target, 0.0]))
    run()
    return loadsols()
def dobldobl_real_sweep(pols, sols, par='s', start=0.0, target=1.0):
    r"""
    Runs a real sweep in double double precision.
    A real sweep homotopy is a family of n equations in n+1 variables,
    one of which is the artificial parameter *par* which moves from
    *start* (default 0.0) to *target* (default 1.0).
    The last equation can then be of the form
    (1 - s)*(lambda - L[0]) + s*(lambda - L[1]) = 0 so that,
    at s = 0, the natural parameter lambda has the value L[0], and
    at s = 1, the natural parameter lambda has the value L[1].
    Thus: as s moves from 0 to 1, lambda goes from L[0] to L[1].
    Every solution in *sols* must then carry the value L[0] for lambda.
    The sweep stops when the target value for s is reached,
    or earlier when a singular solution is encountered.
    """
    from phcpy.interface import store_dobldobl_system as storesys
    from phcpy.interface import store_dobldobl_solutions as storesols
    from phcpy.interface import load_dobldobl_solutions as loadsols
    from phcpy.phcpy2c2 \
        import py2c_sweep_define_parameters_symbolically as define
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_dobldobl_start as set_start
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_dobldobl_target as set_target
    from phcpy.phcpy2c2 import py2c_sweep_dobldobl_real_run as run
    dim = len(pols) + 1  # the sweep parameter is an extra variable
    storesys(pols, nbvar=dim)
    storesols(dim, sols)
    # A single sweep parameter: its name string is passed as the symbol list.
    define(len(pols), dim, 1, len(par), par)
    # Each complex double double takes four doubles: two per part.
    set_start(1, str([start, 0.0, 0.0, 0.0]))
    set_target(1, str([target, 0.0, 0.0, 0.0]))
    run()
    return loadsols()
def quaddobl_real_sweep(pols, sols, par='s', start=0.0, target=1.0):
    r"""
    Runs a real sweep in quad double precision.
    A real sweep homotopy is a family of n equations in n+1 variables,
    one of which is the artificial parameter *par* which moves from
    *start* (default 0.0) to *target* (default 1.0).
    The last equation can then be of the form
    (1 - s)*(lambda - L[0]) + s*(lambda - L[1]) = 0 so that,
    at s = 0, the natural parameter lambda has the value L[0], and
    at s = 1, the natural parameter lambda has the value L[1].
    Thus: as s moves from 0 to 1, lambda goes from L[0] to L[1].
    Every solution in *sols* must then carry the value L[0] for lambda.
    The sweep stops when the target value for s is reached,
    or earlier when a singular solution is encountered.
    """
    from phcpy.interface import store_quaddobl_system as storesys
    from phcpy.interface import store_quaddobl_solutions as storesols
    from phcpy.interface import load_quaddobl_solutions as loadsols
    from phcpy.phcpy2c2 \
        import py2c_sweep_define_parameters_symbolically as define
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_quaddobl_start as set_start
    from phcpy.phcpy2c2 \
        import py2c_sweep_set_quaddobl_target as set_target
    from phcpy.phcpy2c2 import py2c_sweep_quaddobl_real_run as run
    dim = len(pols) + 1  # the sweep parameter is an extra variable
    storesys(pols, nbvar=dim)
    storesols(dim, sols)
    # A single sweep parameter: its name string is passed as the symbol list.
    define(len(pols), dim, 1, len(par), par)
    # Each complex quad double takes eight doubles: four per part.
    set_start(1, str([start, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
    set_target(1, str([target, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
    run()
    return loadsols()
def complex_sweep_test(precision='d'):
    """
    Runs a complex sweep on two points on the unit circle.
    Although we start at two points with real coordinates
    and we end at two points that have nonzero imaginary parts,
    the sweep does not encounter a singularity because of
    the random complex gamma constant.
    The *precision* is 'd' (standard double), 'dd' (double double),
    or 'qd' (quad double); any other value prints an error and returns.
    """
    from solutions import make_solution as makesol
    circle = ['x^2 + y^2 - 1;']
    first = makesol(['x', 'y'], [0, 1])
    second = makesol(['x', 'y'], [0, -1])
    startsols = [first, second]
    xpar = ['x']
    if(precision == 'd'):
        ststart = [0, 0] # real and imaginary parts of the start value
        sttarget = [2, 0]
        newsols = standard_complex_sweep(circle, startsols, 2, xpar, \
            ststart, sttarget)
    elif(precision == 'dd'):
        ddstart = [0, 0, 0, 0] # double doubles
        ddtarget = [2, 0, 0, 0]
        newsols = dobldobl_complex_sweep(circle, startsols, 2, xpar, \
            ddstart, ddtarget)
    elif(precision == 'qd'):
        qdstart = [0, 0, 0, 0, 0, 0, 0, 0] # quad doubles
        qdtarget = [2, 0, 0, 0, 0, 0, 0, 0]
        newsols = quaddobl_complex_sweep(circle, startsols, 2, xpar, \
            qdstart, qdtarget)
    else:
        print('wrong precision given as input parameter to test')
        return  # bail out: continuing would raise NameError on newsols
    for sol in newsols:
        print(sol)
def real_sweep_test(precision='d'):
    """
    Runs a real sweep on two points on the unit circle: (1,0), (-1,0),
    moving the second coordinate from 0 to 2.
    The sweep will stop at the quadratic turning point: (0,1).
    We can also run the sweep starting at two complex points:
    (2*j, sqrt(5)) and (-2*j, sqrt(5)), moving the second coordinate
    from sqrt(5) to 0. This sweep will also stop at (0,1).
    The *precision* is 'd' (standard double), 'dd' (double double),
    or 'qd' (quad double); any other value prints an error and returns.
    """
    from solutions import make_solution as makesol
    rcircle = ['x^2 + y^2 - 1;', 'y*(1-s) + (y-2)*s;']
    rfirst = makesol(['x', 'y', 's'], [1, 0, 0])
    rsecond = makesol(['x', 'y', 's'], [-1, 0, 0])
    rstartsols = [rfirst, rsecond]
    if(precision == 'd'):
        rnewsols = standard_real_sweep(rcircle, rstartsols)
    elif(precision == 'dd'):
        rnewsols = dobldobl_real_sweep(rcircle, rstartsols)
    elif(precision == 'qd'):
        rnewsols = quaddobl_real_sweep(rcircle, rstartsols)
    else:
        print('wrong precision given as input parameter to test')
        return  # bail out: continuing would raise NameError on rnewsols
    print('after the sweep that started at real solutions :')
    for sol in rnewsols:
        print(sol)
    from math import sqrt
    sqrt5 = sqrt(5)
    sweepline = '(y - %.15e)*(1-s) + y*s;' % sqrt5
    ccircle = ['x^2 + y^2 - 1;', sweepline]
    cfirst = makesol(['x', 'y', 's'], [complex(0,2), sqrt5, 0])
    csecond = makesol(['x', 'y', 's'], [complex(0,-2), sqrt5, 0])
    cstartsols = [cfirst, csecond]
    if(precision == 'd'):
        cnewsols = standard_real_sweep(ccircle, cstartsols)
    elif(precision == 'dd'):
        cnewsols = dobldobl_real_sweep(ccircle, cstartsols)
    elif(precision == 'qd'):
        cnewsols = quaddobl_real_sweep(ccircle, cstartsols)
    else:
        print('wrong precision given as input parameter to test')
        return  # bail out: continuing would raise NameError on cnewsols
    print('after the sweep that started at complex solutions :')
    for sol in cnewsols:
        print(sol)
if __name__ == "__main__":
    # Default smoke test: the real sweep in quad double precision.
    # Uncomment any line below to exercise the complex sweeps instead.
    real_sweep_test('qd')
    #complex_sweep_test('d')
    #complex_sweep_test('dd')
    #complex_sweep_test('qd')
| janverschelde/PHCpack | src/Python/PHCpy2/phcpy/sweepers.py | Python | gpl-3.0 | 14,790 |
{
event: {
"display_title": "",
"subtitle": "",
"capacity": 0,
"current_fill": 0,
"attendee_min_age": 21,
"lat": "",
"lon": "",
"point": "()",
"venue_id": 3,
"event_directions": "",
"private": "",
"date": DateTime,
"external_url":,
"indoor":,
"outdoor":,
"organizer":,
"host_name":,
"music_genre_id":,
"performance_type_id":,
"start_time":,
"end_time":,
"dress_type_id":,
}
}
has_many :event_categories
has_many :attendees
has_one :venue
{
attendee: {
"first_name": "",
"last_name": "",
"email": "",
"phone": 0,
"birthday": 0,
"external_url":,
"display_photo_url":,
}
}
has_many :events
has_many :friends
has_many :favorite_venues
has_many :payment_methods
{
venue: {
"display_title": "",
"subtitle": "",
"capacity": 0,
"current_fill": 0,
"opening_hours_id":,
"lat": "",
"lon": "",
"point": "()",
"venue_directions": "",
"external_url":,
"indoor":,
"outdoor":,
"management_company":,
"accepts_credit_card":
}
}
has_many :venue_categories
ENUM: {
event_category: {
"display_title": "",
"subtitle": "",
"constant": ""
}
}
ENUM: {
venue_category: {
"display_title": "",
"subtitle": "",
"constant": ""
}
}
| cdepman/falcon_api | models.py | Python | mit | 1,540 |
def char_in_string(s1, s2):
    """Return the index in s1 of the first character that also occurs in s2.

    Returns -1 when no character of s1 appears in s2 (including when
    either string is empty).  Runs in O(len(s1) + len(s2)).
    """
    # A set gives the same O(1) membership test as the original
    # dict-of-True, without the dummy values.
    chars = set(s2)
    for index, ch in enumerate(s1):
        if ch in chars:
            return index
    return -1
if __name__ == '__main__':
    # Demo run.  Parenthesized single-argument print behaves identically
    # under Python 2 and is valid Python 3 (the print statement was not).
    print(char_in_string('adf6ysh', '123678'))
| MichaelReiter/ProgrammingPractice | char_in_string.py | Python | mit | 240 |
"""Convergence client implementation."""
# Copyright (c) 2011-2013 Loris Cuoghi <loris.cuoghi@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Package metadata.
__author__ = 'Loris Cuoghi'
__email__ = 'loris.cuoghi@gmail.com'
__license__ = 'GPLv3'
import cuivre
# Re-export the public client classes at package level.
from cuivre.client.client import Client
from cuivre.client.connection import Connection
from cuivre.client.database import Database
from cuivre.client.configuration import Configuration
from cuivre.client.retriever import HTTPSGet
# HTTP User-Agent header advertised by the client, e.g. "Cuivre 1.2".
USER_AGENT = 'Cuivre {}'.format(cuivre.__version__)
| loris0/Cuivre | cuivre/client/__init__.py | Python | gpl-3.0 | 1,135 |
#!/usr/bin/env python3
import logging
import datetime
from pycaching.errors import ValueError, LoadError
from pycaching.enums import Type, Size
from pycaching.point import Point
from pycaching.util import Util
from pycaching.trackable import Trackable
# prefix _type() function to avoid colisions with cache type
_type = type
def lazy_loaded(func):
    """Decorator that loads full cache details on the first attribute miss.

    If the wrapped accessor raises AttributeError, the cache record is
    fetched (by URL when known, else by waypoint) and the accessor is
    retried once.
    """
    def inner(*args, **kwargs):
        cache = args[0]
        assert isinstance(cache, Cache)
        try:
            return func(*args, **kwargs)
        except AttributeError:
            logging.debug("Lazy loading: %s", func.__name__)
            if hasattr(cache, 'url'):
                cache.geocaching.load_cache_by_url(cache.url, cache)
            elif hasattr(cache, '_wp'):
                cache.geocaching.load_cache(cache.wp, cache)
            else:
                raise LoadError("Cache lacks info for lazy loading")
        # Retry now that the missing attribute should be populated.
        return func(*args, **kwargs)
    return inner
class Cache(object):
    """A geocache record.

    Attribute getters decorated with ``lazy_loaded`` fetch the full cache
    details from the ``geocaching`` backend on first access (see the
    ``lazy_loaded`` decorator above).  Setters validate and normalize
    their input before storing it on a ``_``-prefixed attribute.
    """
    # generated by Util.get_possible_attributes()
    # TODO: smarter way of keeping attributes up to date
    _possible_attributes = {
        "abandonedbuilding": "Abandoned Structure",
        "available": "Available at All Times",
        "bicycles": "Bicycles",
        "boat": "Boat",
        "campfires": "Campfires",
        "camping": "Camping Available",
        "cliff": "Cliff / Falling Rocks",
        "climbing": "Difficult Climbing",
        "cow": "Watch for Livestock",
        "danger": "Dangerous Area",
        "dangerousanimals": "Dangerous Animals",
        "dogs": "Dogs",
        "fee": "Access or Parking Fee",
        "field_puzzle": "Field Puzzle",
        "firstaid": "Needs Maintenance",
        "flashlight": "Flashlight Required",
        "food": "Food Nearby",
        "frontyard": "Front Yard(Private Residence)",
        "fuel": "Fuel Nearby",
        "geotour": "GeoTour Cache",
        "hike_long": "Long Hike (+10km)",
        "hike_med": "Medium Hike (1km-10km)",
        "hike_short": "Short Hike (Less than 1km)",
        "hiking": "Significant Hike",
        "horses": "Horses",
        "hunting": "Hunting",
        "jeeps": "Off-Road Vehicles",
        "kids": "Recommended for Kids",
        "landf": "Lost And Found Tour",
        "mine": "Abandoned Mines",
        "motorcycles": "Motortcycles",
        "night": "Recommended at Night",
        "nightcache": "Night Cache",
        "onehour": "Takes Less Than an Hour",
        "parking": "Parking Available",
        "parkngrab": "Park and Grab",
        "partnership": "Partnership Cache",
        "phone": "Telephone Nearby",
        "picnic": "Picnic Tables Nearby",
        "poisonoak": "Poisonous Plants",
        "public": "Public Transportation",
        "quads": "Quads",
        "rappelling": "Climbing Gear",
        "restrooms": "Public Restrooms Nearby",
        "rv": "Truck Driver/RV",
        "s-tool": "Special Tool Required",
        "scenic": "Scenic View",
        "scuba": "Scuba Gear",
        "seasonal": "Seasonal Access",
        "skiis": "Cross Country Skis",
        "snowmobiles": "Snowmobiles",
        "snowshoes": "Snowshoes",
        "stealth": "Stealth Required",
        "stroller": "Stroller Accessible",
        "swimming": "May Require Swimming",
        "teamwork": "Teamwork Required",
        "thorn": "Thorns",
        "ticks": "Ticks",
        "touristok": "Tourist Friendly",
        "treeclimbing": "Tree Climbing",
        "uv": "UV Light Required",
        "wading": "May Require Wading",
        "water": "Drinking Water Nearby",
        "wheelchair": "Wheelchair Accessible",
        "winter": "Available During Winter",
        "wirelessbeacon": "Wireless Beacon"
    }
    def __init__(self, wp, geocaching, *, name=None, type=None, location=None, state=None,
                 found=None, size=None, difficulty=None, terrain=None, author=None, hidden=None,
                 attributes=None, summary=None, description=None, hint=None, favorites=None,
                 pm_only=None, trackables=None, url=None):
        """Create a cache identified by waypoint *wp*, tied to a *geocaching*
        backend.  All keyword arguments are optional; attributes left unset
        are lazily loaded from the backend on first access."""
        self.geocaching = geocaching
        if wp is not None:
            self.wp = wp
        if name is not None:
            self.name = name
        if type is not None:
            self.type = type
        if location is not None:
            self.location = location
        if state is not None:
            self.state = state
        if found is not None:
            self.found = found
        if size is not None:
            self.size = size
        if difficulty is not None:
            self.difficulty = difficulty
        if terrain is not None:
            self.terrain = terrain
        if author is not None:
            self.author = author
        if hidden is not None:
            self.hidden = hidden
        if attributes is not None:
            self.attributes = attributes
        if summary is not None:
            self.summary = summary
        if description is not None:
            self.description = description
        if hint is not None:
            self.hint = hint
        if favorites is not None:
            self.favorites = favorites
        if pm_only is not None:
            self.pm_only = pm_only
        if trackables is not None:
            self.trackables = trackables
        if url is not None:
            self.url = url
    def __str__(self):
        return self.wp
    def __eq__(self, other):
        # Caches are identified by their waypoint code.
        return self.wp == other.wp
    @property
    def wp(self):
        # Waypoint code, e.g. "GC1234"; normalized to upper case.
        return self._wp
    @wp.setter
    def wp(self, wp):
        wp = str(wp).upper().strip()
        if not wp.startswith("GC"):
            raise ValueError("Waypoint '{}' doesn't start with 'GC'.".format(wp))
        self._wp = wp
    @property
    def geocaching(self):
        # Backend used for lazy loading; must expose load_cache().
        return self._geocaching
    @geocaching.setter
    def geocaching(self, geocaching):
        if not hasattr(geocaching, "load_cache"):
            raise ValueError("Passed object (type: '{}') doesn't contain 'load_cache' method.".format(_type(geocaching)))
        self._geocaching = geocaching
    @property
    @lazy_loaded
    def name(self):
        return self._name
    @name.setter
    def name(self, name):
        name = str(name).strip()
        self._name = name
    @property
    @lazy_loaded
    def location(self):
        return self._location
    @location.setter
    def location(self, location):
        # Accepts either a Point or a string of coordinates.
        if _type(location) is str:
            location = Point.from_string(location)
        elif _type(location) is not Point:
            raise ValueError("Passed object is not Point instance nor string containing coordinates.")
        self._location = location
    @property
    @lazy_loaded
    def type(self):
        return self._type
    @type.setter
    def type(self, type):
        # Accepts either a Type enum member or its string representation.
        if _type(type) is not Type:
            type = Type.from_string(type)
        self._type = type
    @property
    @lazy_loaded
    def state(self):
        return self._state
    @state.setter
    def state(self, state):
        self._state = bool(state)
    @property
    @lazy_loaded
    def found(self):
        return self._found
    @found.setter
    def found(self, found):
        self._found = bool(found)
    @property
    @lazy_loaded
    def size(self):
        return self._size
    @size.setter
    def size(self, size):
        # Accepts either a Size enum member or its string representation.
        if _type(size) is not Size:
            size = Size.from_string(size)
        self._size = size
    @property
    @lazy_loaded
    def difficulty(self):
        return self._difficulty
    @difficulty.setter
    def difficulty(self, difficulty):
        difficulty = float(difficulty)
        if difficulty < 1 or difficulty > 5 or difficulty * 10 % 5 != 0:  # X.0 or X.5
            raise ValueError("Difficulty must be from 1 to 5 and divisible by 0.5.")
        self._difficulty = difficulty
    @property
    @lazy_loaded
    def terrain(self):
        return self._terrain
    @terrain.setter
    def terrain(self, terrain):
        terrain = float(terrain)
        if terrain < 1 or terrain > 5 or terrain * 10 % 5 != 0:  # X.0 or X.5
            raise ValueError("Terrain must be from 1 to 5 and divisible by 0.5.")
        self._terrain = terrain
    @property
    @lazy_loaded
    def author(self):
        return self._author
    @author.setter
    def author(self, author):
        author = str(author).strip()
        self._author = author
    @property
    @lazy_loaded
    def hidden(self):
        return self._hidden
    @hidden.setter
    def hidden(self, hidden):
        # Accepts either a datetime.date or a parseable date string.
        if _type(hidden) is str:
            hidden = Util.parse_date(hidden)
        elif _type(hidden) is not datetime.date:
            raise ValueError("Passed object is not datetime.date instance nor string containing a date.")
        self._hidden = hidden
    @property
    @lazy_loaded
    def attributes(self):
        return self._attributes
    @attributes.setter
    def attributes(self, attributes):
        # Keeps only keys listed in _possible_attributes; unknown keys are
        # logged and dropped.  Values are truthy flags (attribute allowed?).
        if _type(attributes) is not dict:
            raise ValueError("Attribues is not dict.")
        self._attributes = {}
        for name, allowed in attributes.items():
            name = name.strip().lower()
            if name in self._possible_attributes:
                self._attributes[name] = allowed
            else:
                logging.warning("Unknown attribute '%s', ignoring.", name)
    @property
    @lazy_loaded
    def summary(self):
        return self._summary
    @summary.setter
    def summary(self, summary):
        summary = str(summary).strip()
        self._summary = summary
    @property
    @lazy_loaded
    def description(self):
        return self._description
    @description.setter
    def description(self, description):
        description = str(description).strip()
        self._description = description
    @property
    @lazy_loaded
    def hint(self):
        return self._hint
    @hint.setter
    def hint(self, hint):
        hint = str(hint).strip()
        self._hint = hint
    @property
    @lazy_loaded
    def favorites(self):
        return self._favorites
    @favorites.setter
    def favorites(self, favorites):
        self._favorites = int(favorites)
    @property
    def pm_only(self):
        # Premium-member-only flag; never lazily loaded.
        return self._pm_only
    @pm_only.setter
    def pm_only(self, pm_only):
        self._pm_only = bool(pm_only)
    def inside_area(self, area):
        """Calculate if geocache is inside given area"""
        return area.inside_area(self.location)
    @property
    @lazy_loaded
    def trackables(self):
        return self._trackables
    @trackables.setter
    def trackables(self, trackables):
        # Accepts a single Trackable (wrapped in a list) or a list of them.
        if _type(trackables) is Trackable:
            trackables = [trackables]
        elif _type(trackables) is not list:
            raise ValueError("Passed object is not list")
        self._trackables = trackables
| mrvdb/pycaching | pycaching/cache.py | Python | lgpl-3.0 | 10,846 |
"""
Custom migration script to add slug field to all ProviderConfig models.
"""
from django.db import migrations, models
from django.utils.text import slugify
def fill_slug_field(apps, schema_editor):
    """
    Backfill the new ``slug`` column on every ProviderConfig row for
    backwards compatibility, copying it from each model's legacy field
    (``provider_slug``, ``idp_slug``, or a slugified LTI consumer key).
    """
    oauth2_model = apps.get_model('third_party_auth', 'OAuth2ProviderConfig')
    saml_model = apps.get_model('third_party_auth', 'SAMLProviderConfig')
    lti_model = apps.get_model('third_party_auth', 'LTIProviderConfig')
    for row in oauth2_model.objects.all():
        row.slug = row.provider_slug
        row.save()
    for row in saml_model.objects.all():
        row.slug = row.idp_slug
        row.save()
    for row in lti_model.objects.all():
        # LTI consumer keys may contain characters not valid in a slug.
        row.slug = slugify(row.lti_consumer_key)
        row.save()
class Migration(migrations.Migration):
    # Adds the common `slug` field to all three provider-config models and
    # backfills it from each model's legacy slug field via fill_slug_field.
    dependencies = [
        ('third_party_auth', '0018_auto_20180327_1631'),
    ]
    operations = [
        migrations.AddField(
            model_name='ltiproviderconfig',
            name='slug',
            field=models.SlugField(default='default', help_text='A short string uniquely identifying this provider. Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"', max_length=30),
        ),
        migrations.AddField(
            model_name='oauth2providerconfig',
            name='slug',
            field=models.SlugField(default='default', help_text='A short string uniquely identifying this provider. Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"', max_length=30),
        ),
        migrations.AddField(
            model_name='samlproviderconfig',
            name='slug',
            field=models.SlugField(default='default', help_text='A short string uniquely identifying this provider. Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"', max_length=30),
        ),
        # Data migration is one-way; reversing the migration only drops the
        # columns, so no reverse handler is needed.
        migrations.RunPython(fill_slug_field, reverse_code=migrations.RunPython.noop),
    ]
| eduNEXT/edunext-platform | common/djangoapps/third_party_auth/migrations/0019_consolidate_slug.py | Python | agpl-3.0 | 2,165 |
import numpy as np
import pytest
from rig.type_casts import (
float_to_fix, fix_to_float,
float_to_fp, fp_to_float,
NumpyFloatToFixConverter, NumpyFixToFloatConverter
)
import struct
# struct format characters for SIGNED integers, keyed by bit width.
sz = {
    8: 'b',
    16: 'h',
    32: 'i',
}
# Upper-case struct characters are the UNSIGNED counterparts.
SZ = {k: v.upper() for k, v in sz.items()}
class TestFloatToFix(object):
    """Test converting from a float to a fixed point.
    """
    @pytest.mark.parametrize(
        "signed, n_bits, n_frac",
        [(True, 32, 32),  # Too many frac bits
         (False, 32, 33),
         (False, -1, 3),
         (False, 32, -1),  # Negative
         ])
    def test_invalid_parameters(self, signed, n_bits, n_frac):
        # Constructing the converter itself should reject bad widths.
        with pytest.raises(ValueError):
            float_to_fix(signed, n_bits, n_frac)
    @pytest.mark.parametrize(
        "value, n_bits, n_frac, output",
        [(0.50, 8, 4, 0x08),
         (0.50, 8, 5, 0x10),
         (0.50, 8, 6, 0x20),
         (0.50, 8, 7, 0x40),
         (0.50, 8, 8, 0x80),
         (0.25, 8, 4, 0x04),
         (0.75, 8, 4, 0x0c),
         (1.75, 8, 4, 0x1c),
         (-1.75, 8, 4, 0x00),  # Clipped
         ])
    def test_no_saturate_unsigned(self, value, n_bits, n_frac, output):
        # float_to_fix and float_to_fp agree for unsigned, in-range values.
        assert float_to_fix(False, n_bits, n_frac)(value) == output
        assert float_to_fp(False, n_bits, n_frac)(value) == output
    @pytest.mark.parametrize(
        "v, n_bits, n_frac, output",
        [(0.50, 8, 4, 0x08),
         (0.50, 8, 5, 0x10),
         (0.50, 8, 6, 0x20),
         (0.50, 8, 7, 0x40),
         (0.25, 8, 4, 0x04),
         (0.75, 8, 4, 0x0c),
         (-.50, 8, 4, 0xf8),
         (-.50, 8, 5, 0xf0),
         (-.50, 8, 6, 0xe0),
         (-.50, 8, 7, 0xc0),
         (-.25, 8, 4, 0xfc),
         (-.75, 8, 4, 0xf4),
         (-.25, 8, 1, 0x00),
         (1.75, 8, 4, 0x1c),
         (-1.75, 8, 4, 0xe4),
         (-2.75, 8, 4, 0xd4),
         (-1.0, 8, 4, 0xf0),
         (-7.9375, 8, 4, 0x81),
         (-8, 8, 4, 0x80),
         (-16, 8, 4, 0x80),
         (-1.0, 8, 3, 0xf8),
         (-1.0, 8, 2, 0xfc),
         (-1.0, 8, 1, 0xfe),
         (-1.0, 16, 1, 0xfffe),
         (-1.0, 16, 2, 0xfffc),
         ])
    def test_no_saturate_signed(self, v, n_bits, n_frac, output):
        assert float_to_fix(True, n_bits, n_frac)(v) == output
        # Compare the two converters through struct packing: float_to_fix
        # yields the unsigned bit pattern, float_to_fp the signed value, so
        # they must produce identical bytes.
        assert (
            struct.pack(sz[n_bits], float_to_fp(True, n_bits, n_frac)(v)) ==
            struct.pack(SZ[n_bits], output)
        )
    @pytest.mark.parametrize(
        "value, n_bits, n_frac, output",
        [(2**4, 8, 4, 0xff),  # Saturate
         (2**4 - 1 + sum(2**-n for n in range(1, 6)), 8, 4, 0xff),  # Saturate
         ])
    def test_saturate_unsigned(self, value, n_bits, n_frac, output):
        # Values beyond the representable range clamp to the maximum.
        assert float_to_fix(False, n_bits, n_frac)(value) == output
        assert float_to_fp(False, n_bits, n_frac)(value) == output
class TestFloatToFp(object):
    """Edge cases for float_to_fp: negative and oversized fraction counts."""
    @pytest.mark.parametrize(
        "signed, n_bits, n_frac, value, output",
        ((True, 8, -2, 0.25, 0x0),
         (True, 8, -2, 4, 0x1),
         (True, 8, -2, -4, -0x1),
         (False, 8, -2, -4, 0x0),
         )
    )
    def test_negative_nfrac(self, signed, n_bits, n_frac, value, output):
        # Negative n_frac scales values down (the LSB is worth 2**-n_frac).
        assert float_to_fp(signed, n_bits, n_frac)(value) == output
    @pytest.mark.parametrize(
        "signed, n_bits, n_frac, value, output",
        ((True, 8, 8, -0.5, -0x80),
         (False, 8, 8, 0.5, 0x80),
         (False, 8, 9, 0.5, 0xff),
         (False, 8, 9, 0.25, 0x80),
         )
    )
    def test_large_nfrac(self, signed, n_bits, n_frac, value, output):
        # n_frac > n_bits: values near the top of the range saturate.
        assert float_to_fp(signed, n_bits, n_frac)(value) == output
class TestFixToFloat(object):
    """Test converting fixed-point bit patterns back to floats."""
    @pytest.mark.parametrize(
        "signed, n_bits, n_frac",
        [(True, 32, 32),  # Too many frac bits
         (False, 32, 33),
         (False, -1, 3),
         (False, 32, -1),  # Negative
         ])
    def test_invalid_parameters(self, signed, n_bits, n_frac):
        with pytest.raises(ValueError):
            fix_to_float(signed, n_bits, n_frac)
    @pytest.mark.parametrize(
        "bits, signed, n_bits, n_frac, value",
        [(0xff, False, 8, 0, 255.0),
         (0x81, True, 8, 0, -127.0),
         (0xff, False, 8, 1, 127.5),
         (0xf8, True, 8, 4, -0.5)
         ])
    def test_fix_to_float(self, bits, signed, n_bits, n_frac, value):
        assert value == fix_to_float(signed, n_bits, n_frac)(bits)
    @pytest.mark.parametrize(
        "bits, n_frac, value",
        [(0xff, 0, 255.0),
         (-0x7f, 0, -127.0),
         (0xff, 1, 127.5),
         (-0x08, 4, -0.5)
         ])
    def test_fp_to_float(self, bits, n_frac, value):
        # BUGFIX: the original method was missing ``self``, so pytest bound
        # the instance to ``bits`` and parametrization could not match the
        # declared argnames.
        assert value == fp_to_float(n_frac)(bits)
class TestNumpyFloatToFixConverter(object):
    """Tests for the vectorized float-to-fixed converter."""
    def test_init_fails(self):
        # 31 is not a supported word width (must be 8/16/32/64).
        with pytest.raises(ValueError):
            NumpyFloatToFixConverter(False, 31, 0)
    @pytest.mark.parametrize(
        "signed, n_bits, dtype, n_bytes",
        [(False, 8, np.uint8, 1),
         (True, 8, np.int8, 1),
         (False, 16, np.uint16, 2),
         (True, 16, np.int16, 2),
         (False, 32, np.uint32, 4),
         (True, 32, np.int32, 4),
         (False, 64, np.uint64, 8),
         (True, 64, np.int64, 8),
         ])
    def test_dtypes(self, signed, n_bits, dtype, n_bytes):
        """Check that the correct dtype is returned."""
        fpf = NumpyFloatToFixConverter(signed, n_bits, 0)
        assert fpf.dtype == dtype
        assert fpf.bytes_per_element == n_bytes
    @pytest.mark.parametrize(
        "n_bits, n_frac, values, dtype",
        [(8, 4, [0.5, 0.25, 0.125, 0.0625], np.uint8),
         (8, 3, [0.5, 0.25, 0.125, 0.0625], np.uint8),
         (8, 2, [0.5, 0.25, 0.125, 0.0625], np.uint8),
         (8, 1, [0.5, 0.25, 0.125, 0.0625], np.uint8),
         (8, 0, [0.5, 0.25, 0.125, 0.0625], np.uint8),
         (8, 8, [0.5, 0.25, 0.125, 0.0625], np.uint8),
         (8, 9, [0.5, 0.25, 0.125, 0.0625], np.uint8),
         (16, 12, [0.5, 0.25, 0.125, 0.0625], np.uint16),
         (32, 15, [0.5, 0.25, 0.125, 0.0625], np.uint32),
         ])
    def test_unsigned_no_saturate(self, n_bits, n_frac, values, dtype):
        # Create the formatter then call it on the array
        fpf = NumpyFloatToFixConverter(False, n_bits, n_frac)
        vals = fpf(np.array(values))
        # Check the values are correct (scalar converter is the reference)
        ftf = float_to_fp(False, n_bits, n_frac)
        assert np.all(vals == np.array([ftf(v) for v in values]))
        assert vals.dtype == dtype
    @pytest.mark.parametrize(
        "n_bits, n_frac, values, dtype",
        [(8, 4, [0.5, 0.25, 0.125, 0.0625, -0.5], np.int8),
         (8, 3, [0.5, 0.25, 0.125, 0.0625, -0.25], np.int8),
         (8, 2, [0.5, 0.25, 0.125, 0.0625, -0.33], np.int8),
         (8, 1, [0.5, 0.25, 0.125, 0.0625, -0.25], np.int8),
         (8, 0, [0.5, 0.25, 0.125, 0.0625, -0.23], np.int8),
         (8, 9, [0.5, 0.25, 0.125, 0.0625, -0.23], np.int8),
         (16, 12, [0.5, 0.25, 0.125, 0.0625, -0.45], np.int16),
         (32, 15, [0.5, 0.25, 0.125, 0.0625, -0.77], np.int32),
         ])
    def test_signed_no_saturate(self, n_bits, n_frac, values, dtype):
        # Create the formatter then call it on the array
        fpf = NumpyFloatToFixConverter(True, n_bits, n_frac)
        vals = fpf(np.array(values))
        # Check the values are correct (scalar converter is the reference)
        ftf = float_to_fp(True, n_bits, n_frac)
        assert np.all(vals == np.array([ftf(v) for v in values]))
        assert vals.dtype == dtype
    @pytest.mark.parametrize("signed", [True, False])
    @pytest.mark.parametrize(
        "n_bits, n_frac",
        [(8, 0), (8, 4), (16, 5), (32, 27)])
    def test_saturate(self, signed, n_bits, n_frac):
        # Build the values: first just beyond the maximum, second just below
        values = [2.0**(n_bits - n_frac - (1 if signed else 0)),
                  2.0**(n_bits - n_frac - (1 if signed else 0)) - 1]
        # Format
        fpf = NumpyFloatToFixConverter(signed, n_bits, n_frac)
        vals = fpf(np.array(values))
        c = {8: 'B', 16: 'H', 32: 'I'}[n_bits]
        # Check the values are correct by comparing raw bytes against the
        # scalar converter's output
        ftf = float_to_fix(signed, n_bits, n_frac)
        assert (  # pragma: no branch
            bytes(vals.data) ==
            struct.pack("{}{}".format(len(values), c),
                        *[ftf(v) for v in values])
        )
class TestNumpyFixToFloat(object):
    """Tests for converting fixed-point integer arrays back to floats."""
    @pytest.mark.parametrize(
        "values, dtype, n_frac, expected_values",
        [
            ([0xff], np.uint8, 4, np.array([15.9375])),
            ([0xf8], np.int8, 4, np.array([-.5])),
        ]
    )
    def test_standard(self, values, dtype, n_frac, expected_values):
        """Known bit patterns decode to the expected floating-point values."""
        fixed = np.array(values, dtype=dtype)
        converter = NumpyFixToFloatConverter(n_frac)
        decoded = converter(fixed)
        assert np.all(decoded == expected_values)
| project-rig/rig | tests/test_type_casts.py | Python | gpl-2.0 | 8,696 |
"""Test that schema creation catalog script behaves correctly."""
import os
import pytest
import xml_helpers.utils as xml_utils
from ipt.scripts.create_schema_catalog import main
from tests.testcommon import shell
from tests.testcommon.settings import TESTDATADIR
@pytest.mark.parametrize(
    ('sip', 'mets', 'catalog', 'expected_rewrite_uri', 'expected_return_code'),
    [('valid_1.7.1_xml_local_schemas', 'mets.xml', None, 1, 0),
     ('valid_1.7.1_xml_local_schemas', 'mets.xml', '/other/catalog.xml', 1, 0),
     ('valid_1.7.1_video_container', 'mets.xml', None, 0, 0),
     ('valid_1.7.1_xml_local_schemas', 'no_mets.xml', None, None, 117),
     ('invalid_1.7.1_xml_local_schemas_path', 'mets.xml', None, None, 117)],
    ids=['METS contain local schemas',
         'Different main catalog given from the default',
         'METS does not contain local schemas',
         'METS missing',
         'Invalid XML local schema'])
def test_create_schema_catalog(tmpdir,
                               sip,
                               mets,
                               catalog,
                               expected_rewrite_uri,
                               expected_return_code):
    """Test that the script will generate a schema catalog if mets.xml file
    can be read. Other than output parameter, other parameters given should
    be reflected within the schema catalog file.
    """
    output = tmpdir.join('my_catalog_schema.xml').strpath
    sip = os.path.join(TESTDATADIR, 'sips', sip)
    mets = os.path.join(sip, mets)
    # Build the CLI arguments; -c optionally overrides the main catalog.
    args = [mets, sip, output]
    if catalog:
        args.append('-c')
        args.append(catalog)
    (returncode, _, _) = shell.run_main(main, args)
    assert expected_return_code == returncode
    if expected_return_code == 0:
        root_element = xml_utils.readfile(output).getroot()
        # The catalog's xml:base must point at the SIP directory.
        assert root_element.attrib[xml_utils.xml_ns('base')].rstrip('/') == sip
        rewrite_uri_count = 0
        next_catalog_count = 0
        for child in root_element:
            if child.tag.endswith('rewriteURI'):
                rewrite_uri_count += 1
            if child.tag.endswith('nextCatalog'):
                next_catalog_count += 1
                if catalog:
                    # A custom main catalog must be referenced verbatim.
                    assert child.attrib['catalog'] == catalog
        # There should always be one catalog.
        assert next_catalog_count == 1
        assert rewrite_uri_count == expected_rewrite_uri
    else:
        # On failure, no catalog file may be left behind.
        assert os.path.isfile(output) is False
| Digital-Preservation-Finland/dpres-ipt | tests/scripts/create_schema_catalog_test.py | Python | lgpl-3.0 | 2,486 |
# Sketch - A Python-based interactive drawing program
# Copyright (C) 1997, 1998, 2001 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from types import StringType, TupleType, FunctionType
from Sketch import Publisher
from Sketch.const import CHANGED, SELECTION
from Sketch.warn import warn, warn_tb, INTERNAL
#
# Command Class
#
class Command(Publisher):
    """Runtime command bound to an object, described by a CommandClass.

    Python 2 code: uses `raise E, v`, `apply()` and old-style comparisons.
    """
    def __init__(self, cmd_class, object):
        # cmd_class holds the static description; object receives the calls.
        self.cmd_class = cmd_class
        self.object = object
    def __getattr__(self, attr):
        # Unknown attributes fall back to the static CommandClass; the
        # button name defaults to the menu name when not defined there.
        try:
            return getattr(self.cmd_class, attr)
        except AttributeError:
            if attr == 'button_name':
                return self.menu_name
            raise AttributeError, attr
    def get_method(self, path):
        """Resolve path (callable, name, or tuple of names) to a method.

        A tuple is walked attribute-by-attribute starting at self.object.
        """
        if callable(path):
            return path
        method = self.object
        if type(path) != TupleType:
            path = (path,)
        for name in path:
            method = getattr(method, name)
        return method
    def Invoke(self, args = ()):
        """Call the command's target with preset args plus the given args.

        Exceptions are logged as internal warnings rather than propagated.
        """
        if type(args) != TupleType:
            args = (args,)
        try:
            apply(self.get_method(self.command), self.args + args)
        except:
            warn_tb(INTERNAL)
    def Update(self):
        """Re-evaluate name, sensitivity and value; issue CHANGED on change."""
        # XXX: bitmaps and key_strokes should probably be also changeable
        changed = self.set_name(self.get_name())
        changed = self.set_sensitive(self.get_sensitive()) or changed
        changed = self.set_value(self.get_value()) or changed
        if changed:
            self.issue(CHANGED)
    def get_name(self):
        """Return the current menu name, honouring a name callback if set."""
        if self.name_cb:
            method = self.get_method(self.name_cb)
            if method:
                return method()
        return self.menu_name
    def set_name(self, menu_name = None):
        # Returns whether the stored menu name actually changed.
        changed = self.menu_name != menu_name
        if changed:
            self.menu_name = menu_name
        return changed
    def get_sensitive(self):
        """Evaluate the sensitivity callback; 1 means sensitive (enabled)."""
        #print 'get_sensitive', self
        if self.sensitive_cb:
            method = self.get_method(self.sensitive_cb)
            if method:
                return method()
            else:
                warn(INTERNAL, 'no method for sensitive_cb (%s)',
                     self.sensitive_cb)
            return 0
        return 1
    def set_sensitive(self, sensitive):
        # Returns whether sensitivity actually changed.
        changed = self.sensitive != sensitive
        if changed:
            self.sensitive = sensitive
        return changed
    def get_value(self):
        # Value callback takes precedence over the stored value.
        if self.value_cb:
            method = self.get_method(self.value_cb)
            if method:
                return method()
        return self.value
    def set_value(self, value):
        # Returns whether the stored value actually changed.
        changed = self.value != value
        if changed:
            self.value = value
        return changed
    def GetKeystroke(self):
        return self.key_stroke
    def GetValue(self):
        return self.value
    def IsOn(self):
        """True when the current value equals the configured 'on' value."""
        return self.value == self.value_on
    def InContext(self):
        # Plain commands are always applicable in the current context.
        return 1
    def set_bitmap(self, bitmap):
        # Only a truthy bitmap is stored; returns whether it changed.
        if bitmap:
            changed = self.bitmap != bitmap
            self.bitmap = bitmap
            return changed
        return 0
    def __repr__(self):
        return 'Command: %s' % self.name
class CommandClass:
    """Static description of a command: names, callbacks and defaults.

    Instances are templates; InstantiateFor() creates the runtime Command
    bound to a concrete object.
    """
    cmd_class = Command
    # default attributes
    menu_name = '???'
    bitmap = None
    key_stroke = None
    name_cb = None
    sensitive_cb = None
    sensitive = 1
    value_cb = None
    value = 0
    value_on = 1
    value_off = 0
    is_command = 1
    is_check = 0
    invoke_with_keystroke = 0
    callable_attributes = ('name_cb', 'sensitive_cb', 'value_cb')
    def __init__(self, name, command, subscribe_to = None, args = (),
                 is_check = 0, **rest):
        self.name = name
        self.command = command
        self.subscribe_to = subscribe_to
        # Always store args as a tuple so Invoke can concatenate them.
        if type(args) != TupleType:
            self.args = (args,)
        else:
            self.args = args
        # Extra keyword arguments become attributes overriding the defaults.
        for key, value in rest.items():
            setattr(self, key, value)
        if is_check:
            self.is_check = 1
            self.is_command = 0
    def InstantiateFor(self, object):
        """Create a Command bound to object and wire its change subscription.

        A tuple subscribe_to walks attributes of object; its last element
        is the channel name to subscribe cmd.Update to.
        """
        cmd = self.cmd_class(self, object)
        if self.subscribe_to:
            if type(self.subscribe_to) == TupleType:
                attrs = self.subscribe_to[:-1]
                for attr in attrs:
                    object = getattr(object, attr)
                subscribe_to = self.subscribe_to[-1]
            else:
                subscribe_to = self.subscribe_to
            object.Subscribe(subscribe_to, cmd.Update)
        return cmd
    def __repr__(self):
        return 'CommandClass: %s' % self.name
class ObjectCommand(Command):
    """Command dispatched through the document to the current object."""
    def get_method(self, path):
        # String paths resolve via the document's object-method lookup;
        # anything else uses the generic Command resolution.
        if type(path) == type(""):
            return self.object.document.GetObjectMethod(self.object_class,path)
        return Command.get_method(self, path)
    def Invoke(self, args = ()):
        """Invoke via the document so the call is routed to the object.

        Exceptions are logged as internal warnings rather than propagated.
        """
        if type(args) != TupleType:
            args = (args,)
        try:
            apply(self.object.document.CallObjectMethod,
                  (self.object_class, self.menu_name, self.command) \
                  + self.args + args)
        except:
            warn_tb(INTERNAL)
    def get_sensitive(self):
        # Only sensitive when the current document object is compatible.
        if self.object.document.CurrentObjectCompatible(self.object_class):
            return Command.get_sensitive(self)
        return 0
    def GetKeystroke(self):
        return self.key_stroke
    def GetValue(self):
        return self.value
    def InContext(self):
        """True when the current document object matches this command's class."""
        return self.object.document.CurrentObjectCompatible(self.object_class)
    def __repr__(self):
        return 'ObjectCommand: %s' % self.name
class ObjectCommandClass(CommandClass):
    """CommandClass whose commands are bound to a document object class."""
    cmd_class = ObjectCommand
    # The target object class is filled in lazily via SetClass().
    object_class = None
    def SetClass(self, aclass):
        """Record aclass as the target class unless one was already set."""
        if self.object_class is not None:
            return
        self.object_class = aclass
#
#
#
class Commands:
    """Container of command objects supporting nested group lookup."""
    def Update(self):
        """Refresh every command stored on this container."""
        for entry in self.__dict__.values():
            entry.Update()
    def __getitem__(self, key):
        """Allow dictionary-style access to stored commands."""
        return getattr(self, key)
    def Get(self, name):
        """Look up a command by name, recursing into nested Commands groups.

        Returns None when the name cannot be resolved anywhere.
        """
        try:
            return getattr(self, name)
        except AttributeError:
            pass
        # Not a direct attribute: search any nested command groups.
        for entry in self.__dict__.values():
            if entry.__class__ == Commands:
                found = entry.Get(name)
                if found:
                    return found
        return None
#
#
#
class Keymap:
    """Maps keystroke strings to the commands they should invoke.

    A command may declare a single keystroke or a tuple of alternative
    keystrokes; every declared stroke is registered.
    """
    def __init__(self):
        # stroke string -> command object
        self.map = {}
    def AddCommand(self, command):
        """Register every keystroke declared by command.

        A stroke that is already bound is rebound to the new command, and
        an internal warning is emitted.
        """
        key_stroke = command.GetKeystroke()
        if key_stroke:
            # Normalize a single stroke to a one-element sequence.
            if isinstance(key_stroke, str):
                key_stroke = (key_stroke,)
            for stroke in key_stroke:
                # `in` replaces dict.has_key(), which was removed in Python 3.
                if stroke in self.map:
                    # XXX: should be user visible if keybindings can be
                    # changed by user
                    warn(INTERNAL, 'Warning: Binding %s to %s replaces %s',
                         command.name, stroke, self.map[stroke].name)
                self.map[stroke] = command
    def MapKeystroke(self, stroke):
        """Return the command bound to stroke, or None if unbound."""
        return self.map.get(stroke)
#
#
#
def AddCmd(list, name, menu_name, method = None, **kw):
    """Build an ObjectCommandClass and append it to list.

    name and method may be given as functions, in which case their
    (Python 2) func_name is used. The command always subscribes to
    selection changes.
    """
    if type(name) == FunctionType:
        name = name.func_name
        # When name is a function and no method was given, use the same name.
        if method is None:
            method = name
    elif type(method) == FunctionType:
        method = method.func_name
    kw['menu_name'] = menu_name
    kw['subscribe_to'] = SELECTION
    cmd = apply(ObjectCommandClass, (name, method), kw)
    list.append(cmd)
| shumik/skencil-c | Sketch/UI/command.py | Python | gpl-2.0 | 7,045 |
#!/usr/bin/env python3
"Load data, create the validation split, train a random forest, evaluate"
"uncomment the appropriate lines to save processed data to disk"
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.metrics import roc_auc_score as AUC
from sklearn.metrics import log_loss
import os
# Path to the training CSV is supplied via the TRAINING environment variable.
input_file = os.getenv('TRAINING')
#
# Load the data and hold out a fixed-size validation split.
d = pd.read_csv(input_file, header=0)
features = [f for f in list(d) if 'feature' in f]
train, val = train_test_split( d, test_size = 5000 )
# train, predict, evaluate
n_trees = 100
rf = RF( n_estimators = n_trees, verbose = True )
rf.fit(train[features], train.target)
p = rf.predict_proba(val[features])
# Score the positive-class probabilities with log loss and AUC.
ll = log_loss(val.target.values, p[:,1])
auc = AUC( val.target.values, p[:,1] )
print("AUC: {:.2%}, log loss: {:.2%}".format(auc, ll))
| altermarkive/Resurrecting-JimFleming-Numerai | src/ml-zygmuntz--numer.ai/validate.py | Python | mit | 879 |
"""Tests the isort API module"""
import os
from io import StringIO
from unittest.mock import MagicMock, patch
import pytest
from isort import ImportKey, api
from isort.settings import Config
# Shared fixture data: unsorted imports, the expected sorted result, and
# the diff isort should report when asked to show changes.
imperfect_content = "import b\nimport a\n"
fixed_content = "import a\nimport b\n"
fixed_diff = "+import a\n import b\n-import a\n"
@pytest.fixture
def imperfect(tmpdir):
    """Create a file with unsorted imports and return its py.path handle."""
    target = tmpdir.join("test_needs_changes.py")
    target.write_text(imperfect_content, "utf8")
    return target
def test_sort_file_with_bad_syntax(tmpdir) -> None:
    """Atomic sorting of a syntactically broken file warns instead of crashing."""
    tmp_file = tmpdir.join("test_bad_syntax.py")
    tmp_file.write_text("""print('mismatching quotes")""", "utf8")
    with pytest.warns(UserWarning):
        api.sort_file(tmp_file, atomic=True)
    with pytest.warns(UserWarning):
        api.sort_file(tmp_file, atomic=True, write_to_stdout=True)
def test_sort_file(imperfect) -> None:
    """sort_file rewrites the file with its imports sorted."""
    assert api.sort_file(imperfect)
    assert imperfect.read() == fixed_content
def test_sort_file_in_place(imperfect) -> None:
    """overwrite_in_place writes the sorted result back to the same file."""
    assert api.sort_file(imperfect, overwrite_in_place=True)
    assert imperfect.read() == fixed_content
def test_sort_file_to_stdout(capsys, imperfect) -> None:
    """write_to_stdout emits the sorted content using OS line endings."""
    assert api.sort_file(imperfect, write_to_stdout=True)
    out, _ = capsys.readouterr()
    assert out == fixed_content.replace("\n", os.linesep)
def test_other_ask_to_apply(imperfect) -> None:
    """ask_to_apply only writes the change when the user answers yes."""
    # First show diff, but ensure change wont get written by asking to apply
    # and ensuring answer is no.
    with patch("isort.format.input", MagicMock(return_value="n")):
        assert not api.sort_file(imperfect, ask_to_apply=True)
        assert imperfect.read() == imperfect_content
    # Then run again, but apply the change (answer is yes)
    with patch("isort.format.input", MagicMock(return_value="y")):
        assert api.sort_file(imperfect, ask_to_apply=True)
        assert imperfect.read() == fixed_content
def test_check_file_no_changes(capsys, tmpdir) -> None:
    """check_file passes on an already-sorted file and prints no diff."""
    perfect = tmpdir.join("test_no_changes.py")
    perfect.write_text("import a\nimport b\n", "utf8")
    assert api.check_file(perfect, show_diff=True)
    out, _ = capsys.readouterr()
    assert not out
def test_check_file_with_changes(capsys, imperfect) -> None:
    """check_file fails on an unsorted file and prints the expected diff."""
    assert not api.check_file(imperfect, show_diff=True)
    out, _ = capsys.readouterr()
    assert fixed_diff.replace("\n", os.linesep) in out
def test_sorted_imports_multiple_configs() -> None:
    """Passing both a Config object and config kwargs is rejected."""
    with pytest.raises(ValueError):
        api.sort_code_string("import os", config=Config(line_length=80), line_length=80)
def test_diff_stream() -> None:
    """sort_stream with show_diff writes the diff to the output stream."""
    output = StringIO()
    assert api.sort_stream(StringIO("import b\nimport a\n"), output, show_diff=True)
    output.seek(0)
    assert fixed_diff in output.read()
def test_sort_code_string_mixed_newlines():
    """The dominant line ending is preserved when the input mixes styles."""
    assert api.sort_code_string("import A\n\r\nimportA\n\n") == "import A\r\n\r\nimportA\r\n\n"
def test_find_imports_in_file(imperfect):
    """find_imports_in_file reports the modules imported by the file."""
    found_imports = list(api.find_imports_in_file(imperfect))
    assert "b" in [found_import.module for found_import in found_imports]
def test_find_imports_in_code():
    """Each uniqueness level collapses progressively more duplicate imports."""
    code = """
from x.y import z as a
from x.y import z as a
from x.y import z
import x.y
import x
"""
    # 5 raw imports; exact-duplicate lines collapse to 4; by attribute to 3
    # (z-as-a, z, x.y/x); by module to 2 (x.y, x); by package to 1 (x).
    assert len(list(api.find_imports_in_code(code))) == 5
    assert len(list(api.find_imports_in_code(code, unique=True))) == 4
    assert len(list(api.find_imports_in_code(code, unique=ImportKey.ATTRIBUTE))) == 3
    assert len(list(api.find_imports_in_code(code, unique=ImportKey.MODULE))) == 2
    assert len(list(api.find_imports_in_code(code, unique=ImportKey.PACKAGE))) == 1
| PyCQA/isort | tests/unit/test_api.py | Python | mit | 3,619 |
import os, sys
from fastlmm.util.runner import *
import base64
import logging
import fastlmm.util.util as util
import cPickle as pickle
class LocalReducer: # implements IRunner
    """Runner that reads pickled task results from a stream and reduces them.

    Results arrive one per line as base64-encoded, zlib-compressed pickles
    tagged with a task index; they are decoded and fed to the
    distributable's reduce().
    """
    def __init__(self, taskcount, result_file, mkl_num_threads, logging_handler=logging.StreamHandler(sys.stdout), instream=sys.stdin):
        # Reset the root logger so exactly the supplied handler is active.
        logger = logging.getLogger()
        if not logger.handlers:
            logger.setLevel(logging.INFO)
        for h in list(logger.handlers):
            logger.removeHandler(h)
        if logger.level == logging.NOTSET:
            logger.setLevel(logging.INFO)
        logger.addHandler(logging_handler)
        self.taskcount = taskcount
        self.result_file = result_file
        if mkl_num_threads != None:
            os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
        # A string instream is treated as a file name to open for reading.
        if isinstance(instream, str):
            self.instream = open(instream,"r")
        else:
            self.instream = instream
    def work_sequence_from_stdin(self):
        """Yield unpickled results parsed from tagged lines of the instream.

        Lines that do not start with '<index>\\tuu\\t' (e.g. stray log
        output) are silently skipped.
        """
        import re
        import zlib
        uuencodePattern = re.compile("[0-9]+\tuu\t")
        for line in self.instream:
            #e.g. 000 gAJ
            if None != uuencodePattern.match(line): # reminder: python's "match" looks for match at the start of the string
                # hack to get around info messages in stdout
                taskindex, uu, encoded = line.split('\t')
                c = base64.b64decode(encoded)
                s = zlib.decompress(c)
                logging.info("taskindex={0}, len(encoded)={1}, len(zipped)={2}, len(pickle)={3}".format(taskindex,len(encoded),len(c),len(s)))
                result = pickle.loads(s)
                yield result
    def run(self, original_distributable):
        """Reduce the streamed results and optionally pickle them to disk.

        Returns the reduced result.
        """
        result_sequence = self.work_sequence_from_stdin()
        shaped_distributable = shape_to_desired_workcount(original_distributable, self.taskcount)
        if shaped_distributable.work_count != self.taskcount : raise Exception("Assert: expect workcount == taskcount")
        result = shaped_distributable.reduce(result_sequence)
        #close the instream if it is a file?
        #Check that all expected output files are there
        JustCheckExists(doPrintOutputNames=True).output(original_distributable)
        #Pickle the result to a file
        #logging.info("AAA\n\n\n\nABCwd='{0}'\n\nfile='{1}'DEF\n\n\nZZZ".format(os.getcwd(),self.output_file))
        if self.result_file is not None:
            util.create_directory_if_necessary(self.result_file)
            with open(self.result_file, mode='wb') as f:
                pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
        return result
| MicrosoftGenomics/FaST-LMM | fastlmm/util/runner/LocalReducer.py | Python | apache-2.0 | 2,646 |
# coding: utf8
# Project system and access-control (permissions) package
import logging
import os
import sys
import unittest
from sickchill.oldbeard import browser
class BrowserTestAll(unittest.TestCase):
    """
    Test methods in oldbeard.browser
    """
    def setUp(self):
        # Directory containing this test file; used as the browse root.
        self.here = os.path.normpath(os.path.dirname(__file__))
    @unittest.skipUnless(os.name == 'nt', 'Test on Windows only')
    def test_get_win_drives(self):
        """
        Test getWinDrives
        """
        drives = browser.getWinDrives()
        self.assertIsNotNone(drives)
        self.assertIn('C', drives)
    def test_get_file_list(self):
        """
        Test getFileList
        """
        # Request files and folders, allowing .py files and images.
        file_list = browser.getFileList(self.here, True, ['py', 'images'])
        self.assertIsNotNone(file_list)
        for entry in file_list:
            # Every entry must carry the full metadata dictionary.
            self.assertTrue('name' in entry)
            self.assertTrue('path' in entry)
            self.assertTrue('isImage' in entry)
            self.assertTrue('isFile' in entry)
            self.assertTrue('isAllowed' in entry)
            if entry['name'].endswith(('.jpg', '.jpeg', '.png', '.tiff', '.gif')):
                self.assertTrue(entry['isImage'])
            else:
                self.assertFalse(entry['isImage'])
            if entry['name'].endswith('.py') or entry['isImage']:
                self.assertTrue(entry['isFile'])
            else:
                self.assertFalse(entry['isFile'])
            self.assertTrue(entry['isAllowed'])
        # folders only
        file_list = browser.getFileList(self.here, False, [])
        self.assertIsNotNone(file_list)
        for entry in file_list:
            self.assertTrue('name' in entry)
            self.assertTrue('path' in entry)
            self.assertTrue('isImage' in entry)
            self.assertTrue('isFile' in entry)
            self.assertTrue('isAllowed' in entry)
            # With files excluded, nothing may be flagged as image or file.
            self.assertFalse(entry['isImage'])
            self.assertFalse(entry['isFile'])
            self.assertTrue(entry['isAllowed'])
    def test_folders_at_path(self):
        """
        Test foldersAtPath
        """
        # A nonexistent path falls back to the nearest existing parent.
        test_list = browser.foldersAtPath(os.path.join(self.here, 'not_a_real_path'))
        self.assertEqual(test_list[0]['currentPath'], self.here)
        test_list = browser.foldersAtPath('')
        if os.name == 'nt':
            # On Windows the empty path lists the drive letters.
            self.assertEqual(test_list[0]['currentPath'], 'Root')
            drives = browser.getWinDrives()
            self.assertEqual(len(drives), len(test_list[1:]))
            for item in test_list[1:]:
                self.assertTrue(item['path'].strip(':\\') in drives)
        else:
            self.assertEqual(test_list[0]['currentPath'], '/')
        # includeParent adds a '..' entry pointing at the parent directory.
        test_list = browser.foldersAtPath(os.path.join(self.here), includeParent=True)
        self.assertEqual(test_list[0]['currentPath'], self.here)
        self.assertEqual(test_list[1]['name'], '..')
        self.assertEqual(test_list[1]['path'], os.path.dirname(self.here))
if __name__ == '__main__':
    # Allow running this module directly with verbose debug logging.
    logging.basicConfig(stream=sys.stderr)
    logging.getLogger(__name__).setLevel(logging.DEBUG)
    SUITE = unittest.TestLoader().loadTestsFromTestCase(BrowserTestAll)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
| Vagab0nd/SiCKRAGE | tests/browser_tests.py | Python | gpl-3.0 | 3,177 |
# Copyright 2012 Lee Verberne <lee@blarg.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import os, os.path
import shutil as sh
import sys
from fabric.api import abort, local, prompt, warn
# Fabric 1.0 changed changed the scope of cd() to only affect remote calls.
# This bit of kludgery maintains compatibility of this file with fabric 0.9,
# but it is only possible because no remote calls are made in this file
try:
from fabric.api import lcd as cd
except ImportError:
from fabric.api import cd
from ubik import builder, packager
# filemap copies files directly from source to root, there is no build step
defenv = builder.BuildEnv('_root','_root','.')
# Exactly one of these is set via register(): a plain filemap list, or a
# table mapping package type ('deb'/'rpm') to a filemap.
file_map, file_map_table = None, None
def _install_file_map(fmap, installdir):
for src, dst in fmap:
_install(src, os.path.join(installdir,dst))
def _install(src, dst):
if src and os.path.isdir(src):
sh.copytree(src, dst)
else:
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
if src:
sh.copy(src, dst)
def build(pkgtype='deb', env=defenv):
    'Builds this package into a directory tree'
    # Use the shared filemap if registered, else the per-package-type table.
    if file_map:
        _install_file_map(file_map, env.rootdir)
    elif file_map_table:
        _install_file_map(file_map_table[pkgtype], env.rootdir)
    else:
        abort("You must register a filemap with this module using register().")
def clean(env=defenv):
    'Remove build directory and packages'
    with cd(env.srcdir):
        # Remove build artifacts, built packages and stray .pyc files.
        local('rm -rf _* *.deb *.rpm', capture=False)
        local('find . -name \*.pyc -print -exec rm \{\} \;', capture=False)
def deb(version=None):
    'Build a debian package'
    # Thin wrapper over package() with pkgtype fixed to 'deb'.
    package(version, 'deb')
def debiandir(version='0.0', env=defenv):
    "Generate DEBIAN dir in rootdir, but don't build package"
    # Ensure the file tree exists before writing the control files.
    if not env.exists('builddir'):
        build('deb', env)
    packager.DebPackage('package.ini', env).debiandir(version)
def filelist(pkgtype='deb', env=defenv):
    '''Outputs default filelist as json (see details)
    Generates and prints to stdout a filelist json that can be modified and
    used with package.ini's "filelist" option to override the default.
    Useful for setting file modes in RPMs'''
    # The tree must exist before the filelist can be generated from it.
    if not env.exists('builddir'):
        build(pkgtype, env)
    packager.Package('package.ini', env).filelist()
def package(version=None, pkgtype='deb', env=defenv):
    'Creates deployable packages'
    # Ask interactively when no version was passed on the command line.
    if not version:
        version = prompt("What version did you want packaged there, hotshot?")
    if not env.exists('builddir'):
        warn('Implicitly invoking build')
        build(pkgtype, env)
    pkg = packager.Package('package.ini', env, pkgtype)
    pkg.build(version)
def register(filemap_or_table):
    """Register a filemap list, or a dict mapping package type to filemap."""
    global file_map, file_map_table
    if isinstance(filemap_or_table, dict):
        file_map_table = filemap_or_table
        return
    if isinstance(filemap_or_table, list):
        file_map = filemap_or_table
        return
    abort("I don't even know what you're talking about.")
def rpm(version=None):
    'Build a Red Hat package'
    # Thin wrapper over package() with pkgtype fixed to 'rpm'.
    package(version, 'rpm')
def rpmspec(version='0.0', env=defenv):
    'Output the generated RPM spec file'
    # The tree must exist so the spec can enumerate its files.
    if not env.exists('builddir'):
        build('rpm', env)
    packager.RpmPackage('package.ini', env).rpmspec(sys.stdout, version)
| kafana/ubik | lib/ubik/fab/filemap.py | Python | gpl-3.0 | 3,975 |
# -*- coding:utf-8 -*-
import unittest
import mock
from employee.models import EmployeeQuestionnaire
from ..views import QuestionnaireSavedMatchesView
class QuestionnaireSavedMatchesViewTestCase(unittest.TestCase):
    """Unit tests for QuestionnaireSavedMatchesView using mocked requests."""
    def test_get_query_should_return_all_user_matches(self):
        """get_queryset must return the request user's match_set unchanged."""
        # setup
        view = QuestionnaireSavedMatchesView()
        request = mock.Mock()
        view.request = request
        # action
        returned_value = view.get_queryset()
        # assert
        self.assertEqual(1, request.user.match_set.all.call_count)
        self.assertEqual(id(request.user.match_set.all.return_value),
                         id(returned_value))
    def test_get_should_call_template_response_with_template(self):
        """get() must render exactly once with the saved-matches template."""
        # setup
        view = QuestionnaireSavedMatchesView()
        request = mock.Mock()
        view.request = request
        view.get_context_data = mock.Mock()
        view.response_class = mock.Mock()
        view.get_queryset = mock.Mock()
        template_name = 'matches/questionnaire_saved_matches.html'
        # action
        view.get(request)
        # assert
        self.assertEqual(1, view.response_class.call_count)
        self.assertEqual(template_name,
                         view.response_class.call_args[1]['template'][0])
| hellhovnd/dentexchange | dentexchange/apps/matches/tests/test_questionnaire_saved_matches_view.py | Python | bsd-3-clause | 1,262 |
# -*- coding: utf-8 -*-
#
# test_doctests.py - Run any doctests in the package.
# Copyright (C) 2008 by Drew Hess <dhess@bothan.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Run doctests."""
import unittest
import doctest
import util
suite = unittest.TestSuite()
# Collect every doc-file test shipped with the package into one suite.
for f in util.doc_file_tests():
    suite.addTest(doctest.DocFileSuite(f, module_relative=False))
# Run the suite immediately on import/execution of this module.
runner = unittest.TextTestRunner()
runner.run(suite)
| dhess/lobbyists | lobbyists/tests/test_doctests.py | Python | gpl-3.0 | 1,016 |
from sqlalchemy import and_
from codeMarble_Web.database import dao
from codeMarble_Web.model.notice import Notice
def insert_notice(userIndex, content):
    """Build a new unread Notice for the given user.

    NOTE(review): this only constructs the object; the caller appears to be
    responsible for adding it to the session and committing — confirm
    against call sites.
    """
    return Notice(userIndex=userIndex, content=content, isRead=False)
def select_notice(userIndex):
    """Return a query for all notices belonging to userIndex."""
    return dao.query(Notice).\
        filter(Notice.userIndex == userIndex)
def update_notice_read(noticeIndex, userIndex):
    """Mark the notice as read; userIndex guards ownership.

    Returns the number of rows updated (0 when no matching notice).
    """
    return dao.query(Notice).\
        filter(and_(Notice.noticeIndex == noticeIndex,
                    Notice.userIndex == userIndex)).\
        update(dict(isRead = True))
#@+leo-ver=5-thin
#@+node:2014spring.20140628104046.1746: * @file openshiftlibs.py
#@@language python
#@@tabwidth -4
#@+others
#@+node:2014spring.20140628104046.1747: ** openshiftlibs declarations
#!/usr/bin/env python
import hashlib, inspect, os, random, sys
#@+node:2014spring.20140628104046.1748: ** get_openshift_secret_token
# Gets the secret token provided by OpenShift
# or generates one (this is slightly less secure, but good enough for now)
def get_openshift_secret_token():
    """Return the OpenShift secret token, or None when unavailable.

    Prefers the OPENSHIFT_SECRET_TOKEN environment variable; otherwise
    derives a deterministic token from the app name and UUID (slightly
    less secure, but stable per deployment).
    """
    explicit = os.getenv('OPENSHIFT_SECRET_TOKEN')
    if explicit is not None:
        return explicit
    app_name = os.getenv('OPENSHIFT_APP_NAME')
    app_uuid = os.getenv('OPENSHIFT_APP_UUID')
    if app_name is None or app_uuid is None:
        return None
    digest_input = "{0}-{1}".format(app_name, app_uuid).encode('utf-8')
    return hashlib.sha256(digest_input).hexdigest()
#@+node:2014spring.20140628104046.1749: ** openshift_secure
# Loop through all provided variables and generate secure versions
# If not running on OpenShift, returns defaults and logs an error message
#
# This function calls secure_function and passes an array of:
# {
# 'hash': generated sha hash,
# 'variable': name of variable,
# 'original': original value
# }
def openshift_secure(default_keys, secure_function = 'make_secure_key'):
    """Replace default secret values with deployment-specific secure ones.

    default_keys maps variable names to default values. On OpenShift each
    value is replaced via secure_function (looked up by name in the global
    and local scope); off OpenShift the defaults are returned and a warning
    naming the calling file is printed to stderr.
    """
    # Attempts to get secret token
    my_token = get_openshift_secret_token()
    # Only generate random values if on OpenShift
    # NOTE: my_list aliases default_keys, so the input dict is mutated.
    my_list = default_keys
    if my_token is not None:
        # Loop over each default_key and set the new value
        for key, value in default_keys.items():
            # Create hash out of token and this key's name
            sha = hashlib.sha256(my_token.encode('utf-8') + '-'.encode('utf-8') + key.encode('utf-8')).hexdigest()
            # Pass a dictionary so we can add stuff without breaking existing calls
            vals = { 'hash': sha, 'variable': key, 'original': value }
            # Call user specified function or just return hash
            my_list[key] = sha
            if secure_function is not None:
                # Pick through the global and local scopes to find the function.
                possibles = globals().copy()
                possibles.update(locals())
                supplied_function = possibles.get(secure_function)
                if not supplied_function:
                    raise Exception("Cannot find supplied security function")
                else:
                    my_list[key] = supplied_function(vals)
    else:
        # Not on OpenShift: warn, pointing the user at the calling file.
        calling_file = inspect.stack()[1][1]
        if os.getenv('OPENSHIFT_REPO_DIR'):
            base = os.getenv('OPENSHIFT_REPO_DIR')
            # NOTE(review): str.replace returns a new string; this call has
            # no effect as written — the full path is printed regardless.
            calling_file.replace(base,'')
        sys.stderr.write("OPENSHIFT WARNING: Using default values for secure variables, please manually modify in " + calling_file + "\n")
    return my_list
#@+node:2014spring.20140628104046.1750: ** make_secure_key
# This function transforms default keys into per-deployment random keys;
def make_secure_key(key_info):
    """Derive a deterministic pseudo-random key from a hash digest.

    key_info is a dict with 'hash' (hex digest used to seed the RNG),
    'variable' (the key's name, unused here) and 'original' (the default
    value whose length the generated key must match).
    """
    hashcode = key_info['hash']
    original = key_info['original']
    # These are the legal password characters
    # as per the Django source code
    # (django/contrib/auth/models.py)
    chars = 'abcdefghjkmnpqrstuvwxyz'
    chars += 'ABCDEFGHJKLMNPQRSTUVWXYZ'
    chars += '23456789'
    # Use the hash to seed the RNG so the key is stable per deployment.
    random.seed(int("0x" + hashcode[:8], 0))
    # Create a random string the same length as the default.
    # Bug fix: random.randint's upper bound is inclusive, so the previous
    # randint(0, len(chars)) could return len(chars), making the
    # chars[pos:pos+1] slice empty and silently yielding a key shorter
    # than the original value.
    rand_key = ''
    for _ in range(len(original)):
        rand_pos = random.randint(0, len(chars) - 1)
        rand_key += chars[rand_pos]
    # Reset the RNG so later callers get fresh entropy.
    random.seed()
    # Set the value
    return rand_key
#@-others
#@-leo
| coursemdetw/2015wcms | wsgi/openshift/openshiftlibs.py | Python | gpl-2.0 | 3,730 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo/OpenERP 8.0 addon manifest for blocking edits of outgoing pickings.
    'name': 'Stock Picking Control',
    'version': '8.0.1.0.0',
    'category': 'Warehouse Management',
    'sequence': 14,
    'summary': '',
    'description': """
Stock Picking Control
=====================
Block edition of out picking
""",
    'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    'images': [
    ],
    'depends': [
        'stock',
    ],
    # Views extending stock picking and company forms.
    'data': [
        'view/stock_view.xml',
        'view/company_view.xml',
    ],
    'demo': [
    ],
    'test': [
    ],
    'installable': True,
    'auto_install': False,
    'application': False,
}
| dvitme/odoo-addons | stock_picking_control/__openerp__.py | Python | agpl-3.0 | 1,610 |
'''This module provides a class for Balances calls to the CC API'''
from currencycloud.http import Http
from currencycloud.resources import PaginatedCollection, Balance, MarginBalanceTopUp
class Balances(Http):
    '''This class provides an interface to the Balances endpoints of the CC API'''
    def for_currency(self, currency, **kwargs):
        """Fetch the balance record for a single currency.

        The response includes the date the balance was last updated.
        """
        response = self.get('/v2/balances/' + currency, query=kwargs)
        return Balance(self, **response)
    def find(self, **kwargs):
        """Search a range of balances and return a paginated collection.

        Useful for inspecting historic balances.
        """
        response = self.get('/v2/balances/find', query=kwargs)
        balances = [Balance(self, **attrs) for attrs in response['balances']]
        return PaginatedCollection(balances, response['pagination'])
    def top_up_margin(self, **kwargs):
        """Top up the margin balance and return the resulting record."""
        response = self.post('/v2/balances/top_up_margin', kwargs)
        return MarginBalanceTopUp(self, **response)
    def first(self, **params):
        """Return only the first balance matching the search parameters."""
        params['per_page'] = 1
        return self.find(**params)[0]
| CurrencyCloud/currencycloud-python | src/currencycloud/clients/balances.py | Python | mit | 1,295 |
"""
Uses a folder full of SMOS *.dbl files, converts them with the ESA snap command
line tool pconvert.exe to IMG
Uses then arcpy to to convert IMG to GeoTIFF
and crops them in the process to a specified extent and compresses them
"""
import os, subprocess, shutil
import arcpy
from arcpy import env
from arcpy.sa import *
# NOTE(review): Python 2 script (uses the print statement); input/output
# paths and the pconvert.exe location are hard-coded for one machine.
# folder containing the DBL files
inFol = "D:/Test/SMOS/"
outFol = "D:/Test/SMOStif/"
# .img and tif output folder
imgFol = outFol + "IMGs/"
tifFol = outFol + "Tiffs/"
# ArcGIS Environmnent settings
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput = True
arcpy.env.pyramid = "NONE"
arcpy.env.extent = "85 40 125 55" #XMin, YMin, XMax, YMax
arcpy.env.rasterStatistics = 'STATISTICS 1 1'
# create a list of exisiting output Tiffs, these will be skipped
exList = []
for tiff in os.listdir(tifFol):
    if tiff[-3:] == "tif":
        exList.append(tiff[:-4])
# Convert each remaining DBL: DBL -> BEAM-DIMAP/IMG via pconvert, then
# IMG -> cropped GeoTIFF via arcpy, cleaning the intermediate folder.
for dblFile in os.listdir(inFol):
    if dblFile[:-4] in exList:
        continue
    else:
        #dblFile = "SM_OPER_MIR_SMUDP2_20150715T101051_20150715T110403_620_001_1.DBL"
        dblPath = inFol + dblFile
        # SNAP's pconvert.exe path
        pcon = "C:/Progra~2/snap/bin/pconvert.exe"
        # flags -f (format) -b (band) -o (output folder) for pcon
        # converting directly to GeoTiff ('tifp' instead of 'dim') does not work with arcpy for whatever reason
        options = ['dim', '1', imgFol]
        # Start the subprocess with specified arguments
        # creationflags=0x08000000 prevents windows from opening console window (goo.gl/vWf46a)
        subP = subprocess.Popen([pcon, '-f', options[0], '-b', options[1], '-o', options[2], dblPath], creationflags=0x08000000)
        subP.wait()
        # console subprocess sometimes throws error and no output is generated -> skip file & print name
        try:
            raster = Raster(imgFol + dblFile[:-3] + "data/" + "Soil_Moisture.img")
        except:
            print dblFile[:-3]
            continue
        # copy raster to new folder, only honoring above extent, converting to GeoTiff, -999 is nodata
        arcpy.CopyRaster_management(raster, tifFol + dblFile[:-3] + "tif", "DEFAULTS","-999", "-999")
        # try to delete Files from imgFol (*.data is recognized as folder -> shutil)
        for x in os.listdir(imgFol):
            try:
                if os.path.isdir(imgFol + x):
                    shutil.rmtree(imgFol + x)
                else:
                    os.remove(imgFol + x)
            except:
                continue
arcpy.CheckInExtension("Spatial")
| jdegene/ArcGIS-scripts | SMOS.py | Python | mit | 2,615 |
# -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.wsgi import ClosingIterator
from werkzeug._internal import _patch_wrapper
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident.
try:
from greenlet import getcurrent as get_ident
except ImportError: # pragma: no cover
try:
from thread import get_ident
except ImportError: # pragma: no cover
from dummy_thread import get_ident
def release_local(local):
    """Drop everything the given context-local object holds for the
    current context, making it possible to use locals without a manager.

    Works for :class:`Local` as well as :class:`LocalStack` objects;
    proxies cannot be released this way because they do not own the
    underlying storage -- keep a reference to the local itself.

    Example::

        >>> loc = Local()
        >>> loc.foo = 42
        >>> release_local(loc)
        >>> hasattr(loc, 'foo')
        False

    .. versionadded:: 0.6.1
    """
    local.__release_local__()
class Local(object):
    """Attribute storage that is private to the current context (thread
    or greenlet), keyed by ``__ident_func__()``."""

    __slots__ = ('__storage__', '__ident_func__')

    def __init__(self):
        # Bypass our own __setattr__, which writes into the per-context dict.
        object.__setattr__(self, '__storage__', {})
        object.__setattr__(self, '__ident_func__', get_ident)

    def __iter__(self):
        return iter(self.__storage__.items())

    def __call__(self, proxy):
        """Create a proxy for a name."""
        return LocalProxy(self, proxy)

    def __release_local__(self):
        self.__storage__.pop(self.__ident_func__(), None)

    def __getattr__(self, name):
        ident = self.__ident_func__()
        try:
            return self.__storage__[ident][name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self.__storage__.setdefault(self.__ident_func__(), {})[name] = value

    def __delattr__(self, name):
        ident = self.__ident_func__()
        try:
            del self.__storage__[ident][name]
        except KeyError:
            raise AttributeError(name)
class LocalStack(object):
    """This class works similar to a :class:`Local` but keeps a stack
    of objects instead. This is best explained with an example::
        >>> ls = LocalStack()
        >>> ls.push(42)
        >>> ls.top
        42
        >>> ls.push(23)
        >>> ls.top
        23
        >>> ls.pop()
        23
        >>> ls.top
        42
    They can be force released by using a :class:`LocalManager` or with
    the :func:`release_local` function but the correct way is to pop the
    item from the stack after using. When the stack is empty it will
    no longer be bound to the current context (and as such released).
    By calling the stack without arguments it returns a proxy that resolves to
    the topmost item on the stack.
    .. versionadded:: 0.6.1
    """
    def __init__(self):
        # the per-context list lives in self._local.stack
        self._local = Local()
    def __release_local__(self):
        self._local.__release_local__()
    def _get__ident_func__(self):
        return self._local.__ident_func__
    def _set__ident_func__(self, value):
        # must bypass Local.__setattr__, which would store per-context
        object.__setattr__(self._local, '__ident_func__', value)
    __ident_func__ = property(_get__ident_func__, _set__ident_func__)
    del _get__ident_func__, _set__ident_func__
    def __call__(self):
        def _lookup():
            rv = self.top
            if rv is None:
                raise RuntimeError('object unbound')
            return rv
        return LocalProxy(_lookup)
    def push(self, obj):
        """Pushes a new item to the stack"""
        rv = getattr(self._local, 'stack', None)
        if rv is None:
            self._local.stack = rv = []
        rv.append(obj)
        return rv
    def pop(self):
        """Removes the topmost item from the stack, will return the
        old value or `None` if the stack was already empty.
        """
        stack = getattr(self._local, 'stack', None)
        if stack is None:
            return None
        elif len(stack) == 1:
            # last item: unbind the whole context, but still return the value
            release_local(self._local)
            return stack[-1]
        else:
            return stack.pop()
    @property
    def top(self):
        """The topmost item on the stack. If the stack is empty,
        `None` is returned.
        """
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return None
class LocalManager(object):
    """Local objects cannot manage themselves. For that you need a local
    manager. You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`. Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.
    The `ident_func` parameter can be added to override the default ident
    function for the wrapped locals.
    .. versionchanged:: 0.6.1
       Instead of a manager the :func:`release_local` function can be used
       as well.
    .. versionchanged:: 0.7
       `ident_func` was added.
    """
    def __init__(self, locals=None, ident_func=None):
        if locals is None:
            self.locals = []
        elif isinstance(locals, Local):
            self.locals = [locals]
        else:
            self.locals = list(locals)
        if ident_func is not None:
            self.ident_func = ident_func
            # propagate the custom ident function to all wrapped locals
            for local in self.locals:
                object.__setattr__(local, '__ident_func__', ident_func)
        else:
            self.ident_func = get_ident
    def get_ident(self):
        """Return the context identifier the local objects use internally for
        this context. You cannot override this method to change the behavior
        but use it to link other context local objects (such as SQLAlchemy's
        scoped sessions) to the Werkzeug locals.
        .. versionchanged:: 0.7
           You can pass a different ident function to the local manager that
           will then be propagated to all the locals passed to the
           constructor.
        """
        return self.ident_func()
    def cleanup(self):
        """Manually clean up the data in the locals for this context. Call
        this at the end of the request or use `make_middleware()`.
        """
        for local in self.locals:
            release_local(local)
    def make_middleware(self, app):
        """Wrap a WSGI application so that cleaning up happens after
        request end.
        """
        def application(environ, start_response):
            # ClosingIterator calls self.cleanup once the response is closed
            return ClosingIterator(app(environ, start_response), self.cleanup)
        return application
    def middleware(self, func):
        """Like `make_middleware` but for decorating functions.
        Example usage::
            @manager.middleware
            def application(environ, start_response):
                ...
        The difference to `make_middleware` is that the function passed
        will have all the arguments copied from the inner application
        (name, docstring, module).
        """
        return _patch_wrapper(func, self.make_middleware(func))
    def __repr__(self):
        return '<%s storages: %d>' % (
            self.__class__.__name__,
            len(self.locals)
        )
class LocalProxy(object):
    """Acts as a proxy for a werkzeug local. Forwards all operations to
    a proxied object. The only operations not supported for forwarding
    are right handed operands and any kind of assignment.
    Example usage::
        from werkzeug.local import Local
        l = Local()
        # these are proxies
        request = l('request')
        user = l('user')
        from werkzeug.local import LocalStack
        _response_local = LocalStack()
        # this is a proxy
        response = _response_local()
    Whenever something is bound to l.user / l.request the proxy objects
    will forward all operations. If no object is bound a :exc:`RuntimeError`
    will be raised.
    To create proxies to :class:`Local` or :class:`LocalStack` objects,
    call the object as shown above. If you want to have a proxy to an
    object looked up by a function, you can (as of Werkzeug 0.6.1) pass
    a function to the :class:`LocalProxy` constructor::
        session = LocalProxy(lambda: get_current_request().session)
    .. versionchanged:: 0.6.1
        The class can be instantiated with a callable as well now.
    """
    __slots__ = ('__local', '__dict__', '__name__')
    def __init__(self, local, name=None):
        # '__local' is name-mangled to _LocalProxy__local, hence the
        # explicit key passed to object.__setattr__ here.
        object.__setattr__(self, '_LocalProxy__local', local)
        object.__setattr__(self, '__name__', name)
    def _get_current_object(self):
        """Return the current object. This is useful if you want the real
        object behind the proxy at a time for performance reasons or because
        you want to pass the object into a different context.
        """
        if not hasattr(self.__local, '__release_local__'):
            # not a Local/LocalStack: assume a plain lookup callable
            return self.__local()
        try:
            return getattr(self.__local, self.__name__)
        except AttributeError:
            raise RuntimeError('no object bound to %s' % self.__name__)
    @property
    def __dict__(self):
        try:
            return self._get_current_object().__dict__
        except RuntimeError:
            raise AttributeError('__dict__')
    def __repr__(self):
        try:
            obj = self._get_current_object()
        except RuntimeError:
            return '<%s unbound>' % self.__class__.__name__
        return repr(obj)
    def __nonzero__(self):
        try:
            return bool(self._get_current_object())
        except RuntimeError:
            return False
    def __unicode__(self):
        try:
            return unicode(self._get_current_object())
        except RuntimeError:
            return repr(self)
    def __dir__(self):
        try:
            return dir(self._get_current_object())
        except RuntimeError:
            return []
    def __getattr__(self, name):
        if name == '__members__':
            return dir(self._get_current_object())
        return getattr(self._get_current_object(), name)
    def __setitem__(self, key, value):
        self._get_current_object()[key] = value
    def __delitem__(self, key):
        del self._get_current_object()[key]
    def __setslice__(self, i, j, seq):
        self._get_current_object()[i:j] = seq
    def __delslice__(self, i, j):
        del self._get_current_object()[i:j]
    # NOTE: several protocols below (__nonzero__, __unicode__, __cmp__,
    # __div__, __long__, __oct__, __hex__, __getslice__, __coerce__ and the
    # slice dunders above) are Python 2 only; Python 3 ignores them.
    __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
    __delattr__ = lambda x, n: delattr(x._get_current_object(), n)
    __str__ = lambda x: str(x._get_current_object())
    __lt__ = lambda x, o: x._get_current_object() < o
    __le__ = lambda x, o: x._get_current_object() <= o
    __eq__ = lambda x, o: x._get_current_object() == o
    __ne__ = lambda x, o: x._get_current_object() != o
    __gt__ = lambda x, o: x._get_current_object() > o
    __ge__ = lambda x, o: x._get_current_object() >= o
    __cmp__ = lambda x, o: cmp(x._get_current_object(), o)
    __hash__ = lambda x: hash(x._get_current_object())
    __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
    __len__ = lambda x: len(x._get_current_object())
    __getitem__ = lambda x, i: x._get_current_object()[i]
    __iter__ = lambda x: iter(x._get_current_object())
    __contains__ = lambda x, i: i in x._get_current_object()
    __getslice__ = lambda x, i, j: x._get_current_object()[i:j]
    __add__ = lambda x, o: x._get_current_object() + o
    __sub__ = lambda x, o: x._get_current_object() - o
    __mul__ = lambda x, o: x._get_current_object() * o
    __floordiv__ = lambda x, o: x._get_current_object() // o
    __mod__ = lambda x, o: x._get_current_object() % o
    __divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
    __pow__ = lambda x, o: x._get_current_object() ** o
    __lshift__ = lambda x, o: x._get_current_object() << o
    __rshift__ = lambda x, o: x._get_current_object() >> o
    __and__ = lambda x, o: x._get_current_object() & o
    __xor__ = lambda x, o: x._get_current_object() ^ o
    __or__ = lambda x, o: x._get_current_object() | o
    __div__ = lambda x, o: x._get_current_object().__div__(o)
    __truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
    __neg__ = lambda x: -(x._get_current_object())
    __pos__ = lambda x: +(x._get_current_object())
    __abs__ = lambda x: abs(x._get_current_object())
    __invert__ = lambda x: ~(x._get_current_object())
    __complex__ = lambda x: complex(x._get_current_object())
    __int__ = lambda x: int(x._get_current_object())
    __long__ = lambda x: long(x._get_current_object())
    __float__ = lambda x: float(x._get_current_object())
    __oct__ = lambda x: oct(x._get_current_object())
    __hex__ = lambda x: hex(x._get_current_object())
    __index__ = lambda x: x._get_current_object().__index__()
    __coerce__ = lambda x, o: x.__coerce__(x, o)
    __enter__ = lambda x: x.__enter__()
    __exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
| rooty/minishopgae | werkzeug/local.py | Python | gpl-3.0 | 13,416 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Abstraction layer for networking functionalities.
Currently Nova and Neutron have duplicated features. This API layer is
introduced to abstract the differences between them for seamless consumption by
different dashboard implementations.
"""
from django.conf import settings # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
class NetworkClient(object):
    """Facade that picks the Neutron or Nova implementation of the
    floating-IP and security-group managers, depending on which network
    services the deployment actually exposes."""

    def __init__(self, request):
        neutron_enabled = base.is_service_enabled(request, 'network')

        fip_module = neutron if neutron_enabled else nova
        self.floating_ips = fip_module.FloatingIpManager(request)

        # Not all quantum/neutron plugins support security groups, so
        # deployers may disable them via OPENSTACK_NEUTRON_NETWORK.
        neutron_cfg = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
        neutron_sg_enabled = neutron_cfg.get('enable_security_group', True)
        sg_module = neutron if (neutron_enabled and neutron_sg_enabled) else nova
        self.secgroups = sg_module.SecurityGroupManager(request)
def floating_ip_pools_list(request):
    """List the floating IP pools available to the tenant."""
    return NetworkClient(request).floating_ips.list_pools()
def tenant_floating_ip_list(request):
    """List the tenant's allocated floating IPs."""
    return NetworkClient(request).floating_ips.list()
def tenant_floating_ip_get(request, floating_ip_id):
    """Fetch a single floating IP by its id."""
    return NetworkClient(request).floating_ips.get(floating_ip_id)
def tenant_floating_ip_allocate(request, pool=None):
    """Allocate a floating IP for the tenant, optionally from *pool*."""
    return NetworkClient(request).floating_ips.allocate(pool)
def tenant_floating_ip_release(request, floating_ip_id):
    """Release (deallocate) a floating IP back to its pool."""
    return NetworkClient(request).floating_ips.release(floating_ip_id)
def floating_ip_associate(request, floating_ip_id, port_id):
    """Associate a floating IP with a port/instance target."""
    return NetworkClient(request).floating_ips.associate(floating_ip_id,
                                                         port_id)
def floating_ip_disassociate(request, floating_ip_id, port_id):
    """Break the association between a floating IP and its target."""
    return NetworkClient(request).floating_ips.disassociate(floating_ip_id,
                                                            port_id)
def floating_ip_target_list(request):
    """List all possible association targets (ports or instances)."""
    return NetworkClient(request).floating_ips.list_targets()
def floating_ip_target_get_by_instance(request, instance_id):
    """Resolve the association target id for a given instance."""
    return NetworkClient(request).floating_ips.get_target_id_by_instance(
        instance_id)
def security_group_list(request):
    """List the tenant's security groups."""
    return NetworkClient(request).secgroups.list()
def security_group_get(request, sg_id):
    """Fetch a single security group by id."""
    return NetworkClient(request).secgroups.get(sg_id)
def security_group_create(request, name, desc):
    """Create a security group with the given name and description."""
    return NetworkClient(request).secgroups.create(name, desc)
def security_group_delete(request, sg_id):
    """Delete the security group identified by *sg_id*."""
    return NetworkClient(request).secgroups.delete(sg_id)
def security_group_update(request, sg_id, name, desc):
    """Update a security group's name and description."""
    return NetworkClient(request).secgroups.update(sg_id, name, desc)
def security_group_rule_create(request, parent_group_id,
                               direction, ethertype,
                               ip_protocol, from_port, to_port,
                               cidr, group_id):
    """Add a rule to *parent_group_id*; either *cidr* or *group_id* is
    the remote side of the rule."""
    return NetworkClient(request).secgroups.rule_create(
        parent_group_id, direction, ethertype, ip_protocol,
        from_port, to_port, cidr, group_id)
def security_group_rule_delete(request, sgr_id):
    """Delete a single security group rule."""
    return NetworkClient(request).secgroups.rule_delete(sgr_id)
def server_security_groups(request, instance_id):
    """List the security groups attached to an instance."""
    return NetworkClient(request).secgroups.list_by_instance(instance_id)
def server_update_security_groups(request, instance_id, new_sgs):
    """Replace the instance's security groups with *new_sgs*."""
    return NetworkClient(request).secgroups.update_instance_security_group(
        instance_id, new_sgs)
def security_group_backend(request):
    """Return which backend ('neutron' or 'nova') serves security groups."""
    return NetworkClient(request).secgroups.backend
| openstack-ja/horizon | openstack_dashboard/api/network.py | Python | apache-2.0 | 4,543 |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# TODO extend this test from the test framework (like all other tests)
import tempfile
import traceback
from test_framework.util import *
from test_framework.netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
    '''
    Start a node with requested rpcallowip and rpcbind parameters,
    then try to connect, and check if the set of bound addresses
    matches the expected set.
    '''
    # normalise expected endpoints into the (hex-address, port) form
    # that get_bind_addrs() reports
    expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
    base_args = ['-disablewallet', '-nolisten']
    if allow_ips:
        base_args += ['-rpcallowip=' + x for x in allow_ips]
    binds = ['-rpcbind='+addr for addr in addresses]
    nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)
    try:
        pid = bitcoind_processes[0].pid
        assert_equal(set(get_bind_addrs(pid)), set(expected))
    finally:
        # always tear the node down, even when the assertion fails
        stop_nodes(nodes)
        wait_bitcoinds()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
    '''
    Start a node with rpcallow IP, and request getnetworkinfo
    at a non-localhost IP.
    '''
    base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
    nodes = start_nodes(1, tmpdir, [base_args])
    try:
        # connect to node through non-loopback interface
        url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
        node = get_rpc_proxy(url, 1)
        node.getnetworkinfo()
    finally:
        node = None # make sure connection will be garbage collected and closed
        stop_nodes(nodes)
        wait_bitcoinds()
def run_test(tmpdir):
    """Exercise -rpcbind/-rpcallowip combinations against expected bound
    address sets, then verify that a wrong rpcallowip denies access."""
    assert(sys.platform == 'linux2') # due to OS-specific network stats queries, this test works only on Linux
    # find the first non-loopback interface for testing
    non_loopback_ip = None
    for name,ip in all_interfaces():
        if ip != '127.0.0.1':
            non_loopback_ip = ip
            break
    if non_loopback_ip is None:
        assert(not 'This test requires at least one non-loopback IPv4 interface')
    print("Using interface %s for testing" % non_loopback_ip)
    defaultport = rpc_port(0)
    # check default without rpcallowip (IPv4 and IPv6 localhost)
    run_bind_test(tmpdir, None, '127.0.0.1', [],
        [('127.0.0.1', defaultport), ('::1', defaultport)])
    # check default with rpcallowip (IPv6 any)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
        [('::0', defaultport)])
    # check only IPv4 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
        [('127.0.0.1', defaultport)])
    # check only IPv4 localhost (explicit) with alternative port
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
        [('127.0.0.1', 32171)])
    # check only IPv4 localhost (explicit) with multiple alternative ports on same host
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
        [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
    # check only IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
        [('::1', defaultport)])
    # check both IPv4 and IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
        [('127.0.0.1', defaultport), ('::1', defaultport)])
    # check only non-loopback interface
    run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
        [(non_loopback_ip, defaultport)])
    # Check that with invalid rpcallowip, we are denied
    run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
    try:
        run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
        assert(not 'Connection not denied by rpcallowip as expected')
    except ValueError:
        pass
def main():
    """Parse command-line options, set up the regtest chain, run the
    bind/allowip tests and clean up afterwards."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                      help="Leave bitcoinds and test.* datadir on exit or error")
    parser.add_option("--srcdir", dest="srcdir", default="../../src",
                      help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
    parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                      help="Root directory for datadirs")
    (options, args) = parser.parse_args()
    # make the locally built binaries win over any installed ones
    os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
    check_json_precision()
    success = False
    nodes = []
    try:
        print("Initializing test directory "+options.tmpdir)
        if not os.path.isdir(options.tmpdir):
            os.makedirs(options.tmpdir)
        initialize_chain(options.tmpdir)
        run_test(options.tmpdir)
        success = True
    except AssertionError as e:
        # NOTE(review): e.message is Python 2 only (script shebang is python2)
        print("Assertion failed: "+e.message)
    except Exception as e:
        print("Unexpected exception caught during testing: "+str(e))
        traceback.print_tb(sys.exc_info()[2])
    if not options.nocleanup:
        print("Cleaning up")
        wait_bitcoinds()
        shutil.rmtree(options.tmpdir)
    if success:
        print("Tests successful")
        sys.exit(0)
    else:
        print("Failed")
        sys.exit(1)
if __name__ == '__main__':
    main()
| janko33bd/bitcoin | qa/rpc-tests/rpcbind_test.py | Python | mit | 5,587 |
import signal
_signal_name = {}   # signal number -> canonical 'SIGxxx' name
_name_signal = {}   # 'SIGxxx' name -> signal number


def _init_lookup_tables():
    """Populate the two lookup tables from the ``signal`` module.

    Only real signal names are considered: names starting with ``SIG`` but
    not ``SIG_`` (which excludes handler constants such as ``SIG_DFL`` and
    ``SIG_IGN``).  When several names share a number, the first one seen is
    kept as the canonical name.

    The original implementation used ``dict.has_key`` and ``d.keys()``,
    which breaks on Python 3; this version works on both Python 2 and 3.
    """
    for k, v in signal.__dict__.items():
        if k.startswith('SIG') and not k.startswith('SIG_'):
            if v not in _signal_name:  # first name seen seems more canonical
                _signal_name[v] = k
            _name_signal[k] = v


def lookup_number(signalname):
    """Return the signal number for *signalname*.

    Unknown names are expected in the ``SIG_<number>`` form produced by
    :func:`lookup_name` and are parsed back to an integer.
    """
    n = _name_signal.get(signalname)
    if n is not None:
        return n
    return int(signalname[4:])


def lookup_name(signalnumber):
    """Return the symbolic name for *signalnumber*, or ``SIG_<n>`` when the
    number is unknown on this platform."""
    return _signal_name.get(signalnumber, "SIG_%d" % signalnumber)


_init_lookup_tables()
| tutufan/subterfugue | signalmap.py | Python | gpl-2.0 | 626 |
from email.utils import parseaddr
from sqlalchemy import not_
from datetime import datetime
from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
import colander
from deform import (
Form,
widget,
ValidationFailure,
)
from ..tools import (email_validator,BULANS, captcha_submit, get_settings)
from ..models import DBSession, User, UserGroup, Group
from ..models.isipkd import(
SubjekPajak,
ARInvoice,
Unit,
UserUnit,
ObjekPajak
)
from ..security import group_in
from datatables import (
ColumnDT, DataTables)
from daftar import (STATUS, deferred_status,
daftar_user, deferred_user)
SESS_ADD_FAILED = 'Gagal tambah wp'
SESS_EDIT_FAILED = 'Gagal edit wp'
########
# List #
########
@view_config(route_name='wp', renderer='templates/wp/list.pt',
             permission='read')
def view_list(request):
    """Render the penyetor (taxpayer) list page.

    The rows are loaded asynchronously through the DataTables endpoint,
    so the template receives an empty row set here."""
    return {'rows': {}}
#######
# Add #
#######
def form_validator(form, value):
    """Cross-field validator for the AddSchema/EditSchema forms.

    Rejects duplicate 'kode' (registration code), 'nama' (name), linked
    user id and login name, excluding the row currently being edited
    (looked up via the route's ``id`` matchdict entry).

    The err_* closures read the surrounding ``found`` variable at call
    time, so each must be invoked right after ``found`` is (re)assigned.
    Error messages are user-facing Indonesian strings and are left as-is.
    """
    def err_kode():
        raise colander.Invalid(form,
            'Kode wp %s sudah digunakan oleh ID %d' % (
                value['kode'], found.id))
    def err_name():
        raise colander.Invalid(form,
            'Nama %s sudah digunakan oleh ID %d' % (
                value['nama'], found.id))
    def err_user():
        raise colander.Invalid(form,
            'User ID %s sudah digunakan oleh ID %d' % (
                value['user_id'], found.id))
    # r is the row being edited (None when adding a new record)
    if 'id' in form.request.matchdict:
        uid = form.request.matchdict['id']
        q = DBSession.query(SubjekPajak).filter_by(id=uid)
        r = q.first()
    else:
        r = None
    q = DBSession.query(SubjekPajak).filter_by(kode=value['kode'])
    found = q.first()
    if r:
        if found and found.id != r.id:
            err_kode()
    elif found:
        err_kode()
    if 'nama' in value: # optional
        found = SubjekPajak.get_by_nama(value['nama'])
        if r:
            if found and found.id != r.id:
                err_name()
        elif found:
            err_name()
    if 'user_id' in value and int(value['user_id'])>0:
        found = SubjekPajak.get_by_user_wp(value['user_id'])
        if r:
            if found and found.id != r.id:
                err_user()
        elif found:
            err_user()
    if 'login' in value: # and int(value['user_id'])==0:
        found = User.get_by_name(value['kode'])
        if r:
            if found and found.id != r.id:
                err_user()
            # NOTE(review): unlike the blocks above, this elif is nested
            # under `if r:` -- when adding a NEW record (r is None) a
            # duplicate login name is NOT rejected here. Confirm intent.
            elif found:
                err_user()
class AddSchema(colander.Schema):
    '''
    user_id = colander.SchemaNode(
        colander.Integer(),
        widget = deferred_user,
        #oid="user_id",
        title="User")
    '''
    # Field titles are user-facing Indonesian strings (runtime data) and
    # must not be translated here.
    # kode: tax registration code (NPWPD / registration number)
    kode = colander.SchemaNode(
        colander.String(),
        title ="NPWPD/No.Reg"
    )
    # nama: taxpayer name
    nama = colander.SchemaNode(
        colander.String()
    )
    # alamat_1 / alamat_2: primary and secondary address lines
    alamat_1 = colander.SchemaNode(
        colander.String(),
        title ="Alamat"
    )
    alamat_2 = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
        title ="Alamat ke-2"
    )
    # kelurahan/kecamatan/kota/provinsi: village, sub-district, city, province
    kelurahan = colander.SchemaNode(
        colander.String(),
        missing=colander.drop
    )
    kecamatan = colander.SchemaNode(
        colander.String(),
        missing=colander.drop
    )
    kota = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
        title ="Kabupaten/Kota"
    )
    provinsi = colander.SchemaNode(
        colander.String(),
        missing=colander.drop
    )
    status = colander.SchemaNode(
        colander.Integer(),
        widget=deferred_status,
        title="Status")
    # login: when checked, a User account is created for this taxpayer
    login = colander.SchemaNode(
        colander.Boolean(),
        missing = colander.drop,
        title='Buat Login'
    )
    email = colander.SchemaNode(
        colander.String(),
        validator=email_validator,
        title = 'E-Mail',
        missing=colander.drop,
        oid = 'email'
    )
    # unit_id/unit_nm: owning OPD (government unit); id is hidden, the
    # display name is what the user sees/searches
    unit_id = colander.SchemaNode(
        colander.Integer(),
        widget=widget.HiddenWidget(),
        oid="unit_id",
        title="OPD",
    )
    unit_nm = colander.SchemaNode(
        colander.String(),
        title="OPD",
        oid="unit_nm"
    )
class EditSchema(AddSchema):
    # Same fields as AddSchema plus the hidden, read-only primary key.
    id = colander.SchemaNode(colander.Integer(),
            missing=colander.drop,
            widget=widget.HiddenWidget(readonly=True),
            title="")
def get_form(request, class_form):
    """Build a deform Form for *class_form*.

    The schema is validated with :func:`form_validator`, bound to the
    shared status/user choice lists, and gets simpan/batal (save/cancel)
    buttons. The request is attached so the validator can inspect the
    route matchdict."""
    bound_schema = class_form(validator=form_validator).bind(
        daftar_status=STATUS,
        daftar_user=daftar_user(),
    )
    bound_schema.request = request
    return Form(bound_schema, buttons=('simpan', 'batal'))
def save(request, values, row=None):
    """Create or update a SubjekPajak (taxpayer) record from form *values*.

    The record is stamped with the id of the user performing the request;
    the original code had an if/else here whose two branches were
    identical, which has been collapsed.  When the 'login' flag is set in
    *values*, a User account is also created (user name and e-mail from
    the form, the NPWPD code as the initial password) and, on first
    enrolment, added to the 'wp' group and linked to the record's unit.

    Returns the persisted SubjekPajak row.
    """
    if not row:
        row = SubjekPajak()
    row.from_dict(values)
    # NOTE(review): for now the linked user is the *operator* entering the
    # data, not the optional login created below -- confirm intent.
    row.user_id = request.user.id or None
    DBSession.add(row)
    DBSession.flush()
    if values.get('login'):
        login = User()
        login.status = values['status']
        login.user_name = values['email']
        login.email = values['email']
        login.password = values['kode']
        DBSession.add(login)
        DBSession.flush()
        if login.id:
            member = DBSession.query(UserGroup).join(Group).filter(
                UserGroup.user_id == login.id,
                Group.group_name == 'wp').first()
            if not member:
                usergroup = UserGroup()
                usergroup.user_id = login.id
                usergroup.group_id = DBSession.query(Group.id).filter_by(
                    group_name='wp').scalar()
                DBSession.add(usergroup)
                DBSession.flush()
                # Link the new login to the record's unit.  As in the
                # original code this only happens on first enrolment.
                userunit = UserUnit()
                userunit.user_id = login.id
                userunit.unit_id = DBSession.query(Unit.id).filter_by(
                    id=row.unit_id).scalar()
                DBSession.add(userunit)
                DBSession.flush()
    return row
def save_request(values, request, row=None):
    """Persist form *values* (optionally onto an existing *row*) and flash
    a confirmation message.

    The route's ``id`` matchdict entry, when present, overrides any id in
    the form data.  A leftover debug ``print`` of the row was removed.
    """
    if 'id' in request.matchdict:
        values['id'] = request.matchdict['id']
    row = save(request, values, row)
    if row:
        request.session.flash('Penyetor %s %s sudah disimpan.' % (row.kode, row.nama))
def route_list(request):
    """Redirect back to the penyetor list page."""
    list_url = request.route_url('wp')
    return HTTPFound(location=list_url)
def session_failed(request, session_name):
    """Pop a previously stored failed-form rendering out of the session
    and return it in the shape the templates expect."""
    rendered = request.session.pop(session_name)
    return dict(form=rendered)
@view_config(route_name='wp-add', renderer='templates/wp/add.pt',
             permission='add')
def view_add(request):
    """Display and process the "add penyetor" form.

    For non-admin users (id != 1) in group 2 the unit fields are
    pre-filled from the user's own unit.  On POST the form is validated
    once; e-mail uniqueness (and, when a login is requested, presence)
    is checked before saving.

    Fixes over the original: the first ``form.validate`` call was outside
    any try/except, so any validation error crashed the view with an
    unhandled ValidationFailure; ``form.validate`` was called twice; and
    several Python-2 debug ``print`` statements leaked to stdout.
    """
    form = get_form(request, AddSchema)
    values = {}
    user_id = request.user.id
    if user_id != 1:
        # SQLAlchemy rows are tuples; '%s' % row yields the first column,
        # which the original code relied on to extract scalar ids.
        group_row = DBSession.query(UserGroup.group_id).filter(
            UserGroup.user_id == user_id).first()
        if group_row and int('%s' % group_row) == 2:
            unit_row = DBSession.query(UserUnit.unit_id).filter(
                UserUnit.user_id == user_id).first()
            unit_id = int('%s' % unit_row)
            values['unit_id'] = unit_id
            unit = DBSession.query(Unit.nama.label('unm')).filter(
                Unit.id == unit_id).first()
            values['unit_nm'] = unit.unm
    form.set_appstruct(values)
    if request.POST:
        if 'simpan' in request.POST:
            controls = request.POST.items()
            try:
                appstruct = form.validate(controls)
            except ValidationFailure:
                return dict(form=form)
            email = dict(controls)['email']
            if str(appstruct.get('login')) == 'True':
                # A login was requested: e-mail is mandatory and must be unique.
                if email == '':
                    request.session.flash('Email harus diisi.','error')
                    return HTTPFound(location=request.route_url('wp-add'))
                if DBSession.query(User).filter(User.email=='%s' % email).first():
                    request.session.flash('Email sudah digunakan.', 'error')
                    return HTTPFound(location=request.route_url('wp-add'))
            else:
                # No login requested, but a supplied e-mail must still be unique.
                if email != '' and DBSession.query(User).filter(
                        User.email=='%s' % email).first():
                    request.session.flash('Email sudah digunakan.', 'error')
                    return HTTPFound(location=request.route_url('wp-add'))
            save_request(dict(controls), request)
        return route_list(request)
    elif SESS_ADD_FAILED in request.session:
        return session_failed(request, SESS_ADD_FAILED)
    return dict(form=form)
########
# Edit #
########
def query_id(request):
    """Return a query for the SubjekPajak row addressed by the route's ``id``."""
    wanted_id = request.matchdict['id']
    return DBSession.query(SubjekPajak).filter_by(id=wanted_id)
def id_not_found(request):
    """Flash a not-found error for the requested id and go back to the list."""
    request.session.flash(
        'Penyetor ID %s not found.' % request.matchdict['id'], 'error')
    return route_list(request)
@view_config(route_name='wp-edit', renderer='templates/wp/edit.pt',
             permission='edit')
def view_edit(request):
    """Display and process the "edit penyetor" form.

    Editing is refused when the record is already referenced by an
    invoice (ARInvoice).  *found* tells the template whether a login
    account exists for the record's e-mail address.

    Fixes over the original: ``row.id``/``row.email`` were read *before*
    the ``if not row`` check (AttributeError on an unknown id); the first
    ``form.validate`` call was outside any try/except, so validation
    errors crashed the view; debug ``print`` removed.
    """
    row = query_id(request).first()
    if not row:
        return id_not_found(request)
    if DBSession.query(ARInvoice).filter(
            ARInvoice.subjek_pajak_id==row.id).first():
        request.session.flash('Tidak bisa diedit, karena penyetor sudah digunakan di daftar bayar.','error')
        return route_list(request)
    login_email = DBSession.query(User.email).filter(User.email==row.email).first()
    found = 1 if login_email else 0
    form = get_form(request, EditSchema)
    if request.POST:
        if 'simpan' in request.POST:
            controls = request.POST.items()
            try:
                appstruct = form.validate(controls)
            except ValidationFailure:
                return dict(form=form)
            email = dict(controls)['email']
            if str(appstruct.get('login')) == 'True':
                # A login was requested: e-mail is mandatory and must be unique.
                if email == '':
                    request.session.flash('Email harus diisi.','error')
                    return HTTPFound(location=request.route_url('wp-edit',id=row.id))
                if DBSession.query(User).filter(User.email=='%s' % email).first():
                    request.session.flash('Email sudah digunakan.', 'error')
                    return HTTPFound(location=request.route_url('wp-edit',id=row.id))
            save_request(dict(controls), request, row)
        return route_list(request)
    elif SESS_EDIT_FAILED in request.session:
        return session_failed(request, SESS_EDIT_FAILED)
    values = row.to_dict()
    # Optional columns may be None; the templates expect empty strings.
    values['alamat_2'] = row.alamat_2 or ''
    values['kelurahan'] = row.kelurahan or ''
    values['kecamatan'] = row.kecamatan or ''
    values['kota'] = row.kota or ''
    values['provinsi'] = row.provinsi or ''
    values['email'] = row.email or ''
    values['login'] = found
    values['unit_nm'] = row.units.nama or None
    form.set_appstruct(values)
    return dict(form=form, found=found)
##########
# Delete #
##########
@view_config(route_name='wp-delete', renderer='templates/wp/delete.pt',
             permission='delete')
def view_delete(request):
    """Confirm and perform deletion of a penyetor record.

    Deletion is refused when the record is still referenced by an
    ObjekPajak or an invoice (ARInvoice).

    Fixes over the original: ``row.id`` was read *before* the
    ``if not row`` check (AttributeError on an unknown id), and the local
    name ``id`` shadowed the builtin.
    """
    q = query_id(request)
    row = q.first()
    if not row:
        return id_not_found(request)
    row_id = row.id
    if DBSession.query(ObjekPajak).filter(
            ObjekPajak.subjekpajak_id==row_id).first():
        request.session.flash('Tidak bisa dihapus, karena penyetor sudah digunakan di Objek Pajak.','error')
        return route_list(request)
    if DBSession.query(ARInvoice).filter(
            ARInvoice.subjek_pajak_id==row_id).first():
        request.session.flash('Tidak bisa dihapus, karena penyetor sudah digunakan di daftar bayar.','error')
        return route_list(request)
    form = Form(colander.Schema(), buttons=('hapus','batal'))
    if request.POST:
        if 'hapus' in request.POST:
            msg = 'Penyetor %s %s sudah dihapus.' % (row.kode, row.nama)
            q.delete()
            DBSession.flush()
            request.session.flash(msg)
            return route_list(request)
    return dict(row=row, form=form.render())
##########
# Action #
##########
@view_config(route_name='wp-act', renderer='json',
             permission='read')
def view_act(request):
    """JSON AJAX endpoint for Penyetor (SubjekPajak) lookups.

    Dispatches on matchdict['act']:
      grid      -- server-side rows for the DataTables grid
      hon       -- autocomplete: all active Penyetor
      hon_tbp   -- autocomplete for TBP, filtered by unit/role
      hon1/2/3  -- invoice autocomplete for BUD / bendahara / WP users
      ho_objek  -- Objek Pajak autocomplete, branched on the user's group
    Bendahara users are restricted to their own units via UserUnit.
    """
    req = request
    params = req.params
    url_dict = req.matchdict
    u = request.user.id
    if url_dict['act']=='grid':
        # DataTables server-side grid of active (status_grid==0) rows.
        columns = []
        columns.append(ColumnDT('id'))
        columns.append(ColumnDT('kode'))
        columns.append(ColumnDT('nama'))
        columns.append(ColumnDT('alamat_1'))
        columns.append(ColumnDT('alamat_2'))
        columns.append(ColumnDT('status'))
        columns.append(ColumnDT('units.nama'))
        query = DBSession.query(SubjekPajak).join(Unit).filter(SubjekPajak.status_grid==0)
        if group_in(request, 'bendahara'):
            query = query.join(UserUnit).filter(UserUnit.user_id==u)
        rowTable = DataTables(req, SubjekPajak, query, columns)
        return rowTable.output_result()
    elif url_dict['act']=='hon':
        # Autocomplete over all active Penyetor, matched by name substring.
        term = 'term' in params and params['term'] or ''
        rows = DBSession.query(SubjekPajak.id, SubjekPajak.nama
               ).filter(SubjekPajak.nama.ilike('%%%s%%' % term),
                        SubjekPajak.status==1,
                        SubjekPajak.status_grid==0
                        ).all()
        r = []
        for k in rows:
            d={}
            d['id'] = k[0]
            d['value'] = k[1]
            r.append(d)
        return r
    elif url_dict['act']=='hon_tbp':
        # Autocomplete for TBP: bendahara sees only their units,
        # others filter by the explicit unit_id parameter.
        term = 'term' in params and params['term'] or ''
        unit_id = 'unit_id' in params and params['unit_id'] or ''
        if group_in(request, 'bendahara'):
            print "----- Unit TBP ----- ",unit_id
            rows = DBSession.query(SubjekPajak.id,
                       SubjekPajak.nama,
                       SubjekPajak.alamat_1,
                       SubjekPajak.alamat_2
                       ).join(Unit
                       ).outerjoin(UserUnit
                       ).filter(SubjekPajak.nama.ilike('%%%s%%' % term),
                                SubjekPajak.status==1,
                                SubjekPajak.status_grid==0,
                                Unit.id==SubjekPajak.unit_id,
                                UserUnit.unit_id==Unit.id,
                                UserUnit.user_id==u
                       ).all()
        else:
            rows = DBSession.query(SubjekPajak.id,
                       SubjekPajak.nama,
                       SubjekPajak.alamat_1,
                       SubjekPajak.alamat_2
                       ).filter(SubjekPajak.nama.ilike('%%%s%%' % term),
                                SubjekPajak.status==1,
                                SubjekPajak.status_grid==0,
                                SubjekPajak.unit_id==unit_id
                       ).all()
        r = []
        for k in rows:
            d={}
            d['id'] = k[0]
            d['value'] = k[1]
            d['alamat_1'] = k[2]
            d['alamat_2'] = k[3]
            r.append(d)
        return r
    ## Invoice BUD ##
    elif url_dict['act']=='hon1':
        # BUD invoice autocomplete: unrestricted over active Penyetor.
        term = 'term' in params and params['term'] or ''
        rows = DBSession.query(SubjekPajak.id, SubjekPajak.nama, SubjekPajak.user_id, SubjekPajak.unit_id
               ).filter(SubjekPajak.nama.ilike('%%%s%%' % term),
                        SubjekPajak.status==1,
                        SubjekPajak.status_grid==0
                        ).all()
        r = []
        for k in rows:
            d={}
            d['id'] = k[0]
            d['value'] = k[1]
            d['user'] = k[2]
            d['unit'] = k[3]
            r.append(d)
        return r
    ## Invoice Bendahara ##
    elif url_dict['act']=='hon2':
        # Bendahara invoice autocomplete: restricted to the user's units.
        term = 'term' in params and params['term'] or ''
        u = request.user.id
        rows = DBSession.query(SubjekPajak.id, SubjekPajak.nama, SubjekPajak.user_id, SubjekPajak.unit_id
                   ).join(Unit
                   ).outerjoin(UserUnit
                   ).filter(SubjekPajak.nama.ilike('%%%s%%' % term),
                            SubjekPajak.status==1,
                            SubjekPajak.status_grid==0,
                            Unit.id==SubjekPajak.unit_id,
                            UserUnit.unit_id==Unit.id,
                            UserUnit.user_id==u
                   ).all()
        r = []
        for k in rows:
            d={}
            d['id'] = k[0]
            d['value'] = k[1]
            d['user'] = k[2]
            d['unit'] = k[3]
            r.append(d)
        return r
    ## Invoice WP ##
    elif url_dict['act']=='hon3':
        # WP (taxpayer) invoice autocomplete: matched by the user's email.
        term = 'term' in params and params['term'] or ''
        u = request.user.id
        a = DBSession.query(User.email).filter(User.id==u).first()
        rows = DBSession.query(SubjekPajak.id, SubjekPajak.nama, SubjekPajak.user_id, SubjekPajak.unit_id
                   ).filter(SubjekPajak.nama.ilike('%%%s%%' % term),
                            SubjekPajak.email==a,
                            SubjekPajak.status==1,
                            SubjekPajak.status_grid==0
                   ).all()
        r = []
        for k in rows:
            d={}
            d['id'] = k[0]
            d['value'] = k[1]
            d['user'] = k[2]
            d['unit'] = k[3]
            r.append(d)
        return r
    elif url_dict['act']=='ho_objek':
        # Objek Pajak autocomplete: branch on the user's group
        # (1 = WP by email, 2 = bendahara by unit, otherwise BUD; the
        # admin user id==1 bypasses the group lookup entirely).
        term = 'term' in params and params['term'] or ''
        u = request.user.id
        print '----------------User_Login---------------',u
        if u != 1:
            x = DBSession.query(UserGroup.group_id).filter(UserGroup.user_id==u).first()
            y = '%s' % x
            z = int(y)
            print '----------------Group_id-----------------',z
            if z == 1:
                a = DBSession.query(User.email).filter(User.id==u).first()
                print '----------------Email--------------------',a
                rows = DBSession.query(SubjekPajak.id, SubjekPajak.nama, SubjekPajak.user_id, SubjekPajak.unit_id
                           ).filter(SubjekPajak.email==a,
                                    SubjekPajak.nama.ilike('%%%s%%' % term),
                                    SubjekPajak.status==1,
                                    SubjekPajak.status_grid==0
                           ).all()
                r = []
                for k in rows:
                    d={}
                    d['id'] = k[0]
                    d['value'] = k[1]
                    d['user'] = k[2]
                    d['unit'] = k[3]
                    r.append(d)
                print '----------------Penyetor-----------------',r
                return r
            elif z == 2:
                print '----------------User_id------------------',u
                rows = DBSession.query(SubjekPajak.id, SubjekPajak.nama, SubjekPajak.user_id, SubjekPajak.unit_id
                           ).join(Unit
                           ).outerjoin(UserUnit
                           ).filter(SubjekPajak.nama.ilike('%%%s%%' % term),
                                    SubjekPajak.status==1,
                                    SubjekPajak.status_grid==0,
                                    Unit.id==SubjekPajak.unit_id,
                                    UserUnit.unit_id==Unit.id,
                                    UserUnit.user_id==u
                           ).all()
                #if group_in(request, 'bendahara'):
                #    rows = query.join(UserUnit).filter(UserUnit.user_id==u)
                r = []
                for k in rows:
                    d={}
                    d['id'] = k[0]
                    d['value'] = k[1]
                    d['user'] = k[2]
                    d['unit'] = k[3]
                    r.append(d)
                print '----------------Bendahara----------------',r
                return r
            else:
                rows = DBSession.query(SubjekPajak.id, SubjekPajak.nama, SubjekPajak.user_id, SubjekPajak.unit_id
                           ).filter(SubjekPajak.nama.ilike('%%%s%%' % term),
                                    SubjekPajak.status==1,
                                    SubjekPajak.status_grid==0
                           ).all()
                r = []
                for k in rows:
                    d={}
                    d['id'] = k[0]
                    d['value'] = k[1]
                    d['user'] = k[2]
                    d['unit'] = k[3]
                    r.append(d)
                print '----------------BUD----------------------',r
                return r
        else:
            rows = DBSession.query(SubjekPajak.id, SubjekPajak.nama, SubjekPajak.user_id, SubjekPajak.unit_id
                       ).filter(SubjekPajak.nama.ilike('%%%s%%' % term),
                                SubjekPajak.status==1,
                                SubjekPajak.status_grid==0
                       ).all()
            r = []
            for k in rows:
                d={}
                d['id'] = k[0]
                d['value'] = k[1]
                d['user'] = k[2]
                d['unit'] = k[3]
                r.append(d)
            print '----------------ADMIN--------------------',r
            return r
from ..reports.rml_report import open_rml_row, open_rml_pdf, pdf_response
def query_reg():
    """Base query for the Penyetor registration report/export.

    Selects the exported columns for all active rows (status_grid==0),
    joined with the owning Unit, ordered by kode.
    """
    q = DBSession.query(SubjekPajak.kode,
                        SubjekPajak.nama,
                        SubjekPajak.alamat_1,
                        SubjekPajak.kelurahan,
                        SubjekPajak.kecamatan,
                        SubjekPajak.kota,
                        SubjekPajak.email,
                        Unit.nama.label('unit'))
    q = q.join(Unit)
    q = q.filter(SubjekPajak.status_grid==0)
    return q.order_by(SubjekPajak.kode)
########
# CSV #
########
@view_config(route_name='wp-csv', renderer='csv')
def view_csv(request):
    """Export the Penyetor registration list as a CSV attachment.

    Bendahara users only receive rows for their own units.  The file is
    named Penyetor_<dd-mm-yyyy>.csv.  Returns None for an unknown csv
    kind (same as before).
    """
    url_dict = request.matchdict
    u = request.user.id
    a = datetime.now().strftime('%d-%m-%Y')
    if url_dict['csv']=='reg' :
        query = query_reg()
        if group_in(request, 'bendahara'):
            query = query.join(UserUnit).filter(UserUnit.user_id==u)
        row = query.first()
        # Bug fix: an empty result set used to crash on row.keys();
        # now an empty CSV (header-less) is produced instead.
        header = row.keys() if row is not None else []
        rows = [list(item) for item in query.all()]
        # override attributes of response
        filename = 'Penyetor_%s.csv' %(a)
        request.response.content_disposition = 'attachment;filename=' + filename
        return {
          'header': header,
          'rows' : rows,
        }
##########
# PDF #
##########
@view_config(route_name='wp-pdf', permission='read')
def view_pdf(request):
    """Render the Penyetor registration list as a PDF report.

    Bendahara users only see rows for their own units.  Returns None for
    an unknown pdf kind (same as before).
    """
    url_dict = request.matchdict
    u = request.user.id
    if url_dict['pdf']=='reg' :
        query = query_reg()
        if group_in(request, 'bendahara'):
            query = query.join(UserUnit).filter(UserUnit.user_id==u)
        rml_row = open_rml_row('wp.row.rml')
        # One formatted RML fragment per report row (debug print removed).
        rows = [rml_row.format(kode=r.kode,
                               nama=r.nama,
                               alamat=r.alamat_1,
                               kelurahan=r.kelurahan,
                               kecamatan=r.kecamatan,
                               kota=r.kota,
                               email=r.email,
                               unit=r.unit)
                for r in query.all()]
        pdf, filename = open_rml_pdf('wp.rml', rows2=rows)
        return pdf_response(request, pdf, filename)
# Link: https://oj.leetcode.com/problems/evaluate-reverse-polish-notation/
class Solution:
    # @param tokens, a list of string
    # @return an integer
    def evalRPN(self, tokens):
        """Evaluate an expression given in Reverse Polish Notation.

        Uses a stack of integers.  Division truncates toward zero (the
        RPN/LeetCode convention) and is done in pure integer arithmetic,
        which fixes two defects of the original: `operator.div` no longer
        exists in Python 3, and the float round-trip (`float(...)` then
        `int(...)`) lost precision on large operands — exactly the
        accuracy concern the original comment warned about.

        Args:
            tokens: list of strings, each an integer literal or one of
                '+', '-', '*', '/'.
        Returns:
            The integer value of the expression.
        """
        stack = []
        for t in tokens:
            if t in ('+', '-', '*', '/'):
                a = stack.pop()  # right operand
                b = stack.pop()  # left operand
                if t == '+':
                    val = b + a
                elif t == '-':
                    val = b - a
                elif t == '*':
                    val = b * a
                else:
                    # Truncate toward zero; plain // would floor and give
                    # the wrong answer for mixed-sign operands.
                    val = abs(b) // abs(a)
                    if (b < 0) != (a < 0):
                        val = -val
                stack.append(val)
            else:
                stack.append(int(t))
        return stack[0]
| ibigbug/leetcode | evaluate-reverse-polish-notation.py | Python | mit | 802 |
from PySide import QtGui
class SettingsDialog(QtGui.QDialog):
    """Modal dialog for editing the device IP address and poll interval."""

    def __init__(self, ip, interval):
        super(SettingsDialog, self).__init__()
        self._ip = ip
        self._interval = interval
        self.setupUi()

    @property
    def ip(self):
        """Configured IP address (string)."""
        return self._ip

    @property
    def interval(self):
        """Configured poll interval (int)."""
        return self._interval

    def setupUi(self):
        """Build the form: two line edits plus OK/Cancel buttons."""
        self.setWindowTitle("Settings")
        form = QtGui.QFormLayout()
        self._ipField = QtGui.QLineEdit()
        self._ipField.setText(self.ip)
        form.addRow("IP:", self._ipField)
        self._intervalField = QtGui.QLineEdit()
        self._intervalField.setText(str(self.interval))
        form.addRow("Interval:", self._intervalField)
        buttons = QtGui.QDialogButtonBox(
            QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)
        buttons.accepted.connect(self.onAccept)
        buttons.rejected.connect(self.reject)
        form.addRow(buttons)
        self.setLayout(form)

    def onAccept(self):
        """Copy the edited field values back, then accept the dialog."""
        self._ip = self._ipField.text()
        self._interval = int(self._intervalField.text())
        self.accept()
from web3.module import (
Module,
)
class Net(Module):
    """Read-only wrappers around the JSON-RPC `net_*` namespace."""

    @property
    def listening(self):
        """True if the node is actively listening for network connections."""
        request = self.web3.manager.request_blocking
        return request("net_listening", [])

    @property
    def peerCount(self):
        """Number of peers currently connected to the node."""
        request = self.web3.manager.request_blocking
        return request("net_peerCount", [])

    @property
    def version(self):
        """Current network id, as reported by the node."""
        request = self.web3.manager.request_blocking
        return request("net_version", [])
| pipermerriam/web3.py | web3/net.py | Python | mit | 389 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameter values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numbers
import re
import REDACTED
import six
from REDACTED.tensorflow.contrib.training.python.training import hparam_pb2
from REDACTED.tensorflow.python.framework import ops
from REDACTED.tensorflow.python.util import compat
from REDACTED.tensorflow.python.util import deprecation
# Define the regular expression for parsing a single clause of the input
# (delimited by commas). A legal clause looks like:
# <variable name>[<index>]? = <rhs>
# where <rhs> is either a single token or [] enclosed list of tokens.
# For example: "var[1] = a" or "x = [1,2,3]"
PARAM_RE = re.compile(r"""
(?P<name>[a-zA-Z][\w\.]*) # variable name: "var" or "x"
(\[\s*(?P<index>\d+)\s*\])? # (optional) index: "1" or None
\s*=\s*
((?P<val>[^,\[]*) # single value: "a" or None
|
\[(?P<vals>[^\]]*)\]) # list of values: None or "1,2,3"
($|,\s*)""", re.VERBOSE)
def _parse_fail(name, var_type, value, values):
"""Helper function for raising a value error for bad assignment."""
raise ValueError(
'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' %
(name, var_type.__name__, value, values))
def _reuse_fail(name, values):
"""Helper function for raising a value error for reuse of name."""
raise ValueError('Multiple assignments to variable \'%s\' in %s' % (name,
values))
def _process_scalar_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary with a scalar value.
Used to update the results_dictionary to be returned by parse_values when
encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("s" or "arr").
parse_fn: Function for parsing the actual value.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
m_dict['index']: List index value (or None)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has already been used.
"""
try:
parsed_value = parse_fn(m_dict['val'])
except ValueError:
_parse_fail(name, var_type, m_dict['val'], values)
# If no index is provided
if not m_dict['index']:
if name in results_dictionary:
_reuse_fail(name, values)
results_dictionary[name] = parsed_value
else:
if name in results_dictionary:
# The name has already been used as a scalar, then it
# will be in this dictionary and map to a non-dictionary.
if not isinstance(results_dictionary.get(name), dict):
_reuse_fail(name, values)
else:
results_dictionary[name] = {}
index = int(m_dict['index'])
# Make sure the index position hasn't already been assigned a value.
if index in results_dictionary[name]:
_reuse_fail('{}[{}]'.format(name, index), values)
results_dictionary[name][index] = parsed_value
def _process_list_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary from a list of values.
Used to update results_dictionary to be returned by parse_values when
encountering a clause with a list RHS (e.g. "arr=[1,2,3]".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("arr").
parse_fn: Function for parsing individual values.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has an index or the values cannot be parsed.
"""
if m_dict['index'] is not None:
raise ValueError('Assignment of a list to a list index.')
elements = filter(None, re.split('[ ,]', m_dict['vals']))
# Make sure the name hasn't already been assigned a value
if name in results_dictionary:
raise _reuse_fail(name, values)
try:
results_dictionary[name] = [parse_fn(e) for e in elements]
except ValueError:
_parse_fail(name, var_type, m_dict['vals'], values)
def _cast_to_type_if_compatible(name, param_type, value):
  """Cast `value` to `param_type` when the conversion is type-safe.

  Args:
    name: Name of the hparam, for error messages.
    param_type: Declared type of the hparam.
    value: Candidate value.

  Returns:
    `value` cast to `param_type`.

  Raises:
    ValueError: When the cast would silently change the kind of value
      (string vs non-string, bool vs non-bool, float into int, or a
      non-number into a numeric type).
  """
  error = ("Could not cast hparam '%s' of type '%s' from value %r" %
           (name, param_type, value))
  # Exact type match needs no cast.  `isinstance` would be too weak here:
  # e.g. isinstance(True, int) is True.
  if type(value) == param_type:  # pylint: disable=unidiomatic-typecheck
    return value
  # Some callers use None; nothing sensible to check or convert.
  if issubclass(param_type, type(None)):
    return value
  # Never coerce a non-string into a string type.
  string_like = (six.string_types, six.binary_type)
  if issubclass(param_type, string_like) and not isinstance(value, string_like):
    raise ValueError(error)
  # Never coerce between bool and anything else.
  if issubclass(param_type, bool) != isinstance(value, bool):
    raise ValueError(error)
  # Never coerce a float down to an integer (the reverse is fine).
  if (issubclass(param_type, numbers.Integral)
      and not isinstance(value, numbers.Integral)):
    raise ValueError(error)
  # Never coerce a non-number into a numeric type.
  if (issubclass(param_type, numbers.Number)
      and not isinstance(value, numbers.Number)):
    raise ValueError(error)
  return param_type(value)
def parse_values(values, type_map, ignore_unknown=False):
  """Parses hyperparameter values from a string into a python map.

  `values` is a string containing comma-separated `name=value` pairs.
  For each pair, the value of the hyperparameter named `name` is set to
  `value`.

  If a hyperparameter name appears multiple times in `values`, a ValueError
  is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2').

  If a hyperparameter name in both an index assignment and scalar assignment,
  a ValueError is raised. (e.g. 'a=[1,2,3],a[0] = 1').

  The hyperparameter name may contain '.' symbols, which will result in an
  attribute name that is only accessible through the getattr and setattr
  functions. (And must be first explicit added through add_hparam.)

  WARNING: Use of '.' in your variable names is allowed, but is not well
  supported and not recommended.

  The `value` in `name=value` must follows the syntax according to the
  type of the parameter:

  *  Scalar integer: A Python-parsable integer point value.  E.g.: 1,
     100, -12.
  *  Scalar float: A Python-parsable floating point value.  E.g.: 1.0,
     -.54e89.
  *  Boolean: Either true or false.
  *  Scalar string: A non-empty sequence of characters, excluding comma,
     spaces, and square brackets.  E.g.: foo, bar_1.
  *  List: A comma separated list of scalar values of the parameter type
     enclosed in square brackets.  E.g.: [1,2,3], [1.0,1e-12], [high,low].

  When index assignment is used, the corresponding type_map key should be the
  list name.  E.g. for "arr[1]=0" the type_map must have the key "arr" (not
  "arr[1]").

  Args:
    values: String.  Comma separated list of `name=value` pairs where
      'value' must follow the syntax described above.
    type_map: A dictionary mapping hyperparameter names to types.  Note every
      parameter name in values must be a key in type_map.  The values must
      conform to the types indicated, where a value V is said to conform to a
      type T if either V has type T, or V is a list of elements of type T.
      Hence, for a multidimensional parameter 'x' taking float values,
      'x=[0.1,0.2]' will parse successfully if type_map['x'] = float.
    ignore_unknown: Bool. Whether values that are missing a type in type_map
      should be ignored. If set to True, a ValueError will not be raised for
      unknown hyperparameter type.

  Returns:
    A python map mapping each name to either:
    * A scalar value.
    * A list of scalar values.
    * A dictionary mapping index numbers to scalar values.
    (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}")

  Raises:
    ValueError: If there is a problem with input.
    * If `values` cannot be parsed.
    * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]').
    * If the same rvalue is assigned two different values (e.g. 'a=1,a=2',
      'a[1]=1,a[1]=2', or 'a=1,a=[1]')
  """
  results_dictionary = {}
  pos = 0
  while pos < len(values):
    m = PARAM_RE.match(values, pos)
    if not m:
      raise ValueError('Malformed hyperparameter value: %s' % values[pos:])
    # Advance past the clause just matched; the regex itself consumes the
    # trailing comma (if any), so no separate comma check is needed.
    pos = m.end()
    # Parse the values.
    m_dict = m.groupdict()
    name = m_dict['name']
    if name not in type_map:
      if ignore_unknown:
        continue
      raise ValueError('Unknown hyperparameter type for %s' % name)
    type_ = type_map[name]

    # Set up correct parsing function (depending on whether type_ is a bool).
    # Booleans accept 'true'/'True'/'false'/'False' or any integer literal
    # (nonzero -> True); bool('false') would otherwise be truthy.
    if type_ == bool:

      def parse_bool(value):
        if value in ['true', 'True']:
          return True
        elif value in ['false', 'False']:
          return False
        else:
          try:
            return bool(int(value))
          except ValueError:
            _parse_fail(name, type_, value, values)

      parse = parse_bool
    else:
      parse = type_

    # If a single value is provided.
    if m_dict['val'] is not None:
      _process_scalar_value(name, parse, type_, m_dict, values,
                            results_dictionary)

    # If the assigned value is a list:
    elif m_dict['vals'] is not None:
      _process_list_value(name, parse, type_, m_dict, values,
                          results_dictionary)

    else:  # Not assigned a list or value
      _parse_fail(name, type_, '', values)

  return results_dictionary
class HParams(object):
"""Class to hold a set of hyperparameters as name-value pairs.
A `HParams` object holds hyperparameters used to build and train a model,
such as the number of hidden units in a neural net layer or the learning rate
to use when training.
You first create a `HParams` object by specifying the names and values of the
hyperparameters.
To make them easily accessible the parameter names are added as direct
attributes of the class. A typical usage is as follows:
```python
# Create a HParams object specifying names and values of the model
# hyperparameters:
hparams = HParams(learning_rate=0.1, num_hidden_units=100)
# The hyperparameter are available as attributes of the HParams object:
hparams.learning_rate ==> 0.1
hparams.num_hidden_units ==> 100
```
Hyperparameters have type, which is inferred from the type of their value
passed at construction type. The currently supported types are: integer,
float, boolean, string, and list of integer, float, boolean, or string.
You can override hyperparameter values by calling the
[`parse()`](#HParams.parse) method, passing a string of comma separated
`name=value` pairs. This is intended to make it possible to override
any hyperparameter values from a single command-line flag to which
the user passes 'hyper-param=value' pairs. It avoids having to define
one flag for each hyperparameter.
The syntax expected for each value depends on the type of the parameter.
See `parse()` for a description of the syntax.
Example:
```python
# Define a command line flag to pass name=value pairs.
# For example using argparse:
import argparse
parser = argparse.ArgumentParser(description='Train my model.')
parser.add_argument('--hparams', type=str,
help='Comma separated list of "name=value" pairs.')
args = parser.parse_args()
...
def my_program():
# Create a HParams object specifying the names and values of the
# model hyperparameters:
hparams = tf.contrib.training.HParams(
learning_rate=0.1,
num_hidden_units=100,
activations=['relu', 'tanh'])
# Override hyperparameters values by parsing the command line
hparams.parse(args.hparams)
# If the user passed `--hparams=learning_rate=0.3` on the command line
# then 'hparams' has the following attributes:
hparams.learning_rate ==> 0.3
hparams.num_hidden_units ==> 100
hparams.activations ==> ['relu', 'tanh']
# If the hyperparameters are in json format use parse_json:
hparams.parse_json('{"learning_rate": 0.3, "activations": "relu"}')
```
"""
_HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks.
def __init__(self, hparam_def=None, model_structure=None, **kwargs):
"""Create an instance of `HParams` from keyword arguments.
The keyword arguments specify name-values pairs for the hyperparameters.
The parameter types are inferred from the type of the values passed.
The parameter names are added as attributes of `HParams` object, so they
can be accessed directly with the dot notation `hparams._name_`.
Example:
```python
# Define 3 hyperparameters: 'learning_rate' is a float parameter,
# 'num_hidden_units' an integer parameter, and 'activation' a string
# parameter.
hparams = tf.contrib.training.HParams(
learning_rate=0.1, num_hidden_units=100, activation='relu')
hparams.activation ==> 'relu'
```
Note that a few names are reserved and cannot be used as hyperparameter
names. If you use one of the reserved name the constructor raises a
`ValueError`.
Args:
hparam_def: Serialized hyperparameters, encoded as a hparam_pb2.HParamDef
protocol buffer. If provided, this object is initialized by
deserializing hparam_def. Otherwise **kwargs is used.
model_structure: An instance of ModelStructure, defining the feature
crosses to be used in the Trial.
**kwargs: Key-value pairs where the key is the hyperparameter name and
the value is the value for the parameter.
Raises:
ValueError: If both `hparam_def` and initialization values are provided,
or if one of the arguments is invalid.
"""
# Register the hyperparameters and their type in _hparam_types.
# This simplifies the implementation of parse().
# _hparam_types maps the parameter name to a tuple (type, bool).
# The type value is the type of the parameter for scalar hyperparameters,
# or the type of the list elements for multidimensional hyperparameters.
# The bool value is True if the value is a list, False otherwise.
self._hparam_types = {}
self._model_structure = model_structure
if hparam_def:
self._init_from_proto(hparam_def)
if kwargs:
raise ValueError('hparam_def and initialization values are '
'mutually exclusive')
else:
for name, value in six.iteritems(kwargs):
self.add_hparam(name, value)
  def _init_from_proto(self, hparam_def):
    """Creates a new HParams from `HParamDef` protocol buffer.

    Each proto entry is a oneof whose field name encodes both the element
    type (int64/float/bool/bytes) and arity (_value for scalars, suffix
    for lists); values are re-registered through add_hparam so the type
    table is rebuilt.

    Args:
      hparam_def: `HParamDef` protocol buffer.
    """
    assert isinstance(hparam_def, hparam_pb2.HParamDef)
    for name, value in hparam_def.hparam.items():
      kind = value.WhichOneof('kind')
      if kind.endswith('_value'):
        # Single value.
        if kind.startswith('int64'):
          # Setting attribute value to be 'int' to ensure the type is compatible
          # with both Python2 and Python3.
          self.add_hparam(name, int(getattr(value, kind)))
        elif kind.startswith('bytes'):
          # Setting attribute value to be 'str' to ensure the type is compatible
          # with both Python2 and Python3. UTF-8 encoding is assumed.
          self.add_hparam(name, compat.as_str(getattr(value, kind)))
        else:
          self.add_hparam(name, getattr(value, kind))
      else:
        # List of values.
        if kind.startswith('int64'):
          # Setting attribute value to be 'int' to ensure the type is compatible
          # with both Python2 and Python3.
          self.add_hparam(name, [int(v) for v in getattr(value, kind).value])
        elif kind.startswith('bytes'):
          # Setting attribute value to be 'str' to ensure the type is compatible
          # with both Python2 and Python3. UTF-8 encoding is assumed.
          self.add_hparam(
              name, [compat.as_str(v) for v in getattr(value, kind).value])
        else:
          self.add_hparam(name, [v for v in getattr(value, kind).value])
def add_hparam(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
"""
# Keys in kwargs are unique, but 'name' could the name of a pre-existing
# attribute of this object. In that case we refuse to use it as a
# hyperparameter name.
if getattr(self, name, None) is not None:
raise ValueError('Hyperparameter name is reserved: %s' % name)
if isinstance(value, (list, tuple)):
if not value:
raise ValueError(
'Multi-valued hyperparameters cannot be empty: %s' % name)
self._hparam_types[name] = (type(value[0]), True)
else:
self._hparam_types[name] = (type(value), False)
setattr(self, name, value)
def set_hparam(self, name, value):
"""Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
KeyError: If the hyperparameter doesn't exist.
ValueError: If there is a type mismatch.
"""
param_type, is_list = self._hparam_types[name]
if isinstance(value, list):
if not is_list:
raise ValueError(
'Must not pass a list for single-valued parameter: %s' % name)
setattr(self, name, [
_cast_to_type_if_compatible(name, param_type, v) for v in value])
else:
if is_list:
raise ValueError(
'Must pass a list for multi-valued parameter: %s.' % name)
setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))
def del_hparam(self, name):
"""Removes the hyperparameter with key 'name'.
Does nothing if it isn't present.
Args:
name: Name of the hyperparameter.
"""
if hasattr(self, name):
delattr(self, name)
del self._hparam_types[name]
def parse(self, values):
"""Override existing hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where 'value'
must follow the syntax described above.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values` cannot be parsed or a hyperparameter in `values`
doesn't exist.
"""
type_map = {}
for name, t in self._hparam_types.items():
param_type, _ = t
type_map[name] = param_type
values_map = parse_values(values, type_map)
return self.override_from_dict(values_map)
def override_from_dict(self, values_dict):
"""Override existing hyperparameter values, parsing new values from a dictionary.
Args:
values_dict: Dictionary of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_dict` doesn't exist.
ValueError: If `values_dict` cannot be parsed.
"""
for name, value in values_dict.items():
self.set_hparam(name, value)
return self
  @deprecation.deprecated(None, 'Use `override_from_dict`.')
  def set_from_map(self, values_map):
    """DEPRECATED. Use override_from_dict."""
    # Thin backwards-compatibility alias; the decorator logs a warning.
    return self.override_from_dict(values_dict=values_map)
  def set_model_structure(self, model_structure):
    """Attach a `ModelStructure` describing the Trial's feature crosses."""
    self._model_structure = model_structure
  def get_model_structure(self):
    """Return the `ModelStructure` set via the constructor/setter, or None."""
    return self._model_structure
def to_json(self, indent=None, separators=None, sort_keys=False):
"""Serializes the hyperparameters into JSON.
Args:
indent: If a non-negative integer, JSON array elements and object members
will be pretty-printed with that indent level. An indent level of 0, or
negative, will only insert newlines. `None` (the default) selects the
most compact representation.
separators: Optional `(item_separator, key_separator)` tuple. Default is
`(', ', ': ')`.
sort_keys: If `True`, the output dictionaries will be sorted by key.
Returns:
A JSON string.
"""
return json.dumps(
self.values(),
indent=indent,
separators=separators,
sort_keys=sort_keys)
def parse_json(self, values_json):
"""Override existing hyperparameter values, parsing new values from a json object.
Args:
values_json: String containing a json object of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_json` doesn't exist.
ValueError: If `values_json` cannot be parsed.
"""
values_map = json.loads(values_json)
return self.override_from_dict(values_map)
def values(self):
"""Return the hyperparameter values as a Python dictionary.
Returns:
A dictionary with hyperparameter names as keys. The values are the
hyperparameter values.
"""
return {n: getattr(self, n) for n in self._hparam_types.keys()}
  def get(self, key, default=None):
    """Returns the value of `key` if it exists, else `default`.

    Raises:
      ValueError: If `default` is provided and its arity (scalar vs list)
        or element type is incompatible with the declared type of `key`.
    """
    if key in self._hparam_types:
      # Ensure that default is compatible with the parameter type.
      if default is not None:
        param_type, is_param_list = self._hparam_types[key]
        type_str = 'list<%s>' % param_type if is_param_list else str(param_type)
        fail_msg = ("Hparam '%s' of type '%s' is incompatible with "
                    'default=%s' % (key, type_str, default))

        # Scalar hparams need a scalar default, list hparams a list default.
        is_default_list = isinstance(default, list)
        if is_param_list != is_default_list:
          raise ValueError(fail_msg)

        # Type-check the default (each element of it, for lists) without
        # keeping the cast result -- only compatibility matters here.
        try:
          if is_default_list:
            for value in default:
              _cast_to_type_if_compatible(key, param_type, value)
          else:
            _cast_to_type_if_compatible(key, param_type, default)
        except ValueError as e:
          raise ValueError('%s. %s' % (fail_msg, e))

      return getattr(self, key)

    return default
def __contains__(self, key):
    # Membership (`'name' in hparams`) is defined by the registered
    # hyperparameter names, not by arbitrary instance attributes.
    return key in self._hparam_types
def __str__(self):
    """Render the hyperparameters as a comma-separated `name=value` list."""
    return ','.join('{}={}'.format(name, value)
                    for name, value in self.values().items())
def __repr__(self):
    """Unambiguous representation: class name plus sorted (name, value) pairs."""
    return '{}({})'.format(type(self).__name__, sorted(self.values().items()))
@staticmethod
def _get_kind_name(param_type, is_list):
    """Returns the field name given parameter type and is_list.

    Args:
      param_type: Data type of the hparam.
      is_list: Whether this is a list.

    Returns:
      A string representation of the field name.

    Raises:
      ValueError: If parameter type is not recognized.
    """
    # Ordered dispatch table. bool must be tested before the integer types
    # because Python considers bool a subclass of int. 'int'/'long' map to
    # 'int64' and 'string'/'bytes' map to 'bytes' so the names are stable
    # across Python 2 and Python 3.
    type_table = (
        (bool, 'bool'),
        (six.integer_types, 'int64'),
        ((six.string_types, six.binary_type), 'bytes'),
        (float, 'float'),
    )
    for bases, typename in type_table:
        if issubclass(param_type, bases):
            break
    else:
        raise ValueError('Unsupported parameter type: %s' % str(param_type))
    return '_'.join([typename, 'list' if is_list else 'value'])
def to_proto(self, export_scope=None):  # pylint: disable=unused-argument
    """Converts a `HParams` object to a `HParamDef` protocol buffer.

    Args:
      export_scope: Optional `string`. Name scope to remove.

    Returns:
      A `HParamDef` protocol buffer.
    """
    hparam_proto = hparam_pb2.HParamDef()
    for name in self._hparam_types:
        # Parse the values.
        param_type, is_list = self._hparam_types.get(name, (None, None))
        # Kind selects which oneof/repeated field of HParamDef to populate,
        # e.g. 'int64_value' or 'bytes_list'.
        kind = HParams._get_kind_name(param_type, is_list)
        if is_list:
            if kind.startswith('bytes'):
                # Protobuf stores strings as bytes; normalize each element.
                v_list = [compat.as_bytes(v) for v in getattr(self, name)]
            else:
                v_list = [v for v in getattr(self, name)]
            getattr(hparam_proto.hparam[name], kind).value.extend(v_list)
        else:
            v = getattr(self, name)
            if kind.startswith('bytes'):
                v = compat.as_bytes(getattr(self, name))
            setattr(hparam_proto.hparam[name], kind, v)
    return hparam_proto
@staticmethod
def from_proto(hparam_def, import_scope=None):  # pylint: disable=unused-argument
    """Recreates an `HParams` instance from a `HParamDef` protocol buffer."""
    return HParams(hparam_def=hparam_def)
# Register serialization hooks so `HParams` objects stored in the 'hparams'
# graph collection round-trip through `HParamDef` protos.
ops.register_proto_function(
    'hparams',
    proto_type=hparam_pb2.HParamDef,
    to_proto=HParams.to_proto,
    from_proto=HParams.from_proto)
| mlperf/training_results_v0.7 | Google/benchmarks/bert/implementations/bert-research-TF-tpu-v4-16/hparam.py | Python | apache-2.0 | 27,205 |
"""
Third Party Auth REST API views
"""
from collections import namedtuple
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import Http404
from django.urls import reverse
from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication
from edx_rest_framework_extensions.auth.session.authentication import SessionAuthenticationAllowInactiveUser
from rest_framework import exceptions, permissions, status, throttling
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from social_django.models import UserSocialAuth
from openedx.core.lib.api.authentication import (
BearerAuthentication,
BearerAuthenticationAllowInactiveUser
)
from openedx.core.lib.api.permissions import ApiKeyHeaderPermission
from third_party_auth import pipeline
from third_party_auth.api import serializers
from third_party_auth.api.permissions import TPA_PERMISSIONS
from third_party_auth.provider import Registry
from common.djangoapps.third_party_auth.api.utils import filter_user_social_auth_queryset_by_provider
class ProviderBaseThrottle(throttling.UserRateThrottle):
    """
    Base throttle for provider queries.

    Rate limits apply only to unprivileged lookups; privileged callers
    (self-queries, superusers, API-key holders) are never throttled.
    """

    def allow_request(self, request, view):
        """
        Only throttle unprivileged requests.
        """
        identifier = view.get_identifier_for_requested_user(request)
        if not view.is_unprivileged_query(request, identifier):
            # Privileged request: bypass rate limiting entirely.
            return True
        return super(ProviderBaseThrottle, self).allow_request(request, view)
class ProviderBurstThrottle(ProviderBaseThrottle):
    """
    Maximum number of provider requests in a quick burst.
    """
    # DRF rate string in "count/period" form.
    rate = settings.TPA_PROVIDER_BURST_THROTTLE  # Default '10/min'
class ProviderSustainedThrottle(ProviderBaseThrottle):
    """
    Maximum number of provider requests over time.
    """
    # DRF rate string in "count/period" form.
    rate = settings.TPA_PROVIDER_SUSTAINED_THROTTLE  # Default '50/day'
class BaseUserView(APIView):
    """
    Common core of UserView and UserViewV2
    """

    # A user lookup key: `kind` is one of `identifier_kinds` below and
    # `value` is the email address or username being looked up.
    identifier = namedtuple('identifier', ['kind', 'value'])
    identifier_kinds = ['email', 'username']
    authentication_classes = (
        # Users may want to view/edit the providers used for authentication before they've
        # activated their account, so we allow inactive users.
        BearerAuthenticationAllowInactiveUser,
        SessionAuthenticationAllowInactiveUser,
    )
    throttle_classes = [ProviderSustainedThrottle, ProviderBurstThrottle]

    def do_get(self, request, identifier):
        """
        Fulfill the request, now that the identifier has been specified.

        Returns 403 for unprivileged queries when the deployment has not
        opted in, 404 for unknown users, otherwise the list of currently
        linked ("active") providers for the user.
        """
        is_unprivileged = self.is_unprivileged_query(request, identifier)
        if is_unprivileged:
            # Unprivileged lookups are disabled unless explicitly enabled
            # via settings.ALLOW_UNPRIVILEGED_SSO_PROVIDER_QUERY.
            if not getattr(settings, 'ALLOW_UNPRIVILEGED_SSO_PROVIDER_QUERY', False):
                return Response(status=status.HTTP_403_FORBIDDEN)
        try:
            # identifier.kind is 'email' or 'username', so this is a lookup
            # by exactly one of those fields.
            user = User.objects.get(**{identifier.kind: identifier.value})
        except User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        providers = pipeline.get_provider_user_states(user)
        active_providers = [
            self.get_provider_data(assoc, is_unprivileged)
            for assoc in providers if assoc.has_account
        ]
        # In the future this can be trivially modified to return the inactive/disconnected providers as well.
        return Response({
            "active": active_providers
        })

    def get_provider_data(self, assoc, is_unprivileged):
        """
        Return the data for the specified provider.

        If the request is unprivileged, do not return the remote ID of the user.
        """
        provider_data = {
            "provider_id": assoc.provider.provider_id,
            "name": assoc.provider.name,
        }
        if not is_unprivileged:
            # remote_id (the user's ID at the provider) is sensitive, so it
            # is only exposed to privileged callers.
            provider_data["remote_id"] = assoc.remote_id
        return provider_data

    def is_unprivileged_query(self, request, identifier):
        """
        Return True if a non-superuser requests information about another user.

        Params must be a dict that includes only one of 'username' or 'email'
        """
        if identifier.kind not in self.identifier_kinds:
            # This is already checked before we get here, so raise a 500 error
            # if the check fails.
            raise ValueError(u"Identifier kind {} not in {}".format(identifier.kind, self.identifier_kinds))
        self_request = False
        if identifier == self.identifier('username', request.user.username):
            self_request = True
        elif identifier.kind == 'email' and getattr(identifier, 'value', object()) == request.user.email:
            # AnonymousUser does not have an email attribute, so fall back to
            # something that will never compare equal to the provided email.
            # NOTE(review): `identifier` is a namedtuple that always has a
            # `.value` field, so the object() default can never be hit here;
            # the comment above appears to describe request.user.email instead.
            self_request = True
        if self_request:
            # We can always ask for our own provider
            return False
        # We are querying permissions for a user other than the current user.
        if not request.user.is_superuser and not ApiKeyHeaderPermission().has_permission(request, self):
            # The user does not have elevated permissions.
            return True
        return False
class UserView(BaseUserView):
    """
    List the third party auth accounts linked to the specified user account.

    [DEPRECATED]
    This view uses heuristics to guess whether the provided identifier is a
    username or email address. Instead, use /api/third_party_auth/v0/users/
    and specify ?username=foo or ?email=foo@exmaple.com.

    **Example Request**

        GET /api/third_party_auth/v0/users/{username}

        GET /api/third_party_auth/v0/users/{email@example.com}

    **Response Values**

        If the request for information about the user is successful, an HTTP 200 "OK" response
        is returned.

        The HTTP 200 response has the following values.

        * active: A list of all the third party auth providers currently linked
          to the given user's account. Each object in this list has the
          following attributes:

            * provider_id: The unique identifier of this provider (string)
            * name: The name of this provider (string)
            * remote_id: The ID of the user according to the provider. This ID
              is what is used to link the user to their edX account during
              login.
    """

    def get(self, request, username):
        """Read provider information for a user.

        Allows reading the list of providers for a specified user.

        Args:
            request (Request): The HTTP GET request
            username (str): Fetch the list of providers linked to this user

        Return:
            JSON serialized list of the providers linked to this user.
        """
        identifier = self.get_identifier_for_requested_user(request)
        return self.do_get(request, identifier)

    def get_identifier_for_requested_user(self, _request):
        """
        Return an identifier namedtuple for the requested user.
        """
        # Heuristic (hence the deprecation): any '@' in the path segment is
        # taken to mean the caller supplied an email address.
        if u'@' in self.kwargs[u'username']:
            id_kind = u'email'
        else:
            id_kind = u'username'
        return self.identifier(id_kind, self.kwargs[u'username'])
# TODO: When removing deprecated UserView, rename this view to UserView.
class UserViewV2(BaseUserView):
    """
    List the third party auth accounts linked to the specified user account.

    **Example Request**

        GET /api/third_party_auth/v0/users/?username={username}

        GET /api/third_party_auth/v0/users/?email={email@example.com}

    **Response Values**

        If the request for information about the user is successful, an HTTP 200 "OK" response
        is returned.

        The HTTP 200 response has the following values.

        * active: A list of all the third party auth providers currently linked
          to the given user's account. Each object in this list has the
          following attributes:

            * provider_id: The unique identifier of this provider (string)
            * name: The name of this provider (string)
            * remote_id: The ID of the user according to the provider. This ID
              is what is used to link the user to their edX account during
              login.
    """

    def get(self, request):
        """
        Read provider information for a user.

        Allows reading the list of providers for a specified user.

        Args:
            request (Request): The HTTP GET request

        Request Parameters:
            Must provide one of 'email' or 'username'. If both are provided,
            the username will be ignored.

        Return:
            JSON serialized list of the providers linked to this user.
        """
        identifier = self.get_identifier_for_requested_user(request)
        return self.do_get(request, identifier)

    def get_identifier_for_requested_user(self, request):
        """
        Return an identifier namedtuple for the requested user.
        """
        identifier = None
        # identifier_kinds lists 'email' first, so email wins when both
        # query parameters are supplied (as documented in get()).
        for id_kind in self.identifier_kinds:
            if id_kind in request.GET:
                identifier = self.identifier(id_kind, request.GET[id_kind])
                break
        if identifier is None:
            raise exceptions.ValidationError(u"Must provide one of {}".format(self.identifier_kinds))
        return identifier
class UserMappingView(ListAPIView):
    """
    Map between the third party auth account IDs (remote_id) and EdX username.

    This API is intended to be a server-to-server endpoint. An on-campus middleware or system should consume this.

    **Use Case**

        Get a paginated list of mappings between edX users and remote user IDs for all users currently
        linked to the given backend.

        The list can be filtered by edx username or third party ids. The filter is limited by the max length of URL.
        It is suggested to query no more than 50 usernames or remote_ids in each request to stay within above
        limitation

        The page size can be changed by specifying `page_size` parameter in the request.

    **Example Requests**

        GET /api/third_party_auth/v0/providers/{provider_id}/users

        GET /api/third_party_auth/v0/providers/{provider_id}/users?username={username1},{username2}

        GET /api/third_party_auth/v0/providers/{provider_id}/users?username={username1}&usernames={username2}

        GET /api/third_party_auth/v0/providers/{provider_id}/users?remote_id={remote_id1},{remote_id2}

        GET /api/third_party_auth/v0/providers/{provider_id}/users?remote_id={remote_id1}&remote_id={remote_id2}

        GET /api/third_party_auth/v0/providers/{provider_id}/users?username={username1}&remote_id={remote_id1}

    **URL Parameters**

        * provider_id: The unique identifier of third_party_auth provider (e.g. "saml-ubc", "oa2-google", etc.
          This is not the same thing as the backend_name.). (Optional/future: We may also want to allow
          this to be an 'external domain' like 'ssl:MIT' so that this API can also search the legacy
          ExternalAuthMap table used by Standford/MIT)

    **Query Parameters**

        * remote_ids: Optional. List of comma separated remote (third party) user IDs to filter the result set.
          e.g. ?remote_ids=8721384623

        * usernames: Optional. List of comma separated edX usernames to filter the result set.
          e.g. ?usernames=bob123,jane456

        * page, page_size: Optional. Used for paging the result set, especially when getting
          an unfiltered list.

    **Response Values**

        If the request for information about the user is successful, an HTTP 200 "OK" response
        is returned.

        The HTTP 200 response has the following values:

        * count: The number of mappings for the backend.

        * next: The URI to the next page of the mappings.

        * previous: The URI to the previous page of the mappings.

        * num_pages: The number of pages listing the mappings.

        * results:  A list of mappings returned. Each collection in the list
          contains these fields.

            * username: The edx username

            * remote_id: The Id from third party auth provider
    """
    authentication_classes = (JwtAuthentication, BearerAuthentication, )
    permission_classes = (TPA_PERMISSIONS, )
    required_scopes = ['tpa:read']
    serializer_class = serializers.UserMappingSerializer
    # Set by get_queryset(); read back by get_serializer_context().
    provider = None

    def get_queryset(self):
        provider_id = self.kwargs.get('provider_id')

        # provider existence checking
        self.provider = Registry.get(provider_id)
        if not self.provider:
            raise Http404

        query_set = filter_user_social_auth_queryset_by_provider(
            UserSocialAuth.objects.select_related('user'),
            self.provider,
        )

        query = Q()

        # Both repeated parameters (?username=a&username=b) and
        # comma-separated values (?username=a,b) are accepted: the repeated
        # values are re-joined with commas and then split once.
        usernames = self.request.query_params.getlist('username', None)
        remote_ids = self.request.query_params.getlist('remote_id', None)

        if usernames:
            usernames = ','.join(usernames)
            usernames = set(usernames.split(',')) if usernames else set()
            if usernames:
                query = query | Q(user__username__in=usernames)

        if remote_ids:
            remote_ids = ','.join(remote_ids)
            remote_ids = set(remote_ids.split(',')) if remote_ids else set()
            if remote_ids:
                # Translate each provider-facing remote id into the stored
                # social-auth uid before filtering.
                query = query | Q(uid__in=[self.provider.get_social_auth_uid(remote_id) for remote_id in remote_ids])

        return query_set.filter(query)

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class with current provider. We need the provider to
        remove idp_slug from the remote_id if there is any
        """
        context = super(UserMappingView, self).get_serializer_context()
        context['provider'] = self.provider
        return context
class ThirdPartyAuthUserStatusView(APIView):
    """
    Provides an API endpoint for retrieving the linked status of the authenticated
    user with respect to the third party auth providers configured in the system.
    """
    authentication_classes = (
        JwtAuthentication, BearerAuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser
    )
    permission_classes = (permissions.IsAuthenticated,)

    def get(self, request):
        """
        GET /api/third_party_auth/v0/providers/user_status/

        **GET Response Values**
        ```
        {
            "accepts_logins": true,
            "name": "Google",
            "disconnect_url": "/auth/disconnect/google-oauth2/?",
            "connect_url": "/auth/login/google-oauth2/?auth_entry=account_settings&next=%2Faccount%2Fsettings",
            "connected": false,
            "id": "oa2-google-oauth2"
        }
        ```
        """
        tpa_states = []
        for state in pipeline.get_provider_user_states(request.user):
            # We only want to include providers if they are either currently available to be logged
            # in with, or if the user is already authenticated with them.
            if state.provider.display_for_login or state.has_account:
                tpa_states.append({
                    'id': state.provider.provider_id,
                    'name': state.provider.name,  # The name of the provider e.g. Facebook
                    'connected': state.has_account,  # Whether the user's edX account is connected with the provider.
                    # If the user is not connected, they should be directed to this page to authenticate
                    # with the particular provider, as long as the provider supports initiating a login.
                    'connect_url': pipeline.get_login_url(
                        state.provider.provider_id,
                        pipeline.AUTH_ENTRY_ACCOUNT_SETTINGS,
                        # The url the user should be directed to after the auth process has completed.
                        redirect_url=reverse('account_settings'),
                    ),
                    'accepts_logins': state.provider.accepts_logins,
                    # If the user is connected, sending a POST request to this url removes the connection
                    # information for this provider from their edX account.
                    'disconnect_url': pipeline.get_disconnect_url(state.provider.provider_id, state.association_id),
                })
        return Response(tpa_states)
| msegado/edx-platform | common/djangoapps/third_party_auth/api/views.py | Python | agpl-3.0 | 16,714 |
#!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2018 DESY, Jan Kotanski <jkotan@mail.desy.de>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
#
""" Creator of XML configuration files """
import PyTango
import sys
import lxml.etree
# Python 3 has no `unicode` type; alias it to `str` so the Python-2/3
# compatible isinstance checks later in this module keep working.
if sys.version_info > (3,):
    unicode = str
def _tostr(text):
    """ convert bytestr or unicode to python str

    :param text: text to convert
    :type text: :obj:`bytes` or :obj:`unicode` or :obj:`str`
    :returns: converted text
    :rtype: :obj:`str`
    """
    if isinstance(text, str):
        # Already a native string on this interpreter.
        return text
    if sys.version_info > (3,) and isinstance(text, (bytes, unicode)):
        # Python 3: decode bytes as UTF-8 (unicode is aliased to str above).
        return str(text, "utf8")
    return str(text)
class NTag(object):
    """ tag wrapper

    Thin convenience wrapper around an :class:`lxml.etree.Element` that is
    appended to its parent's element on construction.
    """

    def __init__(self, parent, tagName, nameAttr="", typeAttr=""):
        """ constructor

        :param parent: parent tag element
        :type parent: :class:`NTag`
        :param tagName: tag name
        :type tagName: :obj:`str`
        :param nameAttr: value of name attribute
        :type nameAttr: :obj:`str`
        :param typeAttr: value of type attribute
        :type typeAttr: :obj:`str`
        """
        #: (:class:`lxml.etree.Element`) tag element from etree
        self.elem = lxml.etree.Element(tagName)
        parent.elem.append(self.elem)
        # Only emit the attributes that were actually provided.
        for attrib, attrib_value in (("name", nameAttr), ("type", typeAttr)):
            if attrib_value != "":
                self.elem.attrib[attrib] = attrib_value

    def addTagAttr(self, name, value):
        """ adds tag attribute

        :param name: attribute name
        :type name: :obj:`str`
        :param value: attribute value
        :type value: :obj:`str`
        """
        self.elem.attrib[name] = value

    def setText(self, text):
        """ sets tag content

        :param text: tag content
        :type text: :obj:`str`
        """
        self.elem.text = text

    def addText(self, text):
        """ adds tag content

        :param text: tag content
        :type text: :obj:`str`
        """
        self.elem.text = self.elem.text + text
class NAttr(NTag):
    """ Attribute tag wrapper
    """

    def __init__(self, parent, nameAttr, typeAttr=""):
        """ constructor

        :param parent: parent tag element
        :type parent: :class:`NTag`
        :param nameAttr: name attribute
        :type nameAttr: :obj:`str`
        :param typeAttr: type attribute
        :type typeAttr: :obj:`str`
        """
        NTag.__init__(self, parent, "attribute", nameAttr, typeAttr)

    def setStrategy(self, mode="STEP", trigger=None, value=None, canfail=None):
        """ sets the attribute strategy

        :param mode: mode data writing, i.e. INIT, STEP, FINAL, POSTRUN
        :type mode: :obj:`str`
        :param trigger: for asynchronous writting,
                        e.g. with different subentries
        :type trigger: :obj:`str`
        :param value: label for postrun mode
        :type value: :obj:`str`
        :param canfail: can fail strategy flag
        :type canfail: :obj:`bool`
        """
        # Child <strategy> tag describing when/how the attribute is written.
        # (The original guarded this block with `if strategy:`, which was
        # always true since NTag defines neither __bool__ nor __len__.)
        strategy = NTag(self, "strategy")
        strategy.addTagAttr("mode", mode)
        if trigger:
            strategy.addTagAttr("trigger", trigger)
        if canfail:
            strategy.addTagAttr("canfail", "true")
        if value:
            strategy.setText(value)
class NGroup(NTag):
    """ Group tag wrapper
    """

    def __init__(self, parent, nameAttr, typeAttr=""):
        """ constructor

        :param parent: parent tag element
        :type parent: :class:`NTag`
        :param nameAttr: name attribute
        :type nameAttr: :obj:`str`
        :param typeAttr: type attribute
        :type typeAttr: :obj:`str`
        """
        NTag.__init__(self, parent, "group", nameAttr, typeAttr)
        #: (:obj:`list` <:obj:`NTag`>) list of doc tag wrappers
        self._doc = []
        #: (:obj:`dict` <:obj:`str`, :class:`NAttr`>)
        #: container with attribute tag wrappers keyed by attribute name
        self._gAttr = {}

    def addDoc(self, doc):
        """ adds doc tag content

        :param doc: doc tag content
        :type doc: :obj:`str`
        """
        self._doc.append(NTag(self, "doc"))
        self._doc[-1].addText(doc)

    def addAttr(self, attrName, attrType, attrValue=""):
        """ adds attribute tag

        :param attrName: name attribute
        :type attrName: :obj:`str`
        :param attrType: type attribute
        :type attrType: :obj:`str`
        :param attrValue: content of the attribute tag
        :type attrValue: :obj:`str`
        :returns: the created attribute tag wrapper
        :rtype: :class:`NAttr`
        """
        # (A leftover debug print of the arguments was removed here.)
        at = NAttr(self, attrName, attrType)
        self._gAttr[attrName] = at
        if attrValue != "":
            at.setText(attrValue)
        return self._gAttr[attrName]
class NLink(NTag):
    """ Link tag wrapper
    """

    def __init__(self, parent, nameAttr, gTarget):
        """ constructor

        :param parent: parent tag element
        :type parent: :class:`NTag`
        :param nameAttr: name attribute
        :type nameAttr: :obj:`str`
        :param gTarget: target attribute
        :type gTarget: :obj:`str`
        """
        NTag.__init__(self, parent, "link", nameAttr)
        self.addTagAttr("target", gTarget)
        #: (:obj:`list` <:class:`NTag`>) doc tag wrappers added so far
        self._doc = []

    def addDoc(self, doc):
        """ adds doc tag content

        :param doc: doc tag content
        :type doc: :obj:`str`
        """
        doc_tag = NTag(self, "doc")
        doc_tag.addText(doc)
        self._doc.append(doc_tag)
class NDimensions(NTag):
    """ Dimensions tag wrapper
    """

    def __init__(self, parent, rankAttr):
        """ constructor

        :param parent: parent tag element
        :type parent: :class:`NTag`
        :param rankAttr: rank attribute
        :type rankAttr: :obj:`str`
        """
        NTag.__init__(self, parent, "dimensions")
        #: (:obj:`dict` <:obj:`str`, :class:`NDim`>)
        #: dim tag wrappers keyed by their index attribute
        self.dims = {}
        self.addTagAttr("rank", rankAttr)

    def dim(self, indexAttr, valueAttr):
        """ adds dim tag

        :param indexAttr: index attribute
        :type indexAttr: :obj:`str`
        :param valueAttr: value attribute
        :type valueAttr: :obj:`str`
        """
        child = NDim(self, indexAttr, valueAttr)
        self.dims[indexAttr] = child
class NDim(NTag):
    """ Dim tag wrapper
    """

    def __init__(self, parent, indexAttr, valueAttr):
        """ constructor

        :param parent: parent tag element
        :type parent: :class:`NTag`
        :param indexAttr: index attribute
        :type indexAttr: :obj:`str`
        :param valueAttr: value attribute
        :type valueAttr: :obj:`str`
        """
        NTag.__init__(self, parent, "dim")
        # Emit both attributes of the <dim> element.
        for attrib, attrib_value in (("index", indexAttr), ("value", valueAttr)):
            self.addTagAttr(attrib, attrib_value)
class NField(NTag):
    """ Field tag wrapper
    """

    def __init__(self, parent, nameAttr, typeAttr=""):
        """constructor

        :param parent: parent tag element
        :type parent: :class:`NTag`
        :param nameAttr: name attribute
        :type nameAttr: :obj:`str`
        :param typeAttr: type attribute
        :type typeAttr: :obj:`str`
        """
        NTag.__init__(self, parent, "field", nameAttr, typeAttr)
        #: (:obj:`list` <:class:`NTag`>) list of doc tag wrappers
        self._doc = []
        #: (:obj:`dict` <:obj:`str`, :class:`NAttr`>)
        #: container with attribute tag wrappers
        self._attr = {}

    def setStrategy(self, mode="STEP", trigger=None, value=None,
                    grows=None, compression=False, rate=None,
                    shuffle=None, canfail=None, compression_opts=None):
        """ sets the field strategy

        :param mode: mode data writing, i.e. INIT, STEP, FINAL, POSTRUN
        :type mode: :obj:`str`
        :param trigger: for asynchronous writting,
                        e.g. with different subentries
        :type trigger: :obj:`str`
        :param value: label for postrun mode
        :type value: :obj:`str`
        :param grows: growing dimension
        :type grows: :obj:`str`
        :param compression: flag if compression shuold be applied,
                            or the compression filter id
        :type compression: :obj:`str`
        :param rate: compression rate
        :type rate: :obj:`str`
        :param shuffle: flag if compression shuffle
        :type shuffle: :obj:`str`
        :param canfail: can fail strategy flag
        :type canfail: :obj:`bool`
        :param compression_opts: compression filter options
        :type compression_opts: :obj:`list` <:obj:`int`>
        """
        # Child <strategy> tag describing when/how the field is written.
        strategy = NTag(self, "strategy")
        strategy.addTagAttr("mode", mode)
        if grows:
            strategy.addTagAttr("grows", grows)
        if trigger:
            strategy.addTagAttr("trigger", trigger)
        if value:
            strategy.setText(value)
        if canfail:
            strategy.addTagAttr("canfail", "true")
        if compression:
            if int(compression) == 1:
                # Filter id 1 means the default (zlib) compression.
                strategy.addTagAttr("compression", "true")
                if rate is not None:
                    strategy.addTagAttr("rate", str(rate))
            else:
                # Other filter ids are emitted verbatim. The value is
                # stringified: lxml attribute values must be strings, so
                # passing the raw int here (as the original did) raises
                # TypeError at runtime.
                strategy.addTagAttr("compression", str(int(compression)))
                if compression_opts:
                    strategy.addTagAttr(
                        "compression_opts",
                        ",".join([str(opts) for opts in compression_opts]))
            if shuffle is not None:
                strategy.addTagAttr(
                    "shuffle",
                    "true" if shuffle else "false")

    def setUnits(self, unitsAttr):
        """ sets the field unit

        :param unitsAttr: the field unit
        :type unitsAttr: :obj:`str`
        """
        self.addTagAttr("units", unitsAttr)

    def addDoc(self, doc):
        """ adds doc tag content

        :param doc: doc tag content
        :type doc: :obj:`str`
        """
        self._doc.append(NTag(self, "doc"))
        self._doc[-1].addText(doc)

    def addAttr(self, attrName, attrType, attrValue=""):
        """ adds attribute tag

        :param attrName: name attribute
        :type attrName: :obj:`str`
        :param attrType: type attribute
        :type attrType: :obj:`str`
        :param attrValue: content of the attribute tag
        :type attrValue: :obj:`str`
        :returns: the created attribute tag wrapper
        :rtype: :class:`NAttr`
        """
        self._attr[attrName] = NAttr(self, attrName, attrType)
        if attrValue != '':
            self._attr[attrName].setText(attrValue)
        return self._attr[attrName]
class NDSource(NTag):
    """ Source tag wrapper
    """

    def __init__(self, parent):
        """ constructor

        :param parent: parent tag element
        :type parent: :class:`NTag`
        """
        NTag.__init__(self, parent, "datasource")
        #: list of doc tag contents
        self._doc = []

    def initDBase(self, name, dbtype, query, dbname=None, rank=None,
                  mycnf=None, user=None,
                  passwd=None, dsn=None, mode=None, host=None, port=None):
        """ sets parameters of DataBase

        :param name: name of datasource
        :type name: :obj:`str`
        :param dbname: name of used DataBase
        :type dbname: :obj:`str`
        :param query: database query
        :type query: :obj:`str`
        :param dbtype: type of the database, i.e. MYSQL, PGSQL, ORACLE
        :type dbtype: :obj:`str`
        :param rank: rank of the query output, i.e. SCALAR, SPECTRUM, IMAGE
        :type rank: :obj:`str`
        :param mycnf: MYSQL config file
        :type mycnf: :obj:`str`
        :param user: database user name
        :type user: :obj:`str`
        :param passwd: database user password
        :type passwd: :obj:`str`
        :param dsn: DSN string to initialize ORACLE and PGSQL databases
        :type dsn: :obj:`str`
        :param mode: mode for ORACLE databases, i.e. SYSDBA or SYSOPER
        :type mode: :obj:`str`
        :param host: name of the host
        :type host: :obj:`str`
        :param port: port number
        :type port: :obj:`str`
        """
        self.addTagAttr("type", "DB")
        self.addTagAttr("name", name)
        # <database> child: connection parameters; only the arguments that
        # were actually provided are emitted as attributes.
        da = NTag(self, "database")
        da.addTagAttr("dbtype", dbtype)
        if host:
            da.addTagAttr("hostname", host)
        if port:
            da.addTagAttr("port", port)
        if dbname:
            da.addTagAttr("dbname", dbname)
        if user:
            da.addTagAttr("user", user)
        if passwd:
            da.addTagAttr("passwd", passwd)
        if mycnf:
            da.addTagAttr("mycnf", mycnf)
        if mode:
            da.addTagAttr("mode", mode)
        if dsn:
            da.addText(dsn)
        # <query> child: the SQL text, with optional result format.
        da = NTag(self, "query")
        if rank:
            da.addTagAttr("format", rank)
        da.addText(query)

    def initTango(self, name, device, memberType, recordName, host=None,
                  port=None, encoding=None, group=None):
        """ sets paramters for Tango device

        :param name: name of datasource
        :type name: :obj:`str`
        :param device: device name
        :type device: :obj:`str`
        :param memberType: type of the data object, i.e. attribute,
                           property, command
        :type memberType: :obj:`str`
        :param recordName: name of the data object
        :type recordName: :obj:`str`
        :param host: host name
        :type host: :obj:`str`
        :param port: port
        :type port: :obj:`str`
        :param encoding: encoding of DevEncoded data
        :type encoding: :obj:`str`
        :param group: datasource group name
        :type group: :obj:`str`
        """
        self.addTagAttr("type", "TANGO")
        self.addTagAttr("name", name)
        # <device> child describes where the data comes from.
        dv = NTag(self, "device")
        dv.addTagAttr("name", device)
        if memberType:
            dv.addTagAttr("member", memberType)
        if host:
            dv.addTagAttr("hostname", host)
        if port:
            dv.addTagAttr("port", port)
        if encoding:
            dv.addTagAttr("encoding", encoding)
        if group:
            dv.addTagAttr("group", group)
        # <record> child names the attribute/property/command to read.
        da = NTag(self, "record")
        da.addTagAttr("name", recordName)

    def initClient(self, name, recordName):
        """ sets paramters for Client data

        :param name: name of datasource
        :type name: :obj:`str`
        :param recordName: name of the data object
        :type recordName: :obj:`str`
        """
        self.addTagAttr("type", "CLIENT")
        self.addTagAttr("name", name)
        da = NTag(self, "record")
        da.addTagAttr("name", recordName)

    def addDoc(self, doc):
        """ adds doc tag content

        :param doc: doc tag content
        :type doc: :obj:`str`
        """
        self._doc.append(NTag(self, "doc"))
        self._doc[-1].addText(doc)
class NDeviceGroup(NGroup):
    """ Tango device tag creator

    Introspects a live Tango device and creates a NeXus group with
    fields/datasources for its properties, attributes and (optionally)
    argument-less commands.
    """

    #: (:obj:`list` < :obj:`str`>) Tango types
    tTypes = ["DevVoid",
              "DevBoolean",
              "DevShort",
              "DevLong",
              "DevFloat",
              "DevDouble",
              "DevUShort",
              "DevULong",
              "DevString",
              "DevVarCharArray",
              "DevVarShortArray",
              "DevVarLongArray",
              "DevVarFloatArray",
              "DevVarDoubleArray",
              "DevVarUShortArray",
              "DevVarULongArray",
              "DevVarStringArray",
              "DevVarLongStringArray",
              "DevVarDoubleStringArray",
              "DevState",
              "ConstDevString",
              "DevVarBooleanArray",
              "DevUChar",
              "DevLong64",
              "DevULong64",
              "DevVarLong64Array",
              "DevVarULong64Array",
              "DevInt",
              "DevEncoded"]
    #: (:obj:`list` <:obj:`str`>) NeXuS types corresponding to the Tango types
    #: (parallel to tTypes: nTypes[i] maps tTypes[i]).
    nTypes = ["NX_CHAR",
              "NX_BOOLEAN",
              "NX_INT32",
              "NX_INT32",
              "NX_FLOAT32",
              "NX_FLOAT64",
              "NX_UINT32",
              "NX_UINT32",
              "NX_CHAR",
              "NX_CHAR",
              "NX_INT32",
              "NX_INT32",
              "NX_FLOAT32",
              "NX_FLOAT64",
              "NX_UINT32",
              "NX_UINT32",
              "NX_CHAR",
              "NX_CHAR",
              "NX_CHAR",
              "NX_CHAR",
              "NX_CHAR",
              "NX_BOOLEAN",
              "NX_CHAR",
              "NX_INT64",
              "NX_UINT64",
              "NX_INT64",
              "NX_UINT64",
              "NX_INT32",
              "NX_CHAR"]

    def __init__(self, parent, deviceName, nameAttr, typeAttr="",
                 commands=True, blackAttrs=None):
        """ constructor

        :param parent: parent tag element
        :type parent: :class:`NTag`
        :param deviceName: tango device name
        :type deviceName: :obj:`str`
        :param nameAttr: name attribute
        :type nameAttr: :obj:`str`
        :param typeAttr: type attribute
        :type typeAttr: :obj:`str`
        :param commands: if we call the commands
        :type commands: :obj:`bool`
        :param blackAttrs: list of excluded attributes
        :type blackAttrs: :obj:`list` <:obj:`str`>
        """
        NGroup.__init__(self, parent, nameAttr, typeAttr)
        #: (:class:`PyTango.DeviceProxy`) device proxy
        self._proxy = PyTango.DeviceProxy(deviceName)
        #: (:obj:`dict` <:obj:`str`, :class:`NTag`>) fields of the device
        self._fields = {}
        #: (:obj:`list` <:obj:`str`>) blacklist for Attributes
        self._blackAttrs = blackAttrs if blackAttrs else []
        #: (:obj:`str`) the device name
        self._deviceName = deviceName

        self._fetchProperties()
        self._fetchAttributes()
        if commands:
            self._fetchCommands()

    def _fetchProperties(self):
        """ fetches properties

        :brief: It collects the device properties
        """
        prop = self._proxy.get_property_list('*')
        print("PROPERIES %s" % prop)
        for pr in prop:
            # Each property becomes both a group attribute (with its current
            # value) and a STEP field backed by a TANGO datasource.
            self.addAttr(pr, "NX_CHAR",
                         str(self._proxy.get_property(pr)[pr][0]))
            if pr not in self._fields:
                self._fields[pr] = NField(self, pr, "NX_CHAR")
                self._fields[pr].setStrategy("STEP")
                sr = NDSource(self._fields[pr])
                # NOTE(review): host/port are hard-coded to a DESY machine.
                sr.initTango(
                    self._deviceName, self._deviceName, "property",
                    pr, host="haso228k.desy.de", port="10000")

    def _fetchAttributes(self):
        """ fetches Attributes

        :brief: collects the device attributes
        """
        #: device attirbutes
        attr = self._proxy.get_attribute_list()
        for at in attr:
            print(at)
            cf = self._proxy.attribute_query(at)
            print("QUERY")
            print(cf)
            print(cf.name)
            print(cf.data_format)
            print(cf.standard_unit)
            print(cf.display_unit)
            print(cf.unit)
            print(self.tTypes[cf.data_type])
            print(self.nTypes[cf.data_type])
            print(cf.data_type)
            if at not in self._fields and at not in self._blackAttrs:
                self._fields[at] = NField(self, at, self.nTypes[cf.data_type])
                encoding = None
                # SPECTRUM/IMAGE attributes get a <dimensions> tag sized
                # from a live read of the attribute.
                if str(cf.data_format).split('.')[-1] == "SPECTRUM":
                    da = self._proxy.read_attribute(at)
                    d = NDimensions(self._fields[at], "1")
                    d.dim("1", str(da.dim_x))
                    if str(da.type) == 'DevEncoded':
                        encoding = 'VDEO'
                if str(cf.data_format).split('.')[-1] == "IMAGE":
                    da = self._proxy.read_attribute(at)
                    d = NDimensions(self._fields[at], "2")
                    d.dim("1", str(da.dim_x))
                    d.dim("2", str(da.dim_y))
                    if str(da.type) == 'DevEncoded':
                        encoding = 'VDEO'
                if cf.unit != 'No unit':
                    self._fields[at].setUnits(cf.unit)
                    # NOTE(review): duplicate call below is redundant — it
                    # re-sets the same "units" attribute to the same value.
                    self._fields[at].setUnits(cf.unit)
                if cf.description != 'No description':
                    self._fields[at].addDoc(cf.description)
                self.addAttr('URL', "NX_CHAR", "tango://" + self._deviceName)
                self._fields[at].setStrategy("STEP")
                sr = NDSource(self._fields[at])
                # NOTE(review): host/port are hard-coded to a DESY machine.
                sr.initTango(self._deviceName, self._deviceName, "attribute",
                             at, host="haso228k.desy.de", port="10000",
                             encoding=encoding)

    def _fetchCommands(self):
        """ fetches commands

        :brief: It collects results of the device commands
        """
        #: list of the device commands
        cmd = self._proxy.command_list_query()
        print("COMMANDS %s" % cmd)
        for cd in cmd:
            # Only commands that take no input, produce output, and have a
            # representable output type become fields.
            if str(cd.in_type).split(".")[-1] == "DevVoid" \
                    and str(cd.out_type).split(".")[-1] != "DevVoid" \
                    and str(cd.out_type).split(".")[-1] in self.tTypes \
                    and cd.cmd_name not in self._fields:
                self._fields[cd.cmd_name] = \
                    NField(
                        self, cd.cmd_name,
                        self.nTypes[self.tTypes.index(
                            str(cd.out_type).split(".")[-1])])
                self._fields[cd.cmd_name].setStrategy("STEP")
                sr = NDSource(self._fields[cd.cmd_name])
                # NOTE(review): host/port are hard-coded to a DESY machine.
                sr.initTango(self._deviceName, self._deviceName,
                             "command", cd.cmd_name,
                             host="haso228k.desy.de", port="10000")
class XMLFile(object):

    """ XML document rooted at a <definition> element and bound
        to an output file name
    """

    def __init__(self, fname):
        """ constructor

        :param fname: XML file name
        :type fname: :obj:`str`
        """
        #: (:obj:`str`) XML file name
        self.fname = fname
        #: (:class:`lxml.etree.Element`) XML root instance
        self.elem = lxml.etree.Element("definition")

    def prettyPrint(self, etNode=None):
        """prints pretty XML making use of etree

        :param etNode: node to serialize (defaults to the document root)
        :type etNode: :class:`lxml.etree.Element`
        :returns: pretty-printed XML text with an XML declaration
        """
        if etNode is None:
            etNode = self.elem
        xmls = _tostr(
            lxml.etree.tostring(
                etNode, encoding='utf8',
                method='xml', pretty_print=True))
        if xmls.startswith("<?xml"):
            return xmls
        return "<?xml version='1.0' encoding='utf8'?>\n" + xmls

    def dump(self):
        """ dumps XML structure into the XML file

        :brief: It serializes the whole tree and writes it to self.fname
        """
        with open(self.fname, "w") as xmlfile:
            xmlfile.write(self.prettyPrint(self.elem))
def main():
    """ the main function: builds a small example NXS tree and
        writes it to test.xml
    """
    xml_file = XMLFile("test.xml")
    # entry -> instrument -> source hierarchy
    entry = NGroup(xml_file, "entry1", "NXentry")
    instrument = NGroup(entry, "instrument", "NXinstrument")
    source = NGroup(instrument, "source", "NXsource")
    # a single example field with units and a value
    distance = NField(source, "distance", "NX_FLOAT")
    distance.setUnits("m")
    distance.setText("100.")
    xml_file.dump()
if __name__ == "__main__":
main()
| nexdatas/tools | nxstools/nxsxml.py | Python | gpl-3.0 | 24,266 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import time
from urllib import quote
from lxml.html import fromstring, tostring
from calibre.ebooks.metadata.sources.base import Source
from calibre import browser, url_slash_cleaner
from calibre.utils.cleantext import clean_ascii_chars
class Helion(Source):
    """calibre metadata-source plugin that scrapes book metadata
    from helion.pl search results."""
    name = 'Helion'
    description = _('Pobiera metadane z helion.pl')
    author = 'pikpok'
    supported_platforms = ['windows', 'osx', 'linux']
    version = (0, 0, 4)
    minimum_calibre_version = (0, 8, 0)
    capabilities = frozenset(['identify', 'cover'])
    touched_fields = frozenset(['title', 'authors', 'identifier:helion',
        'identifier:isbn', 'rating', 'publisher', 'pubdate', 'languages'])
    supports_gzip_transfer_encoding = True

    # NOTE(review): identifiers={} is a mutable default argument -- harmless
    # only as long as it is never mutated; confirm against calibre's Source API.
    def identify(self, log, result_queue, abort, title=None, authors=None, identifiers={}, timeout=30):
        """Search helion.pl for title/author tokens and spawn a Worker
        per result page to fill *result_queue*."""
        matches = []
        br = self.browser
        # build the "+"-joined, iso-8859-2 URL-quoted query string
        q = ''
        title_tokens = list(self.get_title_tokens(title, strip_joiners=False, strip_subtitle=True))
        if title_tokens:
            tokens = [quote(t.encode('iso-8859-2')) for t in title_tokens]
            q += '+'.join(tokens)
        if authors:
            authors_tokens = self.get_author_tokens(authors, only_first_author=True)
            if authors_tokens:
                q += '+'
                tokens = [quote(t.encode('iso-8859-2')) for t in authors_tokens]
                q += '+'.join(tokens)
        query = 'http://helion.pl/search?qa=&szukaj=%s&sortby=wd&wsprzed=1&wprzyg=1&wyczerp=1&sent=1'%(q)
        response = br.open_novisit(query, timeout=timeout)
        raw = response.read().strip()
        root = fromstring(clean_ascii_chars(raw))
        results = root.xpath('*//div[contains(@class,"search-helion")]')
        for result in results:
            # NOTE(review): xpath() returns a *list* of hrefs, and the list
            # itself is appended; Worker therefore receives a list, not a
            # single URL -- verify against worker.py before changing.
            book_url = result.xpath('./a[contains(@href,"ksiazki")]/@href')
            matches.append(book_url)
        from calibre_plugins.helion.worker import Worker
        workers = [Worker(url, result_queue, br, log, i, self) for i, url in enumerate(matches) if url]
        # start workers with a small stagger, then poll until all finish
        # or the user aborts
        for w in workers:
            w.start()
            time.sleep(0.1)
        while not abort.is_set():
            a_worker_is_alive = False
            for w in workers:
                w.join(0.2)
                if abort.is_set():
                    break
                if w.is_alive():
                    a_worker_is_alive = True
            if not a_worker_is_alive:
                break
        return None

    def download_cover(self, log, result_queue, abort, title = None, authors = None, identifiers = {}, timeout = 30, get_best_cover = False):
        """Fetch the cached cover URL and put the image bytes on *result_queue*."""
        url = self.get_cached_cover_url(identifiers = identifiers)
        br = self.browser
        try:
            cdata = br.open_novisit(url, timeout=timeout).read()
            result_queue.put((self, cdata))
        # NOTE(review): bare except silently swallows even KeyboardInterrupt;
        # narrowing to Exception would be safer.
        except:
            log.exception('Failed to download cover from:', url)

    def get_cached_cover_url(self, identifiers):
        """Return the helion.pl cover image URL for the 'helion' identifier,
        or None when no identifier is present."""
        url = None
        helion_id = identifiers.get('helion')
        if helion_id is not None:
            url = 'http://helion.pl/okladki/326x466/%s.jpg'%(helion_id)
        return url
if __name__ == '__main__':
    '''
    Tests
    '''
    from calibre.ebooks.metadata.sources.test import (test_identify_plugin, title_test, authors_test)
    # (metadata query, expected-result checks) pairs, run one at a time
    cases = [
        (
            {
                'title':'Ruby on Rails. Wprowadzenie',
                'authors':['Bruce A. Tate & Curt Hibbs']
            },
            [
                title_test('Ruby on Rails. Wprowadzenie'),
                authors_test(['Bruce A. Tate', 'Curt Hibbs'])
            ]
        ),
        (
            {
                'title':u'Jak pozostać anonimowym w sieci',
                'authors':[u'Radosław Sokół']
            },
            [
                title_test(u'Jak pozostać anonimowym w sieci'),
                authors_test([u'Radosław Sokół'])
            ]
        ),
    ]
    for case in cases:
        test_identify_plugin(Helion.name, [case])
| pikpok/calibre-helion | __init__.py | Python | mit | 4,162 |
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import PIL.Image as image
import time, re, random, os
import requests
from retrying import retry
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def input_by_id(driver, text=u"江苏汇中", element_id="name"):
    """Type the query keyword into the page element with the given id.

    :param driver: selenium webdriver instance
    :param text: unicode text to type into the input box
    :param element_id: DOM id of the input box element
    """
    field = driver.find_element_by_id(element_id)
    field.clear()
    field.send_keys(text)
def click_by_id(driver, element_id="popup-submit"):
    """Click the page element (e.g. the search button) with the given DOM id.

    :param driver: selenium webdriver instance
    :param element_id: DOM id of the element to click
    """
    driver.find_element_by_id(element_id).click()
def get_fail_signal(driver):  # gt_info_type
    """Return True when the geetest info bar reports a failed verification
    (i.e. its text contains '验证失败')."""
    info_text = driver.find_element_by_xpath("//div[@class='gt_info_text']/span").text
    print(info_text)
    return u'验证失败' in info_text
# NOTE(review): a bare @retry retries forever on *any* exception with no
# stop/wait policy -- consider retrying(stop_max_attempt_number=..., wait_fixed=...).
@retry
def do_something_unreliable():
    """Drive IE through gsxt.gov.cn: search for a company, attempt the
    geetest slider captcha, and read the search result text."""
    # hard-coded local IEDriver path; the Firefox alternative is commented out
    iedriver = "C:\Program Files\Internet Explorer\IEDriverServer_x64_2.53.1.exe"
    driver = webdriver.Ie(iedriver) # webdriver.Firefox()
    driver.get("http://www.gsxt.gov.cn/index.html")
    data = "扬中市皇朝家具有限公司"
    input_by_id(driver, text=data, element_id="keyword")
    click_by_id(driver, element_id="btn_query")
    # debug: dump the captcha background element's inline style
    print(driver.find_element_by_xpath("//div[@class='gt_cut_bg gt_show']/div").get_attribute('style'))
    # press and quickly release the slider knob (a minimal drag attempt)
    element = driver.find_element_by_xpath("//div[@class='gt_slider_knob gt_show']")
    ActionChains(driver).click_and_hold(on_element=element).perform()
    time.sleep(0.15)
    ActionChains(driver).release(on_element=element).perform()
    # wait for the captcha feedback text to appear
    WebDriverWait(driver, 30).until(
        lambda the_driver: the_driver.find_element_by_xpath(
            "//div[@class='gt_info_text']").is_displayed()
    )
    print(get_fail_signal(driver))
    if not get_fail_signal(driver):
        time.sleep(3)
        WebDriverWait(driver, 30).until(
            lambda the_driver: the_driver.find_element_by_xpath(
                "//div[@class='search_result g9']").is_displayed()
        )
        # NOTE(review): str1 is assigned but never returned or used;
        # the browser is also never quit, leaking a driver per attempt.
        str1 = data + '!' + driver.find_element_by_xpath("//div[@class='search_result g9']").text
    else:
        print("验证码不通过")
if __name__ == '__main__':
do_something_unreliable()
| cysuncn/python | study/numpyStudy/retryTest.py | Python | gpl-3.0 | 2,492 |
"""
Copyright 2007 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
from os import path
from gi.repository import Gtk
from . import Constants, Utils, Dialogs
class FileDialogHelper(Gtk.FileChooserDialog, object):
    """
    A wrapper class for the gtk file chooser dialog.
    Implement a file chooser dialog with only necessary parameters.
    """
    title = ''
    action = Gtk.FileChooserAction.OPEN
    filter_label = ''
    filter_ext = ''

    def __init__(self, parent, current_file_path):
        """
        FileDialogHelper constructor.
        Create a save or open dialog with cancel and ok buttons.
        Use standard settings: no multiple selection, local files only, and the * filter.

        Args:
            parent: the transient parent window
            current_file_path: initial file path (may be empty/None; a
                default path is substituted)
        """
        ok_stock = {
            Gtk.FileChooserAction.OPEN: 'gtk-open',
            Gtk.FileChooserAction.SAVE: 'gtk-save'
        }[self.action]

        Gtk.FileChooserDialog.__init__(self, title=self.title, action=self.action,
                                       transient_for=parent)
        self.add_buttons('gtk-cancel', Gtk.ResponseType.CANCEL, ok_stock, Gtk.ResponseType.OK)
        self.set_select_multiple(False)
        self.set_local_only(True)

        self.parent = parent
        self.current_file_path = current_file_path or path.join(
            Constants.DEFAULT_FILE_PATH, Constants.NEW_FLOGRAPH_TITLE + Constants.FILE_EXTENSION)

        # use the (possibly defaulted) path so an empty argument still opens
        # in the default directory rather than dirname('')
        self.set_current_folder(path.dirname(self.current_file_path))  # current directory

        self.setup_filters()

    def setup_filters(self, filters=None):
        """Install the file filters and make the first filter with a real
        extension the active default (the 'All Files' entry never is)."""
        set_default = True
        filters = filters or ([(self.filter_label, self.filter_ext)] if self.filter_label else [])
        filters.append(('All Files', ''))
        for label, ext in filters:
            if not label:
                continue
            f = Gtk.FileFilter()
            f.set_name(label)
            f.add_pattern('*' + ext)
            self.add_filter(f)
            # select the first real filter exactly once; the previous flag
            # logic was inverted, so set_filter() was never reached
            if set_default and ext:
                self.set_filter(f)
                set_default = False

    def run(self):
        """Get the filename and destroy the dialog."""
        response = Gtk.FileChooserDialog.run(self)
        filename = self.get_filename() if response == Gtk.ResponseType.OK else None
        self.destroy()
        return filename
class SaveFileDialog(FileDialogHelper):
    """A dialog box to save or open flow graph files. This is a base class, do not use."""
    action = Gtk.FileChooserAction.SAVE

    def __init__(self, parent, current_file_path):
        super(SaveFileDialog, self).__init__(parent, current_file_path)
        # pre-fill the name entry with the current base name plus this
        # dialog's file extension
        self.set_current_name(path.splitext(path.basename(self.current_file_path))[0] + self.filter_ext)
        self.set_create_folders(True)
        self.set_do_overwrite_confirmation(True)
class OpenFileDialog(FileDialogHelper):
    """A dialog box to save or open flow graph files. This is a base class, do not use."""
    action = Gtk.FileChooserAction.OPEN

    def show_missing_message(self, filename):
        """Warn the user that *filename* does not exist."""
        Dialogs.MessageDialogWrapper(
            self.parent,
            Gtk.MessageType.WARNING, Gtk.ButtonsType.CLOSE, 'Cannot Open!',
            # restored the {filename} placeholder; the .format(filename=...)
            # argument was previously unused and the message showed no name
            'File <b>{filename}</b> Does not Exist!'.format(filename=Utils.encode(filename)),
        ).run_and_destroy()

    def get_filename(self):
        """
        Run the dialog and get the filename.
        If this is a save dialog and the file name is missing the extension, append the file extension.
        If the file name with the extension already exists, show a overwrite dialog.
        If this is an open dialog, return a list of filenames.

        Returns:
            the list of selected file paths, or None if any is missing
        """
        filenames = Gtk.FileChooserDialog.get_filenames(self)
        for filename in filenames:
            if not path.exists(filename):
                self.show_missing_message(filename)
                return None  # rerun
        return filenames
class OpenFlowGraph(OpenFileDialog):
    """Open dialog for flow graph files; several files may be selected at once."""
    title = 'Open a Flow Graph from a File...'
    filter_label = 'Flow Graph Files'
    filter_ext = Constants.FILE_EXTENSION

    def __init__(self, parent, current_file_path=''):
        super(OpenFlowGraph, self).__init__(parent, current_file_path)
        # unlike the base class, allow multi-selection
        self.set_select_multiple(True)
class OpenQSS(OpenFileDialog):
    """Open dialog for Qt style sheet (.qss) theme files."""
    title = 'Open a QSS theme...'
    filter_label = 'QSS Themes'
    filter_ext = '.qss'
class SaveFlowGraph(SaveFileDialog):
    """Save dialog for flow graph files."""
    title = 'Save a Flow Graph to a File...'
    filter_label = 'Flow Graph Files'
    filter_ext = Constants.FILE_EXTENSION
class SaveConsole(SaveFileDialog):
    """Save dialog for dumping the console contents to a plain-text file."""
    title = 'Save Console to a File...'
    # label fixed from 'Test Files'; the '.txt' extension shows text files
    # are meant
    filter_label = 'Text Files'
    filter_ext = '.txt'
class SaveScreenShot(SaveFileDialog):
    """Save dialog for flow-graph screenshots (pdf/png/svg) with a
    'background transparent' toggle persisted in the app config."""
    title = 'Save a Flow Graph Screen Shot...'
    filters = [('PDF Files', '.pdf'), ('PNG Files', '.png'), ('SVG Files', '.svg')]
    filter_ext = '.pdf'  # the default

    def __init__(self, parent, current_file_path=''):
        super(SaveScreenShot, self).__init__(parent, current_file_path)

        self.config = Gtk.Application.get_default().config

        self._button = button = Gtk.CheckButton(label='Background transparent')
        self._button.set_active(self.config.screen_shot_background_transparent())
        self.set_extra_widget(button)

    def setup_filters(self, filters=None):
        # always install this class's fixed filter set
        super(SaveScreenShot, self).setup_filters(self.filters)

    def show_missing_message(self, filename):
        """Tell the user the chosen file extension is not supported."""
        Dialogs.MessageDialogWrapper(
            self.parent,
            Gtk.MessageType.ERROR, Gtk.ButtonsType.CLOSE, 'Can not Save!',
            # restored the {filename} placeholder; the .format(filename=...)
            # argument was previously unused and the message showed no name
            'File Extension of <b>{filename}</b> not supported!'.format(filename=Utils.encode(filename)),
        ).run_and_destroy()

    def run(self):
        """Run until the user cancels or picks a supported extension.

        Returns:
            tuple of (filename or None, background-transparent flag)
        """
        valid_exts = {ext for label, ext in self.filters}
        filename = None
        while True:
            response = Gtk.FileChooserDialog.run(self)
            if response != Gtk.ResponseType.OK:
                filename = None
                break
            filename = self.get_filename()
            if path.splitext(filename)[1] in valid_exts:
                break
            self.show_missing_message(filename)

        # persist the checkbox state for next time
        bg_transparent = self._button.get_active()
        self.config.screen_shot_background_transparent(bg_transparent)
        self.destroy()
        return filename, bg_transparent
| mrjacobagilbert/gnuradio | grc/gui/FileDialogs.py | Python | gpl-3.0 | 6,524 |
# Django settings for testautoslug project.
import os

# Absolute path of the directory containing this settings file; used to
# locate the sqlite database below.
PROJECT_ROOT = os.path.dirname(__file__)

# NOTE(review): DEBUG=True and a committed SECRET_KEY are fine for this
# test project but must never be used in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': os.path.join(PROJECT_ROOT, 'dev.db'), # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = '44mxeh8nkm^ycwef-eznwgk&8_lwc!j9r)h3y_^ypz1iom18pa'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'testautoslug.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    # the app under test for django-autoslug-field
    'testapp',
)
| Egregors/django-autoslug-field | testautoslug/settings.py | Python | mit | 3,357 |
# vi: ts=4 expandtab
#
# Copyright (C) 2011 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
frequency = PER_INSTANCE
# This is a tool that cloud init provides
HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints'
def _get_helper_tool_path(distro):
try:
base_lib = distro.usr_lib_exec
except AttributeError:
base_lib = '/usr/lib'
return HELPER_TOOL_TPL % base_lib
def handle(name, cfg, cloud, log, _args):
    """cloud-init module entry point: run the distro's helper tool to
    write SSH key fingerprints to the system console.

    :param name: module name (used in log messages)
    :param cfg: merged cloud-config dictionary
    :param cloud: cloud object providing the distro
    :param log: logger
    :param _args: unused positional module arguments
    """
    helper_path = _get_helper_tool_path(cloud.distro)
    if not os.path.exists(helper_path):
        log.warn(("Unable to activate module %s,"
                  " helper tool not found at %s"), name, helper_path)
        return

    # comma-joined blacklists are passed straight to the helper script;
    # DSA keys are excluded from the console output by default
    fp_blacklist = util.get_cfg_option_list(cfg,
                                            "ssh_fp_console_blacklist", [])
    key_blacklist = util.get_cfg_option_list(cfg,
                                             "ssh_key_console_blacklist",
                                             ["ssh-dss"])

    try:
        cmd = [helper_path]
        cmd.append(','.join(fp_blacklist))
        cmd.append(','.join(key_blacklist))
        (stdout, _stderr) = util.subp(cmd)
        # mirror the helper's output to the console only (not stderr)
        util.multi_log("%s\n" % (stdout.strip()),
                       stderr=False, console=True)
    except Exception:
        log.warn("Writing keys to the system console failed!")
        raise
| prometheanfire/cloud-init | cloudinit/config/cc_keys_to_console.py | Python | gpl-3.0 | 2,179 |
import setpath
import functions
import json
registered=True
# Filters the input table using the filters argument
# For example
# lala table :
# [1|1[2|tear-prod-rate[3|normal[4|?
# --- [0|Column names ---
# [1|no [2|colname [3|val [4|nextnode
# var 'filters' from select tabletojson(colname,val, "colname,val") from lala;
# or
# var 'filters' '[{"colname": "tear-prod-rate", "val": "normal"}]'
# filtertable filters:%{filters} select * from mytable;
class filtertable(functions.vtable.vtbase.VT):
    """madis virtual table operator: runs *query*, drops the columns named
    in the JSON *filters* argument, and yields only the rows whose filter
    columns equal the requested values."""

    def VTiter(self, *parsedArgs,**envars):
        # first yielded tuple is the schema (column-name 1-tuples),
        # subsequent tuples are data rows -- madis vtable protocol
        largs, dictargs = self.full_parse(parsedArgs)
        if 'query' not in dictargs:
            raise functions.OperatorError(__name__.rsplit('.')[-1],"No query argument ")
        query = dictargs['query']
        if 'filters' not in dictargs:
            raise functions.OperatorError(__name__.rsplit('.')[-1],"No filters ")
#        print dictargs['filters']
        # filters is a JSON list of {"colname": ..., "val": ...} objects
        filters = json.loads(dictargs['filters'])
        cur = envars['db'].cursor()
        c=cur.execute(query)
        schema = cur.getdescriptionsafe()
        # schemaold: all input columns; schemanew: output columns
        # (input columns minus the filter columns)
        schemaold = []
        schemanew = []
        for i in xrange(len(schema)):
            schemaold.append(str(schema[i][0]))
            schemanew.append(str(schema[i][0]))
        for filter in filters:
            schemanew.remove(str(filter['colname']))
#        print schemaold
#        print schemanew
        yield tuple((x,) for x in schemanew)
        for myrow in c:
            # keep the row only when every filter column matches its value
            # (string comparison against the row's raw value)
            passfilter = 1
            for filter in filters:
                colid = schemaold.index(str(filter['colname']))
                if myrow[colid]!= str(filter['val']):
                    passfilter = 0
            if passfilter ==1:
                # project the row onto the remaining (non-filter) columns
                newrow=[]
                for i in xrange(len(schemanew)):
#                    print schemanew[i],i
                    if schemanew[i] in schemaold:
                        colid2 = schemaold.index(schemanew[i])
                        newrow.append(myrow[colid2])
                yield tuple(newrow,)

#        no = 0
#        for myrow in c:
#            first_tuple = []
#            schema1 = []
#            for item in xrange(len(schema)):
#                if schema[item][0] in metadata:
#                    vals = metadata[schema[item][0]].split(',')
#                    vals.sort()
#                    for v in vals:
#                        newv = str(schema[item][0]) + '(' + str(v) + ')'
#
#                        schema1.append(newv)
#                        if myrow[item] == v:
#                            first_tuple.append(1)
#                        else :
#                            first_tuple.append(0)
#                else:
#                    # print 'no', schema[item][0]
#                    newv = str(schema[item][0])
#                    schema1.append(newv)
#                    first_tuple.append(myrow[item])
#
#
#            if no == 0:
#                # print tuple((x,) for x in schema1)
#                yield tuple((x,) for x in schema1)
#                no =no+1
#
#            # print str(first_tuple)
#            yield tuple(first_tuple,)
def Source():
    # factory entry point required by the madis vtable framework
    return functions.vtable.vtbase.VTGenerator(filtertable)
# madis boilerplate: when the module is loaded stand-alone (not as part of
# a package), run the framework's self-test hook
if not ('.' in __name__):
    """
    This is needed to be able to test the function, put it at the end of every
    new function you create
    """
    import sys
    import setpath
    from functions import *
    testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.tes | madgik/exareme | Exareme-Docker/src/exareme/exareme-tools/madis/src/functionslocal/vtable/filtertable.py | Python | mit | 3,582 |
from __future__ import absolute_import
import os
import unittest
from svtplay_dl.fetcher.dash import _dashparse
from svtplay_dl.fetcher.dash import parse_duration
from svtplay_dl.utils.parser import setup_defaults
def parse(playlist):
    """Load a DASH manifest fixture by file name and run it through
    _dashparse with default options."""
    manifest_dir = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "dash-manifests")
    with open(os.path.join(manifest_dir, playlist)) as handle:
        manifest = handle.read()
    return _dashparse(setup_defaults(), manifest, "http://localhost", None, None)
class dashtest(unittest.TestCase):
    """Checks DASH manifest parsing against bundled fixture files."""

    def _assert_streams(self, streams, bitrate, n_files, n_audio):
        # verify the stream entry keyed by *bitrate* has the expected
        # video/audio segment counts and a segmented layout
        entry = streams[bitrate]
        assert len(entry.files) == n_files
        assert len(entry.audio) == n_audio
        assert entry.segments

    def test_parse_cmore(self):
        self._assert_streams(parse("cmore.mpd"), 3261.367, 410, 615)

    def test_parse_fff(self):
        self._assert_streams(parse("fff.mpd"), 3187.187, 578, 577)

    def test_parse_nya(self):
        self._assert_streams(parse("svtvod.mpd"), 2793.0, 350, 350)

    def test_parse_live(self):
        self._assert_streams(parse("svtplay-live.mpd"), 2795.9959999999996, 6, 6)

    def test_parse_live2(self):
        self._assert_streams(parse("svtplay-live2.mpd"), 2892.0, 11, 11)

    def test_parse_duration(self):
        assert parse_duration("PT3459.520S") == 3459.52
        assert parse_duration("PT2.00S") == 2.0
        assert parse_duration("PT1H0M30.000S") == 3630.0
        assert parse_duration("P1Y1M1DT1H0M30.000S") == 34218030.0
        assert parse_duration("PWroNG") == 0
| olof/svtplay-dl | lib/svtplay_dl/tests/test_dash.py | Python | mit | 1,835 |
# Builds a small convolutional classifier (2 output classes) and hands it
# to the shared experiment harness in common.py.
import sys
sys.path.insert(0, "/input/") # for the hosted /input mount -- presumably Kaggle-style; confirm
sys.path.insert(0, "../common/") # for local runs
import common

from keras.models import Sequential
from keras.layers import *

model = Sequential()
# input resolution comes from the shared common module
model.add(Conv2D(8, (3, 3), padding='same',
                 input_shape=(common.resolution_x, common.resolution_y, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Conv2D(8, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(24, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Dropout(0.2))
# NOTE(review): this conv block has no Activation before the Dropout,
# unlike its siblings -- possibly intentional, verify
model.add(Conv2D(8, (3, 3), padding='same'))
model.add(Dropout(0.25))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Conv2D(8, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(GlobalAveragePooling2D())
# single hidden dense layer (the range(1) loop runs exactly once)
for i in range(1):
    model.add(Dense(16))
    model.add(Activation('relu'))
model.add(Dense(2))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
common.experiment(model)
| cytadela8/trypophobia | models/small_model_Artur/main.py | Python | gpl-3.0 | 1,293 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
class ChannelShuffleOpsTest(serial.SerializedTestCase):
    """Property-based tests for the ChannelShuffle operator against
    NumPy reference implementations."""

    def _channel_shuffle_nchw_ref(self, X, group):
        # NCHW reference: split C into (G, K), swap the group and
        # per-group-channel axes, then restore the original shape
        dims = X.shape
        N = dims[0]
        C = dims[1]
        G = group
        K = int(C / G)
        X = X.reshape(N, G, K, -1)
        Y = np.transpose(X, axes=(0, 2, 1, 3))
        return [Y.reshape(dims)]

    def _channel_shuffle_nhwc_ref(self, X, group):
        # NHWC reference: channels are the last axis here, so split the
        # trailing C into (G, K) and swap those two axes
        dims = X.shape
        N = dims[0]
        C = dims[-1]
        G = group
        K = int(C / G)
        X = X.reshape(N, -1, G, K)
        Y = np.transpose(X, axes=(0, 1, 3, 2))
        return [Y.reshape(dims)]

    @serial.given(N=st.integers(1, 5), G=st.integers(1, 5), K=st.integers(1, 5),
                  H=st.integers(1, 5), W=st.integers(1, 5),
                  order=st.sampled_from(["NCHW", "NHWC"]), **hu.gcs)
    def test_channel_shuffle(self, N, G, K, H, W, order, gc, dc):
        # C is always divisible by the group count by construction
        C = G * K
        if order == "NCHW":
            X = np.random.randn(N, C, H, W).astype(np.float32)
        else:
            X = np.random.randn(N, H, W, C).astype(np.float32)

        op = core.CreateOperator(
            "ChannelShuffle",
            ["X"],
            ["Y"],
            group=G,
            order=order,
        )

        def channel_shuffle_ref(X):
            if order == "NCHW":
                return self._channel_shuffle_nchw_ref(X, G)
            else:
                return self._channel_shuffle_nhwc_ref(X, G)

        # check the op output, its gradient, and cross-device consistency
        self.assertReferenceChecks(gc, op, [X], channel_shuffle_ref)
        self.assertGradientChecks(gc, op, [X], 0, [0])
        self.assertDeviceChecks(dc, op, [X], [0])
| ryfeus/lambda-packs | pytorch/source/caffe2/python/operator_test/channel_shuffle_test.py | Python | mit | 1,970 |
#!/usr/bin/env python2
import logging
logging.basicConfig(level=logging.WARNING)
from functools import partial
import wx
from spacq import VERSION
from spacq.gui.display.plot.static.delegator import formats, available_formats
from spacq.gui.display.table.filter import FilterListDialog
from spacq.gui.display.table.generic import TabularDisplayFrame
from spacq.gui.tool.box import load_csv, MessageDialog
class DataExplorerApp(wx.App):
    """wx application that loads CSV data into a table frame and launches
    2D/3D plot dialogs over it."""
    default_title = 'Data Explorer'

    def OnInit(self):
        # per-session filter state, shared with the (single) filter dialog
        self.filters = {}
        self.filter_columns = {}
        self.filter_dialog = None
        # Frames.
        self.csv_frame = TabularDisplayFrame(None, title=self.default_title)
        # Menu.
        menuBar = wx.MenuBar()
        ## File.
        menu = wx.Menu()
        menuBar.Append(menu, '&File')
        item = menu.Append(wx.ID_OPEN, '&Open...')
        self.Bind(wx.EVT_MENU, self.OnMenuFileOpen, item)
        item = menu.Append(wx.ID_CLOSE, '&Close')
        self.Bind(wx.EVT_MENU, self.OnMenuFileClose, item)
        menu.AppendSeparator()
        # disabled until a file has been loaded
        self.filter_menu_item = menu.Append(wx.ID_ANY, '&Filters...')
        self.filter_menu_item.Enable(False)
        self.Bind(wx.EVT_MENU, self.OnMenuFileFilters, self.filter_menu_item)
        menu.AppendSeparator()
        item = menu.Append(wx.ID_EXIT, 'E&xit')
        self.Bind(wx.EVT_MENU, self.OnMenuFileExit, item)
        ## Plot.
        menu = wx.Menu()
        menuBar.Append(menu, '&Plot')
        # section headers are disabled menu entries
        menu.Append(wx.ID_ANY, ' 2D:').Enable(False)
        self.two_dimensional_menu = menu.Append(wx.ID_ANY, '&Curve...')
        self.Bind(wx.EVT_MENU, partial(self.create_plot, formats.two_dimensional),
                self.two_dimensional_menu)
        menu.AppendSeparator()
        menu.Append(wx.ID_ANY, ' 3D:').Enable(False)
        self.colormapped_menu = menu.Append(wx.ID_ANY, '&Colormapped...')
        self.Bind(wx.EVT_MENU, partial(self.create_plot, formats.colormapped),
                self.colormapped_menu)
        self.surface_menu = menu.Append(wx.ID_ANY, '&Surface...')
        self.Bind(wx.EVT_MENU, partial(self.create_plot, formats.surface),
                self.surface_menu)
        menu.AppendSeparator()
        menu.Append(wx.ID_ANY, ' List:').Enable(False)
        self.waveforms_menu = menu.Append(wx.ID_ANY, '&Waveforms...')
        self.Bind(wx.EVT_MENU, partial(self.create_plot, formats.waveforms, type='list'),
                self.waveforms_menu)
        ## Help.
        menu = wx.Menu()
        menuBar.Append(menu, '&Help')
        ### About.
        item = menu.Append(wx.ID_ABOUT, '&About...')
        self.Bind(wx.EVT_MENU, self.OnMenuHelpAbout, item)
        self.csv_frame.SetMenuBar(menuBar)
        self.update_plot_menus(False)
        # Display.
        self.csv_frame.Show()
        self.csv_frame.SetSize((800, 600))
        self.SetTopWindow(self.csv_frame)
        self.csv_frame.Raise()
        return True

    def update_plot_menus(self, status):
        """
        If status is True, enable the plot menus corresponding to the available formats. Otherwise, disable all.
        """
        pairs = [
            (formats.two_dimensional, self.two_dimensional_menu),
            (formats.colormapped, self.colormapped_menu),
            (formats.surface, self.surface_menu),
            (formats.waveforms, self.waveforms_menu),
        ]
        for format, menu in pairs:
            # when enabling, only formats with an available backend change
            if not status or format in available_formats:
                menu.Enable(status)

    def create_plot(self, format, evt=None, type='scalar'):
        """
        Open up a dialog to configure the selected plot format.
        """
        headings, rows, types = self.csv_frame.display_panel.GetValue(types=[type])
        available_formats[format](self.csv_frame, headings, rows).Show()

    def OnMenuFileOpen(self, evt=None):
        """Prompt for a CSV file, then load it into the table frame."""
        try:
            result = load_csv(self.csv_frame)
        except IOError as e:
            MessageDialog(self.csv_frame, str(e), 'Could not load data').Show()
            return
        if result is None:
            # user cancelled the dialog
            return
        else:
            # close any previously-loaded file first
            self.OnMenuFileClose()
        has_header, values, filename = result
        self.csv_frame.display_panel.from_csv_data(has_header, values)
        self.csv_frame.Title = '{0} - {1}'.format(filename, self.default_title)
        self.update_plot_menus(len(self.csv_frame.display_panel) > 0)
        self.filter_menu_item.Enable(True)

    def OnMenuFileClose(self, evt=None):
        """Clear the table, reset the title, and drop all filter state."""
        self.csv_frame.display_panel.SetValue([], [])
        self.csv_frame.Title = self.default_title
        self.update_plot_menus(False)
        self.filter_menu_item.Enable(False)
        if self.filter_dialog is not None:
            self.filter_dialog.Close()
        self.filters = {}
        self.filter_columns = {}

    def OnMenuFileFilters(self, evt=None):
        """Show (or re-raise) the single filter dialog for the loaded table."""
        def close_callback(dlg):
            # persist the dialog's filter state for the next time it opens
            self.filters = dlg.filters
            self.filter_columns = dlg.filter_columns
            self.filter_dialog = None
        if self.filter_dialog is None:
            self.filter_dialog = FilterListDialog(self.csv_frame, self.csv_frame.display_panel.table,
                    close_callback, self.filters, self.filter_columns)
            self.filter_dialog.Show()
        self.filter_dialog.Raise()

    def OnMenuFileExit(self, evt=None):
        if self.csv_frame:
            self.csv_frame.Close()

    def OnMenuHelpAbout(self, evt=None):
        """Show the standard About box with the library version."""
        info = wx.AboutDialogInfo()
        info.SetName('Data Explorer')
        info.SetDescription('An application for displaying data in tabular and graphical form.\n'
                '\n'
                'Using Spanish Acquisition version {0}.'.format(VERSION)
                )
        wx.AboutBox(info)
if __name__ == "__main__":
app = DataExplorerApp()
app.MainLoop()
| 0/SpanishAcquisition | examples/data_explorer.py | Python | bsd-2-clause | 5,055 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from rally.plugins.openstack.context.ceilometer import samples
from tests.unit import test
CTX = "rally.plugins.openstack.context.ceilometer"
class CeilometerSampleGeneratorTestCase(test.TestCase):
    """Tests for the rally ceilometer samples context generator."""

    def _gen_tenants(self, count):
        # tenant ids are stringified integers 0..count-1
        tenants = {}
        for id_ in range(count):
            tenants[str(id_)] = {"name": str(id_)}
        return tenants

    def _gen_context(self, tenants_count, users_per_tenant,
                     resources_per_tenant, samples_per_resource):
        """Build the (tenants, context) pair the generator expects,
        with mocked endpoints and task."""
        tenants = self._gen_tenants(tenants_count)
        users = []
        for id_ in tenants.keys():
            for i in range(users_per_tenant):
                users.append({"id": i, "tenant_id": id_,
                              "endpoint": mock.MagicMock()})
        context = {
            "config": {
                "users": {
                    "tenants": tenants_count,
                    "users_per_tenant": users_per_tenant,
                    "concurrent": 10,
                },
                "ceilometer": {
                    "counter_name": "fake-counter-name",
                    "counter_type": "fake-counter-type",
                    "counter_unit": "fake-counter-unit",
                    "counter_volume": 100,
                    "resources_per_tenant": resources_per_tenant,
                    "samples_per_resource": samples_per_resource
                }
            },
            "admin": {
                "endpoint": mock.MagicMock()
            },
            "task": mock.MagicMock(),
            "users": users,
            "tenants": tenants
        }
        return tenants, context

    def test_init(self):
        # the generator must expose the ceilometer sub-config as .config
        context = {}
        context["task"] = mock.MagicMock()
        context["config"] = {
            "ceilometer": {
                "counter_name": "cpu_util",
                "counter_type": "gauge",
                "counter_unit": "instance",
                "counter_volume": 1.0,
                "resources_per_tenant": 5,
                "samples_per_resource": 5
            }
        }

        inst = samples.CeilometerSampleGenerator(context)
        self.assertEqual(inst.config, context["config"]["ceilometer"])

    @mock.patch("%s.samples.ceilo_utils.CeilometerScenario._create_sample"
                % CTX)
    def test_setup(self, mock_ceilometer_scenario__create_sample):
        tenants_count = 2
        users_per_tenant = 2
        resources_per_tenant = 2
        samples_per_resource = 2

        tenants, real_context = self._gen_context(
            tenants_count, users_per_tenant,
            resources_per_tenant, samples_per_resource)

        sample = {
            "counter_name": "fake-counter-name",
            "counter_type": "fake-counter-type",
            "counter_unit": "fake-counter-unit",
            "counter_volume": 100,
            "resource_id": "fake-resource-id"
        }

        # build the expected post-setup context: every tenant gains
        # resources_per_tenant resources and
        # resources_per_tenant * samples_per_resource samples
        new_context = copy.deepcopy(real_context)
        for id_ in tenants.keys():
            new_context["tenants"][id_].setdefault("samples", [])
            new_context["tenants"][id_].setdefault("resources", [])
            for i in range(resources_per_tenant):
                for j in range(samples_per_resource):
                    new_context["tenants"][id_]["samples"].append(sample)
                new_context["tenants"][id_]["resources"].append(
                    sample["resource_id"])

        mock_ceilometer_scenario__create_sample.return_value = [
            mock.MagicMock(to_dict=lambda: sample, **sample)]
        ceilometer_ctx = samples.CeilometerSampleGenerator(real_context)
        ceilometer_ctx.setup()
        self.assertEqual(new_context, ceilometer_ctx.context)

    def test_cleanup(self):
        # cleanup is a no-op for this context; it must simply not raise
        tenants, context = self._gen_context(2, 5, 3, 3)
        ceilometer_ctx = samples.CeilometerSampleGenerator(context)
        ceilometer_ctx.cleanup()
| go-bears/rally | tests/unit/plugins/openstack/context/ceilometer/test_samples.py | Python | apache-2.0 | 4,459 |
# Spirograph-style turtle demo: 16 hidden turtles arranged along four
# mirrored diagonals each repeatedly draw two sides of a square while the
# heading advances one degree per pass, producing an interference pattern.
# NOTE(review): this is Python 2 code (print statement), and time.clock()
# was deprecated in 3.3 and removed in 3.8 -- use time.perf_counter() when
# porting.
import turtle
import time
window = turtle.Screen()
def test():
    # Create four turtles per iteration, one per quadrant, offset 50*i
    # from the origin; pens are lifted during positioning so no lines are
    # drawn yet.
    allturtles = []
    for i in range(4):
        t1 = turtle.Turtle()
        t2 = turtle.Turtle()
        t3 = turtle.Turtle()
        t4 = turtle.Turtle()
        # speed(0) = fastest animation speed.
        t1.speed(0)
        t2.speed(0)
        t3.speed(0)
        t4.speed(0)
        t1.penup()
        t2.penup()
        t3.penup()
        t4.penup()
        # Quadrant placement: (+,+), (+,-), (-,+), (-,-).
        t1.setx(50*i)
        t1.sety(50*i)
        t2.setx(50*i)
        t2.sety(-50*i)
        t3.setx(-50*i)
        t3.sety(50*i)
        t4.setx(-50*i)
        t4.sety(-50*i)
        t1.pendown()
        t2.pendown()
        t3.pendown()
        t4.pendown()
        # ht() hides the turtle cursors so only the traces are visible.
        t1.ht()
        t2.ht()
        t3.ht()
        t4.ht()
        allturtles.append([t1,t2,t3,t4])
    # Time the drawing phase: each turtle draws two square sides, then
    # rotates 1 degree, 360 times in total.
    start = time.clock()
    for degrees in range(360):
        for line in allturtles:
            for t in line:
                for repeat in range(2):
                    t.fd(200)
                    t.lt(90)
                t.lt(1)
    print "That took %f seconds." %(time.clock()-start)
test()
window.exitonclick()
| willybh11/python | advProg/turtleStuff/helloturtleworld.py | Python | gpl-3.0 | 846 |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Any
from apache_beam.coders.coder_impl import StreamCoderImpl, create_InputStream, create_OutputStream
from pyflink.fn_execution.stream_slow import OutputStream
from pyflink.fn_execution.beam.beam_stream_slow import BeamInputStream, BeamTimeBasedOutputStream
class PassThroughLengthPrefixCoderImpl(StreamCoderImpl):
    """Length-prefix coder that simply forwards to the wrapped value coder.

    Unlike Beam's real length-prefix coder, no length header is written or
    consumed; encoding and decoding are delegated unchanged.
    """

    def __init__(self, value_coder):
        # The coder that performs the actual (de)serialization work.
        self._value_coder = value_coder

    def encode_to_stream(self, value, out: create_OutputStream, nested: bool) -> Any:
        # Delegate directly; no length prefix is emitted.
        self._value_coder.encode_to_stream(value, out, nested)

    def decode_from_stream(self, in_stream: create_InputStream, nested: bool) -> Any:
        # Delegate directly; no length prefix is consumed.
        return self._value_coder.decode_from_stream(in_stream, nested)

    def get_estimated_size_and_observables(self, value: Any, nested=False):
        # Size estimation is not meaningful for pass-through coding.
        return (0, [])

    def __repr__(self):
        return f'PassThroughLengthPrefixCoderImpl[{self._value_coder}]'
class FlinkFieldCoderBeamWrapper(StreamCoderImpl):
    """
    Bridge between Beam coder and Flink coder for the low-level FieldCoder.
    """

    def __init__(self, value_coder):
        self._value_coder = value_coder
        # Reusable scratch buffer: the Flink coder writes into this stream
        # and the accumulated bytes are then flushed to the Beam stream.
        self._data_output_stream = OutputStream()

    def encode_to_stream(self, value, out_stream: create_OutputStream, nested):
        scratch = self._data_output_stream
        self._value_coder.encode_to_stream(value, scratch)
        out_stream.write(scratch.get())
        scratch.clear()

    def decode_from_stream(self, in_stream: create_InputStream, nested):
        # Adapt the Beam input stream to the Flink stream interface.
        return self._value_coder.decode_from_stream(BeamInputStream(in_stream))

    def __repr__(self):
        return f'FlinkFieldCoderBeamWrapper[{self._value_coder}]'
class FlinkLengthPrefixCoderBeamWrapper(FlinkFieldCoderBeamWrapper):
    """
    Bridge between Beam coder and Flink coder for the top-level LengthPrefixCoder.
    """

    def __init__(self, value_coder):
        super().__init__(value_coder)
        # Time-based wrapper around the Beam output stream supplied per call.
        self._output_stream = BeamTimeBasedOutputStream()

    def encode_to_stream(self, value, out_stream: create_OutputStream, nested):
        # Route all writes through the time-based stream for this call.
        self._output_stream.reset_output_stream(out_stream)
        scratch = self._data_output_stream
        self._value_coder.encode_to_stream(value, scratch)
        self._output_stream.write(scratch.get())
        scratch.clear()

    def __repr__(self):
        return f'FlinkLengthPrefixCoderBeamWrapper[{self._value_coder}]'
| apache/flink | flink-python/pyflink/fn_execution/beam/beam_coder_impl_slow.py | Python | apache-2.0 | 3,466 |
##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class BerkeleyDb(AutotoolsPackage):
    """Oracle Berkeley DB"""
    homepage = "http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/overview/index.html"
    url = "http://download.oracle.com/berkeley-db/db-5.3.28.tar.gz"
    # Known releases with their md5 tarball checksums.
    version('5.3.28', 'b99454564d5b4479750567031d66fe24')
    version('6.0.35', 'c65a4d3e930a116abaaf69edfc697f25')
    version('6.1.29', '7f4d47302dfec698fe088e5285c9098e')
    version('6.2.32', '33491b4756cb44b91c3318b727e71023')
    # The configure script lives in dist/; build out of tree.
    configure_directory = 'dist'
    build_directory = 'spack-build'
    def url_for_version(self, version):
        # newer version need oracle login, so get them from gentoo mirror
        return 'http://distfiles.gentoo.org/distfiles/db-{0}.tar.gz'.format(version)
    def configure_args(self):
        # Shared libraries only, with the C++ and STL APIs enabled.
        return ['--disable-static', '--enable-cxx', '--enable-stl']
| skosukhin/spack | var/spack/repos/builtin/packages/berkeley-db/package.py | Python | lgpl-2.1 | 2,089 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
class _BoxSizer(wx.BoxSizer):
    """Base sizer with convenience wrappers around ``wx.Sizer.Add``.

    Concrete subclasses define a ``direction`` attribute (wx.VERTICAL or
    wx.HORIZONTAL) that is passed to the wx.BoxSizer constructor.
    """

    def __init__(self):
        wx.BoxSizer.__init__(self, self.direction)

    def add(self, component, proportion=0, flag=0):
        """Add *component* with explicit proportion and flags."""
        self.Add(component, flag=flag, proportion=proportion)

    def add_with_padding(self, component, padding=5):
        """Add *component* with *padding* pixels of border on all sides."""
        self.Add(component, border=padding, flag=wx.ALL)

    def add_expanding(self, component, propotion=1, padding=0):
        """Add *component* so that it expands to fill available space."""
        # NOTE(review): 'propotion' is a typo for 'proportion', but renaming
        # it would break callers that pass the argument by keyword.
        self.Add(component, border=padding, flag=wx.ALL | wx.EXPAND,
                 proportion=propotion)
class VerticalSizer(_BoxSizer):
    # Stacks children top-to-bottom.
    direction = wx.VERTICAL
class HorizontalSizer(_BoxSizer):
    # Lays children out left-to-right.
    direction = wx.HORIZONTAL
    def add_to_end(self, component):
        # Right-align the component within the sizer row.
        self.Add(component, flag=wx.ALIGN_RIGHT)
| fingeronthebutton/RIDE | src/robotide/widgets/sizers.py | Python | apache-2.0 | 1,341 |
from setuptools import setup
# Packaging metadata for the python-cmr distribution (NASA CMR API wrapper).
setup(
    name="python-cmr",
    version="0.4.1",
    license="MIT",
    url="https://github.com/jddeal/python-cmr",
    description="Python wrapper to the NASA Common Metadata Repository (CMR) API.",
    # The README doubles as the PyPI long description.
    long_description=open("README.rst").read(),
    author="Justin Deal, Matt Isnor",
    author_email="deal.justin@gmail.com, isnor.matt@gmail.com",
    packages=["cmr"],
    install_requires=[
        "requests",
    ]
)
| jddeal/python-cmr | setup.py | Python | mit | 455 |
# -*- coding:utf-8 -*-
__author__ = 'q00222219@huawei'
import time
from heat.openstack.common import log as logging
import heat.engine.resources.cloudmanager.commonutils as commonutils
import heat.engine.resources.cloudmanager.constant as constant
import heat.engine.resources.cloudmanager.exception as exception
import pdb
LOG = logging.getLogger(__name__)
class CascadedConfiger(object):
    """Configures a freshly-deployed cascaded OpenStack node over SSH.

    Waits for the cascaded host to become reachable, runs the
    configuration scripts on it, then polls the cascaded API and retries
    the configuration a few times if the API does not come up.
    """

    def __init__(self, public_ip_api, api_ip, domain, user, password,
                 cascading_domain, cascading_api_ip, cascaded_domain,
                 cascaded_api_ip, cascaded_api_subnet_gateway):
        # SSH endpoint and credentials for the cascaded host.
        self.public_ip_api = public_ip_api
        self.api_ip = api_ip
        self.domain = domain
        self.user = user
        self.password = password
        # DNS names/addresses used to wire the cascading and cascaded
        # clouds together.
        self.cascading_domain = cascading_domain
        self.cascading_api_ip = cascading_api_ip
        self.cascaded_domain = cascaded_domain
        self.cascaded_ip = cascaded_api_ip
        self.gateway = cascaded_api_subnet_gateway

    def do_config(self):
        """Run the full configuration sequence against the cascaded node."""
        start_time = time.time()
        LOG.info("start config cascaded, cascaded: %s" % self.domain)
        # wait cascaded tunnel can visit
        commonutils.check_host_status(host=self.public_ip_api,
                                      user=self.user,
                                      password=self.password,
                                      retry_time=500, interval=1)
        # config cascaded host
        self._config_az_cascaded()
        cost_time = time.time() - start_time
        # NOTE(review): "success" is logged here before the API check below
        # has verified anything.
        LOG.info("first config success, cascaded: %s, cost time: %d"
                 % (self.domain, cost_time))
        # check config result
        # Up to 3 verification rounds: each waits ~15s for the cascaded
        # API; on failure the configuration is re-applied and re-checked.
        for i in range(3):
            try:
                # check 90s
                commonutils.check_host_status(
                    host=self.public_ip_api,
                    user=constant.VcloudConstant.ROOT,
                    password=constant.VcloudConstant.ROOT_PWD,
                    retry_time=15,
                    interval=1)
                LOG.info("cascaded api is ready..")
                break
            except exception.CheckHostStatusFailure:
                if i == 2:
                    # NOTE(review): after the final failed check we only log
                    # and fall through -- callers are not told config failed.
                    LOG.error("check cascaded api failed ...")
                    break
                LOG.error("check cascaded api error, "
                          "retry config cascaded ...")
                self._config_az_cascaded()
        cost_time = time.time() - start_time
        LOG.info("config cascaded success, cascaded: %s, cost_time: %d"
                 % (self.domain, cost_time))

    def _config_az_cascaded(self):
        """Point the cascaded host's DNS at the cascading/cascaded domains."""
        LOG.info("start config cascaded host, host: %s" % self.api_ip)
        # modify dns server address
        # dnsmasq-style mapping: /<domain>/<ip>,/<domain>/<ip>
        address = "/%(cascading_domain)s/%(cascading_ip)s,/%(cascaded_domain)s/%(cascaded_ip)s" \
                  % {"cascading_domain": self.cascading_domain,
                     "cascading_ip": self.cascading_api_ip,
                     "cascaded_domain":self.cascaded_domain,
                     "cascaded_ip":self.cascaded_ip}
        # Retry the remote script up to 30 times, 1s apart, until one SSH
        # invocation succeeds.
        for i in range(30):
            try:
                commonutils.execute_cmd_without_stdout(
                    host=self.public_ip_api,
                    user=self.user,
                    password=self.password,
                    cmd='cd %(dir)s; source /root/adminrc; sh %(script)s replace %(address)s'
                        % {"dir": constant.PublicConstant.SCRIPTS_DIR,
                           "script": constant.PublicConstant.
                           MODIFY_DNS_SERVER_ADDRESS,
                           "address": address})
                break
            except exception.SSHCommandFailure as e:
                LOG.error("modify cascaded dns address error, cascaded: "
                          "%s, error: %s"
                          % (self.domain, e.format_message()))
                time.sleep(1)
        # NOTE(review): True is returned (and success logged) even when all
        # 30 attempts above failed -- confirm whether callers rely on this.
        LOG.info(
            "config cascaded dns address success, cascaded: %s"
            % self.public_ip_api)
        return True
| hgqislub/hybird-orchard | code/cloudmanager/install/hws/hws_cascaded_configer.py | Python | apache-2.0 | 4,081 |
import random
import string
def generate_random_key(letters=True, digits=True, punctuation=True,
                        length=50):
    """Return a random key of ``length`` characters.

    The character pool is assembled from ASCII letters, digits and
    punctuation according to the boolean flags.  Single quotes, double
    quotes and backslashes are always excluded so the key can be pasted
    into quoted settings files without escaping.

    Returns an empty string when all three flags are False.

    Keys are drawn from ``random.SystemRandom`` (backed by ``os.urandom``),
    so they are suitable for security-sensitive uses such as secret keys;
    the module-level ``random`` functions are predictable and are not.
    """
    charset = []
    if letters:
        charset.append(string.ascii_letters)
    if digits:
        charset.append(string.digits)
    if punctuation:
        charset.append(string.punctuation)
    if not charset:
        return ''
    # Strip characters that would need escaping in quoted contexts.
    chars = (''.join(charset).replace('\'', '')
                             .replace('"', '')
                             .replace('\\', ''))
    rng = random.SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(length))
| Outernet-Project/librarian-core | librarian_core/contrib/auth/utils.py | Python | gpl-3.0 | 579 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.