| repo_name (string, lengths 5–100) | ref (string, lengths 12–67) | path (string, lengths 4–244) | copies (string, lengths 1–8) | content (string, lengths 0–1.05M, ⌀ = null) |
|---|---|---|---|---|
| adrianholovaty/django | refs/heads/master | django/core/management/base.py | 5 |
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from io import BytesIO
from optparse import make_option, OptionParser
import traceback
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.utils.encoding import smart_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
instead print an error message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Print traceback on exception'),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
self.execute(*args, **options.__dict__)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``, except if force-skipped). If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr.
"""
show_traceback = options.get('traceback', False)
# Switch to English, because django-admin.py creates database content
# like permissions, and those shouldn't contain any translations.
# But only do this if we can assume we have a working settings file,
# because django.utils.translation requires settings.
saved_lang = None
if self.can_import_settings:
try:
from django.utils import translation
saved_lang = translation.get_language()
translation.activate('en-us')
except ImportError as e:
# If settings should be available, but aren't,
# raise the error and quit.
if show_traceback:
traceback.print_exc()
else:
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = options.get('stdout', sys.stdout)
self.stderr = options.get('stderr', sys.stderr)
if self.requires_model_validation and not options.get('skip_validation'):
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError as e:
if show_traceback:
traceback.print_exc()
else:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
finally:
if saved_lang is not None:
translation.activate(saved_lang)
def validate(self, app=None, display_num_errors=False):
"""
Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.
"""
from django.core.management.validation import get_validation_errors
s = BytesIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError("One or more models did not validate:\n%s" % error_text)
if display_num_errors:
self.stdout.write("%s error%s found\n" % (num_errors, num_errors != 1 and 's' or ''))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError()
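# --- Illustrative sketch, not part of Django: a minimal BaseCommand subclass
# showing the flow described in the docstring above. The command name, its
# arguments and its output are hypothetical.
class _ExampleGreetCommand(BaseCommand):
    args = '<name name ...>'
    help = 'Prints a greeting for each name given on the command line.'
    def handle(self, *args, **options):
        if not args:
            # Raising CommandError is the preferred way to signal failure;
            # execute() turns it into a clean message on stderr.
            raise CommandError('Enter at least one name.')
        # Whatever handle() returns is written to stdout by execute().
        return '\n'.join('Hello, %s!' % name for name in args)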
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application
names as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app()``, which will be called once for each application.
"""
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app(self, app, **options):
"""
Perform the command's actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.
"""
raise NotImplementedError()
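# --- Hedged example (hypothetical, not shipped with Django): an AppCommand
# subclass that reports the models defined by each application named on the
# command line.
class _ExampleListModelsCommand(AppCommand):
    help = 'Lists the models defined by each given application.'
    def handle_app(self, app, **options):
        from django.db import models
        names = [m.__name__ for m in models.get_models(app)]
        return '%s: %s' % (app.__name__, ', '.join(names) or '(no models)')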
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError()
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError()
|
| alexandriagroup/rakuten-ws | refs/heads/master | tests/webservice/rms/test_item.py | 1 |
# coding: utf-8
from __future__ import unicode_literals
from ... import slugify
URLS = {
'insert': slugify('item test 01'),
'search': slugify('item test 02'),
'update1': slugify('item test 03'),
'update2': slugify('item test 04'),
}
def cleanup(ws):
for label in URLS:
result = ws.rms.item.delete(item={'itemUrl': URLS[label]})
# N000 Successful completion
# C001 Specified Item ID does not exist
assert result['code'] in ('C001', 'N000')
def insert_item(ws, item_url):
item = {
'itemUrl': '%s' % item_url,
'itemNumber': item_url,
'itemName': '%s name' % item_url,
'itemPrice': '999999999',
'genreId': 409148,
'catalogId': '9784088820811',
'itemInventory': {'inventoryType': 1,
'inventories': {'inventory': {'inventoryCount': 11}}},
}
result = ws.rms.item.insert(item=item, raise_for_status=False)
assert result.status['message'] == "OK"
assert result['code'] == "N000"
return result
def test_item_insert(ws):
cleanup(ws)
result = insert_item(ws, URLS['insert'])
item = result['item']
assert item['itemUrl'] == URLS['insert']
def test_item_search(ws):
cleanup(ws)
result = insert_item(ws, URLS['search'])
# item = result['item']
result = ws.rms.item.search(itemUrl=URLS['search'])
# 200-00: successful completion
assert result['code'] == '200-00'
# assert result['numFound'] > 0
# if result['numFound'] > 1:
# assert result['items']['item'][0]['itemUrl'] == item['itemUrl']
# else:
# assert result['items']['item']['itemUrl'] == item['itemUrl']
def test_update_items(ws):
cleanup(ws)
insert_item(ws, URLS['update1'])
insert_item(ws, URLS['update2'])
items = {
'item': [
{'itemUrl': URLS['update1'], 'itemPrice': 380000},
{'itemUrl': URLS['update2'], 'itemPrice': 480000}
]
}
result = ws.rms.items.update(items=items)
for item_result in result['itemUpdateResult']:
assert item_result['code'] == 'N000'
def test_delete(ws):
cleanup(ws)
|
| alirizakeles/tendenci | refs/heads/master | tendenci/apps/profiles/templatetags/profile_filters.py | 4 |
from django.db.models import Q
from django.template import Library
from tendenci.apps.invoices.models import Invoice
register = Library()
@register.filter
def allow_edit_by(profile, user):
"""
Check whether the profile allows editing by the given user. Returns True/False.
"""
return profile.allow_edit_by(user)
@register.filter
def invoice_count(user):
inv_count = Invoice.objects.filter(Q(creator=user) | Q(owner=user) | Q(bill_to_email=user.email)).count()
return inv_count
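# Usage sketch (template and variable names are hypothetical): after loading
# this library, the filters can be applied directly in a Django template.
#
#   {% load profile_filters %}
#   {% if profile|allow_edit_by:request.user %} ...editable markup... {% endif %}
#   {{ request.user|invoice_count }} invoice(s)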
|
| astocko/agpy | refs/heads/master | agpy/collapse_gaussfit.py | 6 |
"""
-----------------
Collapse Gaussfit
-----------------
This was an early attempt to automate gaussian fitting over a data cube using
(multiple) gaussian decomposition for each spectrum. It's reasonably
effective, but the uses are somewhat minimal. I've tried shifting my
cube-related work to `pyspeckit <pyspeckit.bitbucket.org>`_.
"""
try:
import scipy
from scipy import optimize,sqrt
from scipy.optimize import leastsq
#from scipy.stats.stats import nanmedian,nanmean,_nanmedian
except ImportError:
print "Scipy cold not be loaded. Collapse_gaussfit may fail"
import numpy
from numpy import vectorize,zeros,exp,median,where,asarray,array,nonzero,ma,arange,square
import matplotlib
#matplotlib.use('Agg')
from pylab import indices,figure,clf,savefig,plot,legend,text,axes,title
import pickle
import pyfits
import time
from mad import MAD
from ratosexagesimal import ratos,dectos
def nanmedian(arr):
""" nanmedian - this version is NOT capable of broadcasting (operating along axes) """
return median(arr[arr==arr])
def nanmean(arr):
""" nanmean - this version is NOT capable of broadcasting (operating along axes) """
return (arr[arr==arr]).mean()
# read in file
# filename = sys.argv[1]
# fitsfile = pyfits.open(filename)
# cube = fitsfile[0].data
# def gaussian(dx,sigma):
# return lambda x: exp( - (x-dx)**2 / sigma**2 )
# def return_param(xarr,param):
# errorfunction = lambda p:gaussian(*p)(*indices(xarr.shape))-xarr
# pars, cov, infodict, errmsg, success = optimize.leastsq(errorfunction, [len(xarr)/2.,1], full_output=1)
# print errmsg
# if param == 'width':
# return pars[1]
# elif param == 'center':
# return pars[0]
# else:
# return
def gaussian(dx,sigma,a):
return lambda x: a*exp( - (x-dx)**2 / sigma**2 )
def double_gaussian(dx1,dx2,sigma1,sigma2,a1,a2):
return lambda x: a1*exp( - (x-dx1)**2 / sigma1**2 ) + a2*exp( - (x-dx2)**2 / sigma2**2 )
def triple_gaussian(dx1,dx2,dx3,sigma1,sigma2,sigma3,a1,a2,a3):
return lambda x: abs(a1)*exp( - (x-dx1)**2 / sigma1**2 ) + abs(a2)*exp( - (x-dx2)**2 / sigma2**2 ) + abs(a3)*exp( - (x-dx3)**2 / sigma3**2 )
def n_gaussian(dx,sigma,a):
def g(x):
v = zeros(len(x))
for i in range(len(dx)):
v += a[i] * exp( - ( x - dx[i] )**2 / sigma[i]**2 )
return v
return g
def gerr(xarr):
return lambda p:xarr-gaussian(*p)(*indices(xarr.shape))
def double_gerr(xarr):
return lambda p:xarr-double_gaussian(*p)(*indices(xarr.shape))
def triple_gerr(xarr):
return lambda p:xarr-triple_gaussian(*p)(*indices(xarr.shape))
def return_param(xarr,params=None,negamp=False):
if params is None:
if negamp:
params = [xarr.argmin(),5,xarr.min()]
else:
params = [xarr.argmax(),5,xarr.max()]
pars, cov, infodict, errmsg, success = optimize.leastsq(gerr(xarr), params, full_output=1)
return pars
def return_double_param(xarr,params=None):
if params is None:
params = [xarr.argmax(),xarr.argmax()+3,4.2,2.3,xarr.max(),xarr.max()/2]
pars, cov, infodict, errmsg, success = optimize.leastsq(double_gerr(xarr), params, full_output=1)
return pars
def return_triple_param(xarr,params=None):
"""
input parameters: center[1-3],width[1-3],amplitude[1-3]
"""
if params is None:
params = [xarr.argmax(),xarr.argmax()+3,xarr.argmax(),4.2,2.3,10,xarr.max(),xarr.max()/2.,xarr.max()/5.]
pars, cov, infodict, errmsg, success = optimize.leastsq(triple_gerr(xarr), params, full_output=1)
return pars
def adaptive_collapse_gaussfit(cube,axis=2,nsig=3,nrsig=4,prefix='interesting',
vconv=lambda x: x,xtora=lambda x: x,ytodec=lambda x: x,doplot=True):
"""
Attempts to fit one or two Gaussians to each spectrum in a data cube and returns the parameters of the fits.
Adaptively determines where to fit two Gaussian components based on residuals. Will fit 3 gaussians if a
two-gaussian fit is not better than a certain threshold (specified by nsig), and those fits will be output
to images with filename prefix+(coordinate).png. The 3-gaussian fit parameters will not be returned because
the automated fitting is very unlikely to get that part right.
inputs:
cube - a data cube with two spatial and one spectral dimensions
axis - the axis of the spectral dimension
nsig - number of sigma over the mean residual to trigger double-gaussian fitting
also, cutoff to do any fitting at all
prefix - the prefix (including directory name) of the output images from 3-gaussian fitting
doplot - option to turn off plotting of triple-gaussian fits
vconv,xtora,ytodec - functions to convert the axes from pixel coordinates to ra/dec/velocity coordinates
returns:
width_arr1,width_arr2,chi2_arr,offset_arr1,offset_arr2,amp_arr1,amp_arr2
The Gaussian widths, line centers (in pixel units), amplitudes, and the chi-squared value, not in that order
These returns are identical to the returns from double_gaussian, but all components will be zero for the second
gaussian in the case of a single-gaussian fit
the triple gaussian is guessed to be the double gaussian plus a broad, low-amplitude gaussian. Ideally this should
fit outflows reasonably well, but who knows if it really will.
Another option is to fit a negative-amplitude gaussian to account for self-absorption
"""
std_coll = cube.std(axis=axis) # standard deviation of each spectrum
# mad_coll = MAD(cube,axis=axis)
mean_std = median(std_coll.ravel()) # median standard deviation (to reject high-signal spectra that have high std)
if axis > 0: # force spectral axis to first axis
cube = cube.swapaxes(0,axis)
width_arr = zeros(cube.shape[1:]) # define gaussian param arrays
width_arr1 = zeros(cube.shape[1:]) # define gaussian param arrays
width_arr2 = zeros(cube.shape[1:]) # define gaussian param arrays
amp_arr = zeros(cube.shape[1:]) # define gaussian param arrays
amp_arr1 = zeros(cube.shape[1:]) # define gaussian param arrays
amp_arr2 = zeros(cube.shape[1:]) # define gaussian param arrays
chi2_arr = zeros(cube.shape[1:]) # define gaussian param arrays
resid_arr = zeros(cube.shape[1:]) # define gaussian param arrays
offset_arr = zeros(cube.shape[1:]) # define gaussian param arrays
offset_arr1 = zeros(cube.shape[1:]) # define gaussian param arrays
offset_arr2 = zeros(cube.shape[1:]) # define gaussian param arrays
ncarr = (cube.max(axis=0) > mean_std*nsig) # cutoff: don't fit no-signal spectra
starttime = time.time() # timing for output
print cube.shape
print "Fitting a total of %i spectra with peak signal above %f" % (ncarr.sum(),mean_std*nsig)
for i in xrange(cube.shape[1]): # Loop over all elements for
t0 = time.time()
nspec = (cube[:,i,:].max(axis=0) > mean_std*nsig).sum()
print "Working on row %d with %d spectra to fit" % (i,nspec) ,
for j in xrange(cube.shape[2]):
if cube[:,i,j].max() > mean_std*nsig:
# if cube[:,i,j].max() > MAD(cube[:,i,j]):
pars = return_param(cube[:,i,j])
width_arr[i,j] = pars[1]
width_arr1[i,j] = pars[1]
amp_arr[i,j] = pars[2]
amp_arr1[i,j] = pars[2]
# chi2_arr[i,j] = sum(( gerr(cube[:,i,j])(pars) )**2)
resid_arr[i,j] = (gerr(cube[:,i,j])(pars)).sum()
offset_arr[i,j] = pars[0]
offset_arr1[i,j] = pars[0]
else:
width_arr1[i,j] = numpy.nan
chi2_arr[i,j] = numpy.nan
resid_arr[i,j] = numpy.nan
offset_arr1[i,j] = numpy.nan
dt = time.time()-t0
if nspec > 0:
print "in %f seconds (average: %f)" % (dt,dt/float(nspec))
else:
print
chi2_arr = resid_arr**2
resids = ma.masked_where(numpy.isnan(chi2_arr),chi2_arr) # hide bad values
# residcut = (resids.mean() + (resids.std() * nrsig) ) # Old version - used standard deviation and mean
residcut = (nanmedian(chi2_arr.ravel()) + (MAD(chi2_arr.ravel()) * nrsig) ) # New version: set cutoff by median + nrsig * MAD
to_refit = (resids > residcut).astype('bool')
# to_refit[numpy.isnan(to_refit)] = 0
inds = array(nonzero(to_refit)).transpose()
dgc,tgc = 0,0
print "Refitting a total of %i spectra with peak residual above %f" % (to_refit.sum(),residcut)
f=open("%s_triples.txt" % prefix,'w')
# vconv = lambda x: (x-p3+1)*dv+v0 # convert to velocity frame
vind = vconv(arange(cube[:,0,0].shape[0]))
xind = arange(cube[:,0,0].shape[0])
for ind in inds:
i,j = ind
doublepars = return_double_param(cube[:,i,j])
old_chi2 = chi2_arr[i,j]
new_chi2 = sum(square( double_gerr(cube[:,i,j])(doublepars) ))
if new_chi2 < old_chi2: # if 2 gaussians is an improvement, use it!
chi2_arr[i,j] = new_chi2
width_arr1[i,j] = doublepars[2]
width_arr2[i,j] = doublepars[3]
amp_arr1[i,j] = doublepars[4]
amp_arr2[i,j] = doublepars[5]
offset_arr1[i,j] = doublepars[0]
offset_arr2[i,j] = doublepars[1]
ncarr[i,j] += 1
if new_chi2 > residcut: # Even if double was better, see if a triple might be better yet [but don't store it in the params arrays!]
print >>f,"Triple-gaussian fitting at %i,%i (%i'th double, %i'th triple)" % (i,j,dgc,tgc)
if tgc % 100 == 0:
print "Triple-gaussian fitting at %i,%i (%i'th double, %i'th triple)" % (i,j,dgc,tgc)
tgc += 1
tpguess = [doublepars[0],doublepars[1],(doublepars[0]+doublepars[1])/2.,doublepars[2],doublepars[3],doublepars[2]*5.,doublepars[4],doublepars[5],doublepars[4]/5.]
triplepars = return_triple_param(cube[:,i,j],params=tpguess)
pars = [offset_arr[i,j],width_arr[i,j],amp_arr[i,j]]
if doplot: # if you don't, there's really no point in fitting at all...
ax = axes([.05,.05,.7,.9])
plot(vind,cube[:,i,j],color='black',linestyle='steps',linewidth='.5')
plot(vind,gaussian(*pars)(xind),'r-.',label="Single %f" % ( (gerr(cube[:,i,j])(pars)).sum() ) )
plot(vind,double_gaussian(*doublepars)(xind),'g--',label="Double %f" % ( (double_gerr(cube[:,i,j])(doublepars)).sum() ))
plot(vind,triple_gaussian(*triplepars)(xind),'b:',label="Triple %f" % ( (triple_gerr(cube[:,i,j])(triplepars)).sum() ),linewidth=2)
pars[0] = vconv(pars[0])
text(1.05,.8,"c1 %3.2f w1 %3.2f a1 %3.2f" % tuple(pars),transform=ax.transAxes,size='smaller')
dp = [ vconv(doublepars[0]) , doublepars[2], doublepars[4], vconv(doublepars[1]), doublepars[3], doublepars[5] ]
text(1.05,.6,"c1 %3.2f w1 %3.2f a1 %3.2f\nc2 %3.2f w2 %3.2f a2 %3.2f" % tuple(dp),transform=ax.transAxes,size='smaller')
tp = [ vconv(triplepars[0]) , triplepars[3], triplepars[6], vconv(triplepars[1]), triplepars[4], triplepars[7], vconv(triplepars[2]), triplepars[5], triplepars[8] ]
text(1.05,.4,"c1 %3.2f w1 %3.2f a1 %3.2f\nc2 %3.2f w2 %3.2f a2 %3.2f\nc3 %3.2f w3 %3.2f a3 %3.2f" % tuple(tp),transform=ax.transAxes,size='smaller')
title("Spectrum at %s %s" % (ratos(xtora(i)),dectos(ytodec(j))) )
legend(loc='best')
savefig("%s_%s.%s.png" % (prefix,i,j))
clf()
ncarr[i,j] += 1
print >>f,triplepars
dgc += 1
f.close()
print "Total time %f seconds for %i double and %i triple gaussians" % (time.time()-starttime,dgc,tgc)
return width_arr1,width_arr2,chi2_arr,offset_arr1,offset_arr2,amp_arr1,amp_arr2,ncarr
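# Illustrative usage sketch (the filename and prefix are hypothetical; kept as
# comments so that importing this module stays side-effect free):
#   cube = pyfits.open('mycube.fits')[0].data
#   (w1, w2, chi2, off1, off2,
#    a1, a2, ncomp) = adaptive_collapse_gaussfit(cube, axis=0, nsig=3, nrsig=4,
#                                                prefix='mycube', doplot=False)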
def collapse_gaussfit(cube,axis=2,negamp=False):
std_coll = cube.std(axis=axis)
mean_std = median(std_coll.ravel())
if axis > 0:
cube = cube.swapaxes(0,axis)
width_arr = zeros(cube.shape[1:])
amp_arr = zeros(cube.shape[1:])
chi2_arr = zeros(cube.shape[1:])
offset_arr = zeros(cube.shape[1:])
starttime = time.time()
print cube.shape
print "Fitting a total of %i spectra with peak signal above %f" % ((cube.max(axis=0) > mean_std).sum(),mean_std)
for i in xrange(cube.shape[1]):
t0 = time.time()
nspec = (cube[:,i,:].max(axis=0) > mean_std).sum()
print "Working on row %d with %d spectra to fit" % (i,nspec) ,
for j in xrange(cube.shape[2]):
if not negamp and cube[:,i,j].max() > mean_std:
pars = return_param(cube[:,i,j],negamp=negamp)
width_arr[i,j] = pars[1]
chi2_arr[i,j] = sum(( gerr(cube[:,i,j])(pars) )**2)
offset_arr[i,j] = pars[0]
amp_arr[i,j] = pars[2]
elif negamp and cube[:,i,j].min() < -1*mean_std:
pars = return_param(cube[:,i,j],negamp=negamp)
width_arr[i,j] = pars[1]
chi2_arr[i,j] = sum(( gerr(cube[:,i,j])(pars) )**2)
offset_arr[i,j] = pars[0]
amp_arr[i,j] = pars[2]
else:
width_arr[i,j] = numpy.nan
chi2_arr[i,j] = numpy.nan
offset_arr[i,j] = numpy.nan
amp_arr[i,j] = numpy.nan
dt = time.time()-t0
if nspec > 0:
print "in %f seconds (average: %f)" % (dt,dt/float(nspec))
print "Total time %f seconds" % (time.time()-starttime)
return width_arr,offset_arr,amp_arr,chi2_arr
# next step: find 2-gaussian fits
def collapse_double_gaussfit(cube,axis=2):
std_coll = cube.std(axis=axis)
mean_std = median(std_coll.ravel())
if axis > 0:
cube = cube.swapaxes(0,axis)
width_arr1 = zeros(cube.shape[1:])
width_arr2 = zeros(cube.shape[1:])
amp_arr1 = zeros(cube.shape[1:])
amp_arr2 = zeros(cube.shape[1:])
chi2_arr = zeros(cube.shape[1:])
offset_arr1 = zeros(cube.shape[1:])
offset_arr2 = zeros(cube.shape[1:])
starttime = time.time()
print cube.shape
print "Fitting a total of %i spectra with peak signal above %f" % ((cube.max(axis=0) > mean_std).sum(),mean_std)
for i in xrange(cube.shape[1]):
t0 = time.time()
nspec = (cube[:,i,:].max(axis=0) > mean_std).sum()
print "Working on row %d with %d spectra to fit" % (i,nspec) ,
for j in xrange(cube.shape[2]):
if cube[:,i,j].max() > mean_std:
pars = return_double_param(cube[:,i,j])
width_arr1[i,j] = pars[2]
width_arr2[i,j] = pars[3]
amp_arr1[i,j] = pars[4]
amp_arr2[i,j] = pars[5]
chi2_arr[i,j] = sum(( double_gerr(cube[:,i,j])(pars) )**2)
offset_arr1[i,j] = pars[0]
offset_arr2[i,j] = pars[1]
else:
width_arr1[i,j] = numpy.nan
width_arr2[i,j] = numpy.nan
chi2_arr[i,j] = numpy.nan
offset_arr1[i,j] = numpy.nan
offset_arr2[i,j] = numpy.nan
dt = time.time()-t0
if nspec > 0:
print "in %f seconds (average: %f)" % (dt,dt/float(nspec))
print "Total time %f seconds" % (time.time()-starttime)
return width_arr1,width_arr2,chi2_arr,offset_arr1,offset_arr2,amp_arr1,amp_arr2
def wrap_collapse_gauss(filename,outprefix,redo='no'):
"""
redo - if not equal to 'no', then...
if collapse_gaussfit succeeded (to the extent that the .pysav files were written),
but some part of the file writing or subsequent procedures failed, re-do those
procedures without redoing the whole collapse
"""
fitsfile = pyfits.open(filename)
dv,v0,p3 = fitsfile[0].header['CD3_3'],fitsfile[0].header['CRVAL3'],fitsfile[0].header['CRPIX3']
cube = fitsfile[0].data
cube = where(numpy.isnan(cube),0,cube)
if redo=='no':
doubleB = asarray(collapse_double_gaussfit(cube,axis=0))
doubleB[numpy.isnan(doubleB)] = 0
pickle.dump(doubleB,open('%s_doubleB.pysav' % outprefix,'w'))
else:
doubleB = pickle.load(open('%s_doubleB.pysav' % outprefix,'r'))
db = doubleB
gcd = double_gaussian(db[3],db[4],db[0],db[1],db[5],db[6])(indices(cube.shape)[0])
fitsfile[0].data = gcd
fitsfile.writeto('%s_doublegausscube.fits' % outprefix,clobber=True)
gcd[numpy.isnan(gcd)] = 0
doubleResids = cube-gcd
fitsfile[0].data = doubleResids
fitsfile.writeto('%s_doublegaussresids.fits' % outprefix,clobber=True)
#doubleB[4] = (doubleB[4]-v0) / dv + p3-1
#doubleB[3] = (doubleB[3]-v0) / dv + p3-1
doubleB[4] = (doubleB[4]-p3+1) * dv + v0
doubleB[3] = (doubleB[3]-p3+1) * dv + v0
fitsfile[0].data = asarray(doubleB)
fitsfile.writeto('%s_doublegausspars.fits' % outprefix,clobber=True)
if redo=='no':
singleB = asarray(collapse_gaussfit(cube,axis=0))
pickle.dump(singleB,open('%s_singleB.pysav' % outprefix,'w'))
else:
singleB = pickle.load(open('%s_singleB.pysav' % outprefix,'r'))
gc = gaussian(singleB[1],singleB[0],singleB[2])(indices(cube.shape)[0])
singleB[1] = (singleB[1]-p3+1) * dv + v0
fitsfile[0].data = gc
fitsfile.writeto('%s_singlegausscube.fits' % outprefix,clobber=True)
gc[numpy.isnan(gc)]=0
singleResids = cube-gc
fitsfile[0].data = singleResids
fitsfile.writeto('%s_singlegaussresids.fits' % outprefix,clobber=True)
fitsfile[0].data = asarray(singleB)
fitsfile.writeto('%s_singlegausspars.fits' % outprefix,clobber=True)
fitsfile[0].header.__delitem__('CD3_3')
fitsfile[0].header.__delitem__('CRVAL3')
fitsfile[0].header.__delitem__('CRPIX3')
fitsfile[0].header.__delitem__('CUNIT3')
fitsfile[0].header.__delitem__('CTYPE3')
doubleResids[numpy.isnan(doubleResids)] = 0
totalDResids = doubleResids.sum(axis=0)
fitsfile[0].data = totalDResids
fitsfile.writeto('%s_doublegauss_totalresids.fits' % outprefix,clobber=True)
singleResids[numpy.isnan(singleResids)] = 0
totalSResids = singleResids.sum(axis=0)
fitsfile[0].data = totalSResids
fitsfile.writeto('%s_singlegauss_totalresids.fits' % outprefix,clobber=True)
return singleB,doubleB
def wrap_collapse_adaptive(filename,outprefix,redo='no',nsig=5,nrsig=2,doplot=True):
"""
redo - if not equal to 'no', then...
if collapse_gaussfit succeeded (to the extent that the .pysav files were written),
but some part of the file writing or subsequent procedures failed, re-do those
procedures without redoing the whole collapse
"""
fitsfile = pyfits.open(filename)
dv,v0,p3 = fitsfile[0].header['CD3_3'],fitsfile[0].header['CRVAL3'],fitsfile[0].header['CRPIX3']
dr,r0,p1 = fitsfile[0].header['CD1_1'],fitsfile[0].header['CRVAL1'],fitsfile[0].header['CRPIX1']
dd,d0,p2 = fitsfile[0].header['CD2_2'],fitsfile[0].header['CRVAL2'],fitsfile[0].header['CRPIX2']
xtora = lambda x: (x-p1+1)*dr+r0 # convert pixel coordinates to RA/Dec/Velocity
ytodec = lambda y: (y-p2+1)*dd+d0
vconv = lambda v: (v-p3+1)*dv+v0
cube = fitsfile[0].data
cube = where(numpy.isnan(cube),0,cube)
if redo=='no':
adaptB = asarray(adaptive_collapse_gaussfit(cube,axis=0,prefix=outprefix+'_triple',
nsig=nsig,nrsig=nrsig,vconv=vconv,xtora=xtora,ytodec=ytodec,doplot=doplot))
adaptB[numpy.isnan(adaptB)] = 0
pickle.dump(adaptB,open('%s_adaptB.pysav' % outprefix,'w'))
else:
adaptB = pickle.load(open('%s_adaptB.pysav' % outprefix,'r'))
db = adaptB
gcd = double_gaussian(db[3],db[4],db[0],db[1],db[5],db[6])(indices(cube.shape)[0])
fitsfile[0].data = gcd
fitsfile.writeto('%s_adaptgausscube.fits' % outprefix,clobber=True)
gcd[numpy.isnan(gcd)] = 0
adaptResids = cube-gcd
fitsfile[0].data = adaptResids
fitsfile.writeto('%s_adaptgaussresids.fits' % outprefix,clobber=True)
#adaptB[4] = (adaptB[4]-v0) / dv + p3-1
#adaptB[3] = (adaptB[3]-v0) / dv + p3-1
adaptB[4] = (adaptB[4]-p3+1) * dv + v0
adaptB[3] = (adaptB[3]-p3+1) * dv + v0
fitsfile[0].data = asarray(adaptB)
fitsfile.writeto('%s_adaptgausspars.fits' % outprefix,clobber=True)
fitsfile[0].header.__delitem__('CD3_3')
fitsfile[0].header.__delitem__('CRVAL3')
fitsfile[0].header.__delitem__('CRPIX3')
fitsfile[0].header.__delitem__('CUNIT3')
fitsfile[0].header.__delitem__('CTYPE3')
adaptResids[numpy.isnan(adaptResids)] = 0
totalDResids = adaptResids.sum(axis=0)
fitsfile[0].data = totalDResids
fitsfile.writeto('%s_adaptgauss_totalresids.fits' % outprefix,clobber=True)
return adaptB
|
| kogotko/carburetor | refs/heads/master | openstack_dashboard/enabled/_1640_project_template_versions_panel.py | 9 |
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'stacks.template_versions'
# The slug of the dashboard the PANEL is associated with. Required.
PANEL_DASHBOARD = 'project'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'orchestration'
# Python panel class of the PANEL to be added.
ADD_PANEL = ('openstack_dashboard.dashboards.project.'
'stacks.template_versions.panel.TemplateVersions')
|
| leafclick/intellij-community | refs/heads/master | python/testData/inspections/PyUnresolvedReferencesInspection/NamespacePackageAttributes/p1/m1.py | 819 |
def foo():
pass
|
| Intel-Corporation/tensorflow | refs/heads/master | tensorflow/python/eager/def_function_test.py | 1 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import weakref
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class _ModelWithOptimizer(training.Model):
def __init__(self):
super(_ModelWithOptimizer, self).__init__()
self.dense = core.Dense(1)
self.optimizer = adam.AdamOptimizer(0.01)
@def_function.function(
input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.float32)))
def call(self, x, y):
with backprop.GradientTape() as tape:
loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
trainable_variables = self.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
return {'loss': loss}
class _HasDecoratedMethod(object):
@def_function.function
def f(self, x):
return x * 3.
# pylint: disable=bad-continuation,anomalous-backslash-in-string
MIXING_GRAPH_EAGER_TENSORS_ERROR = (
"""An op outside of the function building code is being passed
a "Graph" tensor. It is possible to have Graph tensors
leak out of the function building context by including a
tf.init_scope in your function building code.
For example, the following function will fail:
@tf.function
def has_init_scope\(\):
my_constant = tf.constant\(1.\)
with tf.init_scope\(\):
added = my_constant \* 2
The graph tensor has name: Const:0""")
# pylint: enable=bad-continuation,anomalous-backslash-in-string
class DefFunctionTest(test.TestCase):
def testNoVariables(self):
@def_function.function
def fn(x):
return 2 * x
self.assertAllEqual(fn(constant_op.constant(4.0)), 8.0)
def testFailIfVariablesAreCreatedMoreThanOnce(self):
@def_function.function
def fn(x):
return variables.Variable(1.0) + x
with self.assertRaises(ValueError):
fn(1.0)
def testFailIfVariablesAreCreatedMoreThanOnceNoWeakRef(self):
state = []
@def_function.function
def fn(x):
state.append(variables.Variable(1.0))
return state[-1] + x
with self.assertRaises(ValueError):
fn(1.0)
def testCorrectVariableCreation(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testFunctionInitializer(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(lambda: 2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
def testFunctionInitializationFunction(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
init_fn = fn.get_initialization_function(constant_op.constant(1.0))
self.assertEqual(len(state), 1)
self.assertFalse(
resource_variable_ops.var_is_initialized_op(state[0].handle))
init_fn()
self.assertEqual(state[0].numpy(), 2.0)
def testVariableInitializerNotConstant(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testLegacyGraphModeVariables(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 2.0)
self.assertAllEqual(self.evaluate(result), 6.0)
def testLegacyGraphModeVariablesNonTrivialInitializer(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
two = constant_op.constant(2.0)
four = two * two
two_again = math_ops.sqrt(four)
state.append(variables.Variable(two_again + four))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 6.0)
self.assertAllEqual(self.evaluate(result), 18.0)
def testLegacyGraphModeInputDependentInitializerFails(self):
with ops.Graph().as_default():
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
with self.assertRaisesRegexp(
lift_to_graph.UnliftableError, r'transitively.* mul .* x'):
fn(constant_op.constant(3.0))
def testMethod(self):
class MyModel(object):
def __init__(self):
self.var = None
@def_function.function
def apply(self, x):
if self.var is None:
self.var = variables.Variable(2.0)
return self.var * x
m0 = MyModel()
self.assertAllEqual(m0.apply(3.0), 6.0)
# Calling twice to exercise that we do not recreate variables.
m0.var.assign(3.0)
self.assertAllEqual(m0.apply(3.0), 9.0)
m1 = MyModel()
self.assertAllEqual(m1.apply(3.0), 6.0)
def test_functools_partial(self):
self.assertAllClose(
3.,
def_function.function(functools.partial(lambda x, y: x + y, 1.))(
constant_op.constant(2.)))
def test_functools_partial_new_default(self):
def f(x=3, y=7):
return x + y
func = def_function.function(functools.partial(f, y=6))
self.assertEqual(func().numpy(), 9)
self.assertEqual(func(y=8).numpy(), 11)
def test_functools_partial_keywords(self):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, x=array_ops.zeros([1]), y=array_ops.zeros([1])))
self.assertAllEqual(func(), [0.0])
def test_functools_partial_single_positional(self):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, constant_op.constant(1)))
self.assertAllEqual(func(5), 6)
def test_unspecified_default_argument(self):
wrapped = def_function.function(
lambda x, y=2: x + y,
input_signature=[tensor_spec.TensorSpec((), dtypes.int32)])
self.assertEqual(3, wrapped(constant_op.constant(1)).numpy())
def test_optimizer(self):
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
model = _ModelWithOptimizer()
model(x, y)
def test_concrete_function_from_signature(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def compute(x):
return 2. * x
concrete = compute.get_concrete_function()
self.assertAllClose(1., concrete(constant_op.constant(0.5)))
concrete = compute.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
self.assertAllClose(4., concrete(constant_op.constant(2.)))
signature_args, _ = concrete.structured_input_signature
self.assertEqual(signature_args,
(tensor_spec.TensorSpec(
None, dtypes.float32, name='x'),))
def test_concrete_function_keyword_arguments(self):
@def_function.function
def f(x):
return x
conc = f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32, 'y'))
conc(y=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('y', signature_args[0].name)
conc = f.get_concrete_function(tensor_spec.TensorSpec(None, dtypes.float32))
conc(x=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('x', signature_args[0].name)
@def_function.function
def g(x):
return x[0]
conc = g.get_concrete_function(
[tensor_spec.TensorSpec(None, dtypes.float32, 'z'), 2])
conc(z=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('z', signature_args[0][0].name)
with self.assertRaisesRegexp(
ValueError, 'either zero or all names have to be specified'):
conc = g.get_concrete_function([
tensor_spec.TensorSpec(None, dtypes.float32, 'z'),
tensor_spec.TensorSpec(None, dtypes.float32),
])
def test_error_inner_capture(self):
@def_function.function
def f(inputs):
num_steps, _ = inputs.shape[:2]
outputs = []
for t in math_ops.range(num_steps):
outputs.append(inputs[t])
return outputs
with self.assertRaisesRegexp(ValueError, 'inner'):
f(array_ops.zeros(shape=(8, 42, 3)))
def testRuntimeErrorNotSticky(self):
@def_function.function
def fail(i):
control_flow_ops.Assert(math_ops.equal(i, 0), ['ick'])
fail(constant_op.constant(0)) # OK
with self.assertRaises(errors.InvalidArgumentError):
fail(constant_op.constant(1)) # InvalidArgument: "ick"
fail(constant_op.constant(0)) # OK
def testUnderscoreName(self):
@def_function.function
def f(_):
return _ + _
self.assertAllEqual(2.0, f(constant_op.constant(1.0)))
def test_serialization_signature_cache(self):
@def_function.function
def f(x, y):
return x, y
f(constant_op.constant([[3., 4.]]), constant_op.constant([2.]))
f(constant_op.constant([[3, 4, 5]]), constant_op.constant([2]))
signatures_args = set()
concrete_functions = f._list_all_concrete_functions_for_serialization()
for concrete_function in concrete_functions:
args, kwargs = concrete_function.structured_input_signature
signatures_args.add(args)
self.assertEqual(dict(), kwargs)
self.assertEqual(
signatures_args,
set(((tensor_spec.TensorSpec([1, 2], dtypes.float32, name='x'),
tensor_spec.TensorSpec([1], dtypes.float32, name='y')),
(tensor_spec.TensorSpec([1, 3], dtypes.int32, name='x'),
tensor_spec.TensorSpec([1], dtypes.int32, name='y')))))
@test_util.assert_no_garbage_created
def testFunctionReferenceCycles(self):
fn = def_function.function(lambda x: 2. * x)
fn(constant_op.constant(4.0))
weak_fn = weakref.ref(fn)
del fn
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
@test_util.assert_no_garbage_created
def testMethodReferenceCycles(self):
has_decorated_method = _HasDecoratedMethod()
has_decorated_method.f(constant_op.constant(5.))
weak_fn = weakref.ref(has_decorated_method.f)
del has_decorated_method
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
def testErrorMessageWhenGraphTensorIsPassedToEager(self):
@def_function.function
def failing_function():
a = constant_op.constant(1.)
with ops.init_scope():
_ = a + a
with self.assertRaisesRegexp(TypeError, MIXING_GRAPH_EAGER_TENSORS_ERROR):
failing_function()
def testVariableCreatorScope(self):
created_variables = []
captured_variables = []
@def_function.function
def f():
if not created_variables:
created_variables.append(variables.Variable(1.))
return created_variables[0] + 1.
def capture_creator(next_creator, **kwargs):
created = next_creator(**kwargs)
captured_variables.append(created)
return created
with variable_scope.variable_creator_scope(capture_creator):
f()
self.assertEqual(created_variables, captured_variables)
def testVarAlreadyInitializedNoClobbering(self):
v_holder = []
@def_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
add_var.get_concrete_function(constant_op.constant(2.))
self.assertAllClose([13., 14.], add_var(constant_op.constant(2.)))
def testSameVariableTwice(self):
v = variables.Variable(1.0)
@def_function.function
def add(a, b):
return a + b
self.assertAllEqual(add(v, v), 2.0)
def testShapeCache(self):
@def_function.function
def func(x):
return 2 * x
func_a = func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
func_b = func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
self.assertIs(func_a, func_b)
def testInitializationInNestedCall(self):
v_holder = []
@def_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
@def_function.function
def wrapper(x):
return add_var(x)
self.assertAllClose([13., 14.], wrapper(constant_op.constant(2.)))
v_holder[1].assign(11.)
self.assertAllClose([14., 15.], wrapper(constant_op.constant(2.)))
@test_util.run_gpu_only
def testDeviceAnnotationRespected(self):
a = []
@def_function.function()
def create_variable():
with ops.init_scope():
initial_value = random_ops.random_uniform(
(2, 2), maxval=1000000, dtype=dtypes.int64)
if not a:
with ops.device("CPU:0"):
a.append(resource_variable_ops.ResourceVariable(initial_value))
return a[0].read_value()
created_variable_read = create_variable()
self.assertRegexpMatches(created_variable_read.device, "CPU")
def testDecorate(self):
func = def_function.function(lambda: 1)
def decorator(f):
return lambda: 1 + f()
func._decorate(decorator)
self.assertEqual(func().numpy(), 2)
def testLiftPlaceholderInitializedVariable(self):
with ops.Graph().as_default():
var_list = []
@def_function.function
def use_variable():
if not var_list:
initial_value = array_ops.placeholder(shape=[], dtype=dtypes.float32)
v = variables.Variable(initial_value)
var_list.append(v)
return var_list[0] + 1.
var_plus_one = use_variable()
with self.session() as session:
init_op = var_list[0].initializer
session.run(init_op, feed_dict={init_op.inputs[1]: 2.})
self.assertEqual(3., session.run(var_plus_one))
def testDecorate_rejectedAfterTrace(self):
func = def_function.function(lambda: 1)
self.assertEqual(func().numpy(), 1)
msg = 'Functions cannot be decorated after they have been traced.'
with self.assertRaisesRegexp(ValueError, msg):
func._decorate(lambda f: f)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
| sander76/home-assistant | refs/heads/dev | homeassistant/components/google_travel_time/sensor.py | 1 |
"""Support for Google travel time sensors."""
from datetime import datetime, timedelta
import logging
import googlemaps
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_API_KEY,
CONF_MODE,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
TIME_MINUTES,
)
from homeassistant.helpers import location
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by Google"
CONF_DESTINATION = "destination"
CONF_OPTIONS = "options"
CONF_ORIGIN = "origin"
CONF_TRAVEL_MODE = "travel_mode"
DEFAULT_NAME = "Google Travel Time"
SCAN_INTERVAL = timedelta(minutes=5)
ALL_LANGUAGES = [
"ar",
"bg",
"bn",
"ca",
"cs",
"da",
"de",
"el",
"en",
"es",
"eu",
"fa",
"fi",
"fr",
"gl",
"gu",
"hi",
"hr",
"hu",
"id",
"it",
"iw",
"ja",
"kn",
"ko",
"lt",
"lv",
"ml",
"mr",
"nl",
"no",
"pl",
"pt",
"pt-BR",
"pt-PT",
"ro",
"ru",
"sk",
"sl",
"sr",
"sv",
"ta",
"te",
"th",
"tl",
"tr",
"uk",
"vi",
"zh-CN",
"zh-TW",
]
AVOID = ["tolls", "highways", "ferries", "indoor"]
TRANSIT_PREFS = ["less_walking", "fewer_transfers"]
TRANSPORT_TYPE = ["bus", "subway", "train", "tram", "rail"]
TRAVEL_MODE = ["driving", "walking", "bicycling", "transit"]
TRAVEL_MODEL = ["best_guess", "pessimistic", "optimistic"]
UNITS = ["metric", "imperial"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_ORIGIN): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TRAVEL_MODE): vol.In(TRAVEL_MODE),
vol.Optional(CONF_OPTIONS, default={CONF_MODE: "driving"}): vol.All(
dict,
vol.Schema(
{
vol.Optional(CONF_MODE, default="driving"): vol.In(TRAVEL_MODE),
vol.Optional("language"): vol.In(ALL_LANGUAGES),
vol.Optional("avoid"): vol.In(AVOID),
vol.Optional("units"): vol.In(UNITS),
vol.Exclusive("arrival_time", "time"): cv.string,
vol.Exclusive("departure_time", "time"): cv.string,
vol.Optional("traffic_model"): vol.In(TRAVEL_MODEL),
vol.Optional("transit_mode"): vol.In(TRANSPORT_TYPE),
vol.Optional("transit_routing_preference"): vol.In(TRANSIT_PREFS),
}
),
),
}
)
TRACKABLE_DOMAINS = ["device_tracker", "sensor", "zone", "person"]
DATA_KEY = "google_travel_time"
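# A minimal configuration.yaml sketch for this platform (values are
# illustrative and the API key is a placeholder):
#
#   sensor:
#     - platform: google_travel_time
#       api_key: YOUR_GOOGLE_API_KEY
#       origin: zone.home
#       destination: 51.5034,-0.1276
#       options:
#         mode: driving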
def convert_time_to_utc(timestr):
"""Take a string like 08:00:00 and convert it to a unix timestamp."""
combined = datetime.combine(
dt_util.start_of_local_day(), dt_util.parse_time(timestr)
)
if combined < datetime.now():
combined = combined + timedelta(days=1)
return dt_util.as_timestamp(combined)
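# Worked example (times are illustrative): calling convert_time_to_utc("08:00:00")
# at 09:00 local time returns the Unix timestamp of tomorrow's 08:00, because the
# combined datetime has already passed and a day is added before conversion.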
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up the Google travel time platform."""
def run_setup(event):
"""
Delay the setup until Home Assistant is fully initialized.
This ensures any entities it references have already been created.
"""
hass.data.setdefault(DATA_KEY, [])
options = config.get(CONF_OPTIONS)
if options.get("units") is None:
options["units"] = hass.config.units.name
travel_mode = config.get(CONF_TRAVEL_MODE)
mode = options.get(CONF_MODE)
if travel_mode is not None:
wstr = (
"Google Travel Time: travel_mode is deprecated, please "
"add mode to the options dictionary instead!"
)
_LOGGER.warning(wstr)
if mode is None:
options[CONF_MODE] = travel_mode
titled_mode = options.get(CONF_MODE).title()
formatted_name = f"{DEFAULT_NAME} - {titled_mode}"
name = config.get(CONF_NAME, formatted_name)
api_key = config.get(CONF_API_KEY)
origin = config.get(CONF_ORIGIN)
destination = config.get(CONF_DESTINATION)
sensor = GoogleTravelTimeSensor(
hass, name, api_key, origin, destination, options
)
hass.data[DATA_KEY].append(sensor)
if sensor.valid_api_connection:
add_entities_callback([sensor])
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class GoogleTravelTimeSensor(SensorEntity):
"""Representation of a Google travel time sensor."""
def __init__(self, hass, name, api_key, origin, destination, options):
"""Initialize the sensor."""
self._hass = hass
self._name = name
self._options = options
self._unit_of_measurement = TIME_MINUTES
self._matrix = None
self.valid_api_connection = True
# Check if location is a trackable entity
if origin.split(".", 1)[0] in TRACKABLE_DOMAINS:
self._origin_entity_id = origin
else:
self._origin = origin
if destination.split(".", 1)[0] in TRACKABLE_DOMAINS:
self._destination_entity_id = destination
else:
self._destination = destination
self._client = googlemaps.Client(api_key, timeout=10)
try:
self.update()
except googlemaps.exceptions.ApiError as exp:
_LOGGER.error(exp)
self.valid_api_connection = False
return
@property
def state(self):
"""Return the state of the sensor."""
if self._matrix is None:
return None
_data = self._matrix["rows"][0]["elements"][0]
if "duration_in_traffic" in _data:
return round(_data["duration_in_traffic"]["value"] / 60)
if "duration" in _data:
return round(_data["duration"]["value"] / 60)
return None
@property
def name(self):
"""Get the name of the sensor."""
return self._name
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if self._matrix is None:
return None
res = self._matrix.copy()
res.update(self._options)
del res["rows"]
_data = self._matrix["rows"][0]["elements"][0]
if "duration_in_traffic" in _data:
res["duration_in_traffic"] = _data["duration_in_traffic"]["text"]
if "duration" in _data:
res["duration"] = _data["duration"]["text"]
if "distance" in _data:
res["distance"] = _data["distance"]["text"]
res["origin"] = self._origin
res["destination"] = self._destination
res[ATTR_ATTRIBUTION] = ATTRIBUTION
return res
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from Google."""
options_copy = self._options.copy()
dtime = options_copy.get("departure_time")
atime = options_copy.get("arrival_time")
if dtime is not None and ":" in dtime:
options_copy["departure_time"] = convert_time_to_utc(dtime)
elif dtime is not None:
options_copy["departure_time"] = dtime
elif atime is None:
options_copy["departure_time"] = "now"
if atime is not None and ":" in atime:
options_copy["arrival_time"] = convert_time_to_utc(atime)
elif atime is not None:
options_copy["arrival_time"] = atime
# Convert device_trackers to google friendly location
if hasattr(self, "_origin_entity_id"):
self._origin = self._get_location_from_entity(self._origin_entity_id)
if hasattr(self, "_destination_entity_id"):
self._destination = self._get_location_from_entity(
self._destination_entity_id
)
self._destination = self._resolve_zone(self._destination)
self._origin = self._resolve_zone(self._origin)
if self._destination is not None and self._origin is not None:
self._matrix = self._client.distance_matrix(
self._origin, self._destination, **options_copy
)
def _get_location_from_entity(self, entity_id):
"""Get the location from the entity state or attributes."""
entity = self._hass.states.get(entity_id)
if entity is None:
_LOGGER.error("Unable to find entity %s", entity_id)
self.valid_api_connection = False
return None
# Check if the entity has location attributes
if location.has_location(entity):
return self._get_location_from_attributes(entity)
# Check if device is in a zone
zone_entity = self._hass.states.get("zone.%s" % entity.state)
if location.has_location(zone_entity):
_LOGGER.debug(
"%s is in %s, getting zone location", entity_id, zone_entity.entity_id
)
return self._get_location_from_attributes(zone_entity)
# If zone was not found in state then use the state as the location
if entity_id.startswith("sensor."):
return entity.state
# When everything fails just return nothing
return None
@staticmethod
def _get_location_from_attributes(entity):
"""Get the lat/long string from an entities attributes."""
attr = entity.attributes
return f"{attr.get(ATTR_LATITUDE)},{attr.get(ATTR_LONGITUDE)}"
def _resolve_zone(self, friendly_name):
entities = self._hass.states.all()
for entity in entities:
if entity.domain == "zone" and entity.name == friendly_name:
return self._get_location_from_attributes(entity)
return friendly_name
|
| jacksonwilliams/arsenalsuite | refs/heads/master | cpp/lib/PyQt4/examples/mainwindows/mdi/mdi.py | 14 |
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
# This is only needed for Python v2 but is harmless for Python v3.
import sip
sip.setapi('QVariant', 2)
from PyQt4 import QtCore, QtGui
import mdi_rc
class MdiChild(QtGui.QTextEdit):
sequenceNumber = 1
def __init__(self):
super(MdiChild, self).__init__()
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.isUntitled = True
def newFile(self):
self.isUntitled = True
self.curFile = "document%d.txt" % MdiChild.sequenceNumber
MdiChild.sequenceNumber += 1
self.setWindowTitle(self.curFile + '[*]')
self.document().contentsChanged.connect(self.documentWasModified)
def loadFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "MDI",
"Cannot read file %s:\n%s." % (fileName, file.errorString()))
return False
instr = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
self.setPlainText(instr.readAll())
QtGui.QApplication.restoreOverrideCursor()
self.setCurrentFile(fileName)
self.document().contentsChanged.connect(self.documentWasModified)
return True
def save(self):
if self.isUntitled:
return self.saveAs()
else:
return self.saveFile(self.curFile)
def saveAs(self):
fileName = QtGui.QFileDialog.getSaveFileName(self, "Save As",
self.curFile)
if not fileName:
return False
return self.saveFile(fileName)
def saveFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "MDI",
"Cannot write file %s:\n%s." % (fileName, file.errorString()))
return False
outstr = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
outstr << self.toPlainText()
QtGui.QApplication.restoreOverrideCursor()
self.setCurrentFile(fileName)
return True
def userFriendlyCurrentFile(self):
return self.strippedName(self.curFile)
def currentFile(self):
return self.curFile
def closeEvent(self, event):
if self.maybeSave():
event.accept()
else:
event.ignore()
def documentWasModified(self):
self.setWindowModified(self.document().isModified())
def maybeSave(self):
if self.document().isModified():
ret = QtGui.QMessageBox.warning(self, "MDI",
"'%s' has been modified.\nDo you want to save your "
"changes?" % self.userFriendlyCurrentFile(),
QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard |
QtGui.QMessageBox.Cancel)
if ret == QtGui.QMessageBox.Save:
return self.save()
elif ret == QtGui.QMessageBox.Cancel:
return False
return True
def setCurrentFile(self, fileName):
self.curFile = QtCore.QFileInfo(fileName).canonicalFilePath()
self.isUntitled = False
self.document().setModified(False)
self.setWindowModified(False)
self.setWindowTitle(self.userFriendlyCurrentFile() + "[*]")
def strippedName(self, fullFileName):
return QtCore.QFileInfo(fullFileName).fileName()
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.mdiArea = QtGui.QMdiArea()
self.mdiArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.mdiArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.setCentralWidget(self.mdiArea)
self.mdiArea.subWindowActivated.connect(self.updateMenus)
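# The QSignalMapper forwards each window-menu action's triggered() signal to
# setActiveSubWindow() with the corresponding sub-window as the argument
# (see updateWindowMenu() and setActiveSubWindow() below).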
self.windowMapper = QtCore.QSignalMapper(self)
self.windowMapper.mapped[QtGui.QWidget].connect(self.setActiveSubWindow)
self.createActions()
self.createMenus()
self.createToolBars()
self.createStatusBar()
self.updateMenus()
self.readSettings()
self.setWindowTitle("MDI")
self.setUnifiedTitleAndToolBarOnMac(True)
def closeEvent(self, event):
self.mdiArea.closeAllSubWindows()
if self.mdiArea.currentSubWindow():
event.ignore()
else:
self.writeSettings()
event.accept()
def newFile(self):
child = self.createMdiChild()
child.newFile()
child.show()
def open(self):
fileName = QtGui.QFileDialog.getOpenFileName(self)
if fileName:
existing = self.findMdiChild(fileName)
if existing:
self.mdiArea.setActiveSubWindow(existing)
return
child = self.createMdiChild()
if child.loadFile(fileName):
self.statusBar().showMessage("File loaded", 2000)
child.show()
else:
child.close()
def save(self):
if self.activeMdiChild() and self.activeMdiChild().save():
self.statusBar().showMessage("File saved", 2000)
def saveAs(self):
if self.activeMdiChild() and self.activeMdiChild().saveAs():
self.statusBar().showMessage("File saved", 2000)
def cut(self):
if self.activeMdiChild():
self.activeMdiChild().cut()
def copy(self):
if self.activeMdiChild():
self.activeMdiChild().copy()
def paste(self):
if self.activeMdiChild():
self.activeMdiChild().paste()
def about(self):
QtGui.QMessageBox.about(self, "About MDI",
"The <b>MDI</b> example demonstrates how to write multiple "
"document interface applications using Qt.")
def updateMenus(self):
hasMdiChild = (self.activeMdiChild() is not None)
self.saveAct.setEnabled(hasMdiChild)
self.saveAsAct.setEnabled(hasMdiChild)
self.pasteAct.setEnabled(hasMdiChild)
self.closeAct.setEnabled(hasMdiChild)
self.closeAllAct.setEnabled(hasMdiChild)
self.tileAct.setEnabled(hasMdiChild)
self.cascadeAct.setEnabled(hasMdiChild)
self.nextAct.setEnabled(hasMdiChild)
self.previousAct.setEnabled(hasMdiChild)
self.separatorAct.setVisible(hasMdiChild)
hasSelection = (self.activeMdiChild() is not None and
self.activeMdiChild().textCursor().hasSelection())
self.cutAct.setEnabled(hasSelection)
self.copyAct.setEnabled(hasSelection)
def updateWindowMenu(self):
self.windowMenu.clear()
self.windowMenu.addAction(self.closeAct)
self.windowMenu.addAction(self.closeAllAct)
self.windowMenu.addSeparator()
self.windowMenu.addAction(self.tileAct)
self.windowMenu.addAction(self.cascadeAct)
self.windowMenu.addSeparator()
self.windowMenu.addAction(self.nextAct)
self.windowMenu.addAction(self.previousAct)
self.windowMenu.addAction(self.separatorAct)
windows = self.mdiArea.subWindowList()
self.separatorAct.setVisible(len(windows) != 0)
for i, window in enumerate(windows):
child = window.widget()
text = "%d %s" % (i + 1, child.userFriendlyCurrentFile())
if i < 9:
text = '&' + text
action = self.windowMenu.addAction(text)
action.setCheckable(True)
action.setChecked(child is self.activeMdiChild())
action.triggered.connect(self.windowMapper.map)
self.windowMapper.setMapping(action, window)
def createMdiChild(self):
child = MdiChild()
self.mdiArea.addSubWindow(child)
child.copyAvailable.connect(self.cutAct.setEnabled)
child.copyAvailable.connect(self.copyAct.setEnabled)
return child
def createActions(self):
self.newAct = QtGui.QAction(QtGui.QIcon(':/images/new.png'), "&New",
self, shortcut=QtGui.QKeySequence.New,
statusTip="Create a new file", triggered=self.newFile)
self.openAct = QtGui.QAction(QtGui.QIcon(':/images/open.png'),
"&Open...", self, shortcut=QtGui.QKeySequence.Open,
statusTip="Open an existing file", triggered=self.open)
self.saveAct = QtGui.QAction(QtGui.QIcon(':/images/save.png'),
"&Save", self, shortcut=QtGui.QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.saveAsAct = QtGui.QAction("Save &As...", self,
shortcut=QtGui.QKeySequence.SaveAs,
statusTip="Save the document under a new name",
triggered=self.saveAs)
self.exitAct = QtGui.QAction("E&xit", self,
shortcut=QtGui.QKeySequence.Quit,
statusTip="Exit the application",
triggered=QtGui.qApp.closeAllWindows)
self.cutAct = QtGui.QAction(QtGui.QIcon(':/images/cut.png'), "Cu&t",
self, shortcut=QtGui.QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.cut)
self.copyAct = QtGui.QAction(QtGui.QIcon(':/images/copy.png'),
"&Copy", self, shortcut=QtGui.QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.copy)
self.pasteAct = QtGui.QAction(QtGui.QIcon(':/images/paste.png'),
"&Paste", self, shortcut=QtGui.QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.paste)
self.closeAct = QtGui.QAction("Cl&ose", self,
statusTip="Close the active window",
triggered=self.mdiArea.closeActiveSubWindow)
self.closeAllAct = QtGui.QAction("Close &All", self,
statusTip="Close all the windows",
triggered=self.mdiArea.closeAllSubWindows)
self.tileAct = QtGui.QAction("&Tile", self,
statusTip="Tile the windows",
triggered=self.mdiArea.tileSubWindows)
self.cascadeAct = QtGui.QAction("&Cascade", self,
statusTip="Cascade the windows",
triggered=self.mdiArea.cascadeSubWindows)
self.nextAct = QtGui.QAction("Ne&xt", self,
shortcut=QtGui.QKeySequence.NextChild,
statusTip="Move the focus to the next window",
triggered=self.mdiArea.activateNextSubWindow)
self.previousAct = QtGui.QAction("Pre&vious", self,
shortcut=QtGui.QKeySequence.PreviousChild,
statusTip="Move the focus to the previous window",
triggered=self.mdiArea.activatePreviousSubWindow)
self.separatorAct = QtGui.QAction(self)
self.separatorAct.setSeparator(True)
self.aboutAct = QtGui.QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.aboutQtAct = QtGui.QAction("About &Qt", self,
statusTip="Show the Qt library's About box",
triggered=QtGui.qApp.aboutQt)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.saveAsAct)
self.fileMenu.addSeparator()
action = self.fileMenu.addAction("Switch layout direction")
action.triggered.connect(self.switchLayoutDirection)
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu("&Edit")
self.editMenu.addAction(self.cutAct)
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.pasteAct)
self.windowMenu = self.menuBar().addMenu("&Window")
self.updateWindowMenu()
self.windowMenu.aboutToShow.connect(self.updateWindowMenu)
self.menuBar().addSeparator()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
def createToolBars(self):
self.fileToolBar = self.addToolBar("File")
self.fileToolBar.addAction(self.newAct)
self.fileToolBar.addAction(self.openAct)
self.fileToolBar.addAction(self.saveAct)
self.editToolBar = self.addToolBar("Edit")
self.editToolBar.addAction(self.cutAct)
self.editToolBar.addAction(self.copyAct)
self.editToolBar.addAction(self.pasteAct)
def createStatusBar(self):
self.statusBar().showMessage("Ready")
def readSettings(self):
settings = QtCore.QSettings('Trolltech', 'MDI Example')
pos = settings.value('pos', QtCore.QPoint(200, 200))
size = settings.value('size', QtCore.QSize(400, 400))
self.move(pos)
self.resize(size)
def writeSettings(self):
settings = QtCore.QSettings('Trolltech', 'MDI Example')
settings.setValue('pos', self.pos())
settings.setValue('size', self.size())
def activeMdiChild(self):
activeSubWindow = self.mdiArea.activeSubWindow()
if activeSubWindow:
return activeSubWindow.widget()
return None
def findMdiChild(self, fileName):
canonicalFilePath = QtCore.QFileInfo(fileName).canonicalFilePath()
for window in self.mdiArea.subWindowList():
if window.widget().currentFile() == canonicalFilePath:
return window
return None
def switchLayoutDirection(self):
if self.layoutDirection() == QtCore.Qt.LeftToRight:
QtGui.qApp.setLayoutDirection(QtCore.Qt.RightToLeft)
else:
QtGui.qApp.setLayoutDirection(QtCore.Qt.LeftToRight)
def setActiveSubWindow(self, window):
if window:
self.mdiArea.setActiveSubWindow(window)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
mainWin = MainWindow()
mainWin.show()
sys.exit(app.exec_())
|
jjmiranda/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/tests/test_video_mongo.py
|
7
|
# -*- coding: utf-8 -*-
"""Video xmodule tests in mongo."""
import ddt
import json
from collections import OrderedDict
from path import Path as path
from lxml import etree
from mock import patch, MagicMock, Mock
from nose.plugins.attrib import attr
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from xmodule.video_module import VideoDescriptor, bumper_utils, video_utils, rewrite_video_url
from xmodule.x_module import STUDENT_VIEW
from xmodule.tests.test_video import VideoDescriptorTestBase, instantiate_descriptor
from xmodule.tests.test_import import DummySystem
from xmodule.video_module.transcripts_utils import save_to_store, Transcript
from xmodule.modulestore.inheritance import own_metadata
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE
)
from edxval.api import (
create_profile, create_video, get_video_info, ValCannotCreateError, ValVideoNotFoundError
)
from . import BaseTestXmodule
from .test_video_xml import SOURCE_XML
from .test_video_handlers import TestVideo
@attr(shard=1)
class TestVideoYouTube(TestVideo):
METADATA = {}
def test_video_constructor(self):
"""Make sure that all parameters are extracted correctly from the XML."""
context = self.item_descriptor.render(STUDENT_VIEW).content
sources = [u'example.mp4', u'example.webm']
expected_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
"saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state",
"autoplay": False,
"streams": "0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg",
"sub": "a_sub_file.srt.sjson",
"sources": sources,
"captionDataDir": None,
"showCaptions": "true",
"generalSpeed": 1.0,
"speed": None,
"savedVideoPosition": 0.0,
"start": 3603.0,
"end": 3610.0,
"transcriptLanguage": "en",
"transcriptLanguages": OrderedDict({"en": "English", "uk": u"Українська"}),
"ytTestTimeout": 1500,
"ytApiUrl": "https://www.youtube.com/iframe_api",
"ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
"ytKey": None,
"transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
"transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
"autohideHtml5": False,
"recordedYoutubeIsAvailable": True,
})),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
)
@attr(shard=1)
class TestVideoNonYouTube(TestVideo):
"""Integration tests: web client + mongo."""
DATA = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson"
download_video="true"
start_time="01:00:03" end_time="01:00:10"
>
<source src="example.mp4"/>
<source src="example.webm"/>
</video>
"""
MODEL_DATA = {
'data': DATA,
}
METADATA = {}
def test_video_constructor(self):
"""Make sure that if the 'youtube' attribute is omitted in XML, then
the template generates an empty string for the YouTube streams.
"""
context = self.item_descriptor.render(STUDENT_VIEW).content
sources = [u'example.mp4', u'example.webm']
expected_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
"saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state",
"autoplay": False,
"streams": "1.00:3_yD_cEKoCk",
"sub": "a_sub_file.srt.sjson",
"sources": sources,
"captionDataDir": None,
"showCaptions": "true",
"generalSpeed": 1.0,
"speed": None,
"savedVideoPosition": 0.0,
"start": 3603.0,
"end": 3610.0,
"transcriptLanguage": "en",
"transcriptLanguages": OrderedDict({"en": "English"}),
"ytTestTimeout": 1500,
"ytApiUrl": "https://www.youtube.com/iframe_api",
"ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
"ytKey": None,
"transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
"transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
"autohideHtml5": False,
"recordedYoutubeIsAvailable": True,
})),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
)
@attr(shard=1)
class TestGetHtmlMethod(BaseTestXmodule):
'''
Make sure that `get_html` works correctly.
'''
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super(TestGetHtmlMethod, self).setUp()
self.setup_course()
self.default_metadata_dict = OrderedDict({
"saveStateUrl": "",
"autoplay": settings.FEATURES.get('AUTOPLAY_VIDEOS', True),
"streams": "1.00:3_yD_cEKoCk",
"sub": "a_sub_file.srt.sjson",
"sources": '[]',
"captionDataDir": None,
"showCaptions": "true",
"generalSpeed": 1.0,
"speed": None,
"savedVideoPosition": 0.0,
"start": 3603.0,
"end": 3610.0,
"transcriptLanguage": "en",
"transcriptLanguages": OrderedDict({"en": "English"}),
"ytTestTimeout": 1500,
"ytApiUrl": "https://www.youtube.com/iframe_api",
"ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
"ytKey": None,
"transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
"transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
"autohideHtml5": False,
"recordedYoutubeIsAvailable": True,
})
def test_get_html_track(self):
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="{sub}" download_track="{download_track}"
start_time="01:00:03" end_time="01:00:10" download_video="true"
>
<source src="example.mp4"/>
<source src="example.webm"/>
{track}
{transcripts}
</video>
"""
cases = [
{
'download_track': u'true',
'track': u'<track src="http://www.example.com/track"/>',
'sub': u'a_sub_file.srt.sjson',
'expected_track_url': u'http://www.example.com/track',
'transcripts': '',
},
{
'download_track': u'true',
'track': u'',
'sub': u'a_sub_file.srt.sjson',
'expected_track_url': u'a_sub_file.srt.sjson',
'transcripts': '',
},
{
'download_track': u'true',
'track': u'',
'sub': u'',
'expected_track_url': None,
'transcripts': '',
},
{
'download_track': u'false',
'track': u'<track src="http://www.example.com/track"/>',
'sub': u'a_sub_file.srt.sjson',
'expected_track_url': None,
'transcripts': '',
},
{
'download_track': u'true',
'track': u'',
'sub': u'',
'expected_track_url': u'a_sub_file.srt.sjson',
'transcripts': '<transcript language="uk" src="ukrainian.srt" />',
},
]
sources = [u'example.mp4', u'example.webm']
expected_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': '',
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
for data in cases:
metadata = self.default_metadata_dict
metadata['sources'] = sources
DATA = SOURCE_XML.format(
download_track=data['download_track'],
track=data['track'],
sub=data['sub'],
transcripts=data['transcripts'],
)
self.initialize_module(data=DATA)
track_url = self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'download'
).rstrip('/?')
context = self.item_descriptor.render(STUDENT_VIEW).content
metadata.update({
'transcriptLanguages': {"en": "English"} if not data['transcripts'] else {"uk": u'Українська'},
'transcriptLanguage': u'en' if not data['transcripts'] or data.get('sub') else u'uk',
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sub': data['sub'],
})
expected_context.update({
'transcript_download_format': (
None if self.item_descriptor.track and self.item_descriptor.download_track else 'srt'
),
'track': (
track_url if data['expected_track_url'] == u'a_sub_file.srt.sjson' else data['expected_track_url']
),
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(metadata)
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
)
def test_get_html_source(self):
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
>
{sources}
</video>
"""
cases = [
# self.download_video == True
{
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'download_video_link': u'example_source.mp4',
'sources': [u'example.mp4', u'example.webm'],
},
},
{
'download_video': 'true',
'source': '',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'download_video_link': u'example.mp4',
'sources': [u'example.mp4', u'example.webm'],
},
},
{
'download_video': 'true',
'source': '',
'sources': [],
'result': {},
},
# self.download_video == False
{
'download_video': 'false',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'sources': [u'example.mp4', u'example.webm'],
},
},
]
initial_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': self.default_metadata_dict,
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
for data in cases:
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources']
)
self.initialize_module(data=DATA)
context = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result'].get('sources', []),
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result'].get('download_video_link'),
'metadata': json.dumps(expected_context['metadata'])
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
def test_get_html_with_non_existent_edx_video_id(self):
"""
Tests VideoModule get_html when an edx_video_id is given but no video is found
"""
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
no_video_data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': "meow",
'result': {
'download_video_link': u'example_source.mp4',
'sources': [u'example.mp4', u'example.webm'],
}
}
DATA = SOURCE_XML.format(
download_video=no_video_data['download_video'],
source=no_video_data['source'],
sources=no_video_data['sources'],
edx_video_id=no_video_data['edx_video_id']
)
self.initialize_module(data=DATA)
# Referencing a non-existent VAL ID in courseware won't cause an error --
# it'll just fall back to the values in the VideoDescriptor.
self.assertIn("example_source.mp4", self.item_descriptor.render(STUDENT_VIEW).content)
def test_get_html_with_mocked_edx_video_id(self):
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
data = {
# test with download_video set to false and make sure download_video_link is not set (is None)
'download_video': 'false',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': "mock item",
'result': {
'download_video_link': None,
# make sure the desktop_mp4 url is included as part of the alternative sources.
'sources': [u'example.mp4', u'example.webm', u'http://www.meowmix.com'],
}
}
# Video found for edx_video_id
metadata = self.default_metadata_dict
metadata['autoplay'] = False
metadata['sources'] = ""
initial_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
'metadata': metadata
}
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id']
)
self.initialize_module(data=DATA)
with patch('edxval.api.get_video_info') as mock_get_video_info:
mock_get_video_info.return_value = {
'url': '/edxval/video/example',
'edx_video_id': u'example',
'duration': 111.0,
'client_video_id': u'The example video',
'encoded_videos': [
{
'url': u'http://www.meowmix.com',
'file_size': 25556,
'bitrate': 9600,
'profile': u'desktop_mp4'
}
]
}
context = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result']['sources'],
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result']['download_video_link'],
'metadata': json.dumps(expected_context['metadata'])
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
def test_get_html_with_existing_edx_video_id(self):
# create test profiles and their encodings
encoded_videos = []
for profile, extension in [("desktop_webm", "webm"), ("desktop_mp4", "mp4")]:
create_profile(profile)
encoded_videos.append(
dict(
url=u"http://fake-video.edx.org/thundercats.{}".format(extension),
file_size=9000,
bitrate=42,
profile=profile,
)
)
result = create_video(
dict(
client_video_id="Thunder Cats",
duration=111,
edx_video_id="thundercats",
status='test',
encoded_videos=encoded_videos
)
)
self.assertEqual(result, "thundercats")
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': "thundercats",
'result': {
'download_video_link': u'http://fake-video.edx.org/thundercats.mp4',
# make sure the urls for the various encodings are included as part of the alternative sources.
'sources': [u'example.mp4', u'example.webm'] +
[video['url'] for video in encoded_videos],
}
}
# Video found for edx_video_id
metadata = self.default_metadata_dict
metadata['sources'] = ""
initial_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
'metadata': metadata,
}
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id']
)
self.initialize_module(data=DATA)
context = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result']['sources'],
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result']['download_video_link'],
'metadata': json.dumps(expected_context['metadata'])
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
# pylint: disable=invalid-name
@patch('xmodule.video_module.video_module.BrandingInfoConfig')
@patch('xmodule.video_module.video_module.rewrite_video_url')
def test_get_html_cdn_source(self, mocked_get_video, mock_BrandingInfoConfig):
"""
Test that video sources are rewritten to their CDN equivalents.
"""
mock_BrandingInfoConfig.get_config.return_value = {
"CN": {
'url': 'http://www.xuetangx.com',
'logo_src': 'http://www.xuetangx.com/static/images/logo.png',
'logo_tag': 'Video hosted by XuetangX.com'
}
}
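# The mocked rewrite_video_url maps each original source URL to its CDN
# counterpart and returns None for any other URL.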
def side_effect(*args, **kwargs):
cdn = {
'http://example.com/example.mp4': 'http://cdn-example.com/example.mp4',
'http://example.com/example.webm': 'http://cdn-example.com/example.webm',
}
return cdn.get(args[1])
mocked_get_video.side_effect = side_effect
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
edx_video_id="{edx_video_id}"
start_time="01:00:03" end_time="01:00:10"
>
{sources}
</video>
"""
case_data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="http://example.com/example.mp4"/>
<source src="http://example.com/example.webm"/>
""",
'result': {
'download_video_link': u'example_source.mp4',
'sources': [
u'http://cdn-example.com/example.mp4',
u'http://cdn-example.com/example.webm'
],
},
}
# test with and without edx_video_id specified.
cases = [
dict(case_data, edx_video_id=""),
dict(case_data, edx_video_id="vid-v1:12345"),
]
initial_context = {
'branding_info': {
'logo_src': 'http://www.xuetangx.com/static/images/logo.png',
'logo_tag': 'Video hosted by XuetangX.com',
'url': 'http://www.xuetangx.com'
},
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': None,
'handout': None,
'id': None,
'metadata': self.default_metadata_dict,
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
for data in cases:
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id'],
)
self.initialize_module(data=DATA)
self.item_descriptor.xmodule_runtime.user_location = 'CN'
context = self.item_descriptor.render('student_view').content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result'].get('sources', []),
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result'].get('download_video_link'),
'metadata': json.dumps(expected_context['metadata'])
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
@attr(shard=1)
class TestVideoCDNRewriting(BaseTestXmodule):
"""
Tests for Video CDN.
"""
def setUp(self, *args, **kwargs):
super(TestVideoCDNRewriting, self).setUp(*args, **kwargs)
self.original_video_file = "original_video.mp4"
self.original_video_url = "http://www.originalvideo.com/" + self.original_video_file
@patch.dict("django.conf.settings.CDN_VIDEO_URLS",
{"CN": "https://chinacdn.cn/"})
def test_rewrite_video_url_success(self):
"""
Test successful CDN request.
"""
cdn_response_video_url = settings.CDN_VIDEO_URLS["CN"] + self.original_video_file
self.assertEqual(
rewrite_video_url(settings.CDN_VIDEO_URLS["CN"], self.original_video_url),
cdn_response_video_url
)
@patch.dict("django.conf.settings.CDN_VIDEO_URLS",
{"CN": "https://chinacdn.cn/"})
def test_rewrite_url_concat(self):
"""
Test that rewritten URLs are returned clean even when the CDN base URL has extra trailing slashes.
"""
cdn_response_video_url = settings.CDN_VIDEO_URLS["CN"] + "original_video.mp4"
self.assertEqual(
rewrite_video_url(settings.CDN_VIDEO_URLS["CN"] + "///", self.original_video_url),
cdn_response_video_url
)
def test_rewrite_video_url_invalid_url(self):
"""
Test the case where no alternative video exists in the CDN.
"""
invalid_cdn_url = 'http://http://fakecdn.com/'
self.assertIsNone(rewrite_video_url(invalid_cdn_url, self.original_video_url))
def test_none_args(self):
"""
Ensure None args return None
"""
self.assertIsNone(rewrite_video_url(None, None))
def test_emptystring_args(self):
"""
Ensure empty-string args return None
"""
self.assertIsNone(rewrite_video_url("", ""))
@attr(shard=1)
class TestVideoDescriptorInitialization(BaseTestXmodule):
"""
Make sure that module initialization works correctly.
"""
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super(TestVideoDescriptorInitialization, self).setUp()
self.setup_course()
def test_source_not_in_html5sources(self):
metadata = {
'source': 'http://example.org/video.mp4',
'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertIn('source', fields)
self.assertEqual(self.item_descriptor.source, 'http://example.org/video.mp4')
self.assertTrue(self.item_descriptor.download_video)
self.assertTrue(self.item_descriptor.source_visible)
def test_source_in_html5sources(self):
metadata = {
'source': 'http://example.org/video.mp4',
'html5_sources': ['http://example.org/video.mp4'],
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertNotIn('source', fields)
self.assertTrue(self.item_descriptor.download_video)
self.assertFalse(self.item_descriptor.source_visible)
def test_download_video_is_explicitly_set(self):
metadata = {
'track': u'http://some_track.srt',
'source': 'http://example.org/video.mp4',
'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
'download_video': False,
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertIn('source', fields)
self.assertIn('download_video', fields)
self.assertFalse(self.item_descriptor.download_video)
self.assertTrue(self.item_descriptor.source_visible)
self.assertTrue(self.item_descriptor.download_track)
def test_source_is_empty(self):
metadata = {
'source': '',
'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertNotIn('source', fields)
self.assertFalse(self.item_descriptor.download_video)
@attr(shard=1)
@ddt.ddt
class TestEditorSavedMethod(BaseTestXmodule):
"""
Make sure that `editor_saved` method works correctly.
"""
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super(TestEditorSavedMethod, self).setUp()
self.setup_course()
self.metadata = {
'source': 'http://youtu.be/3_yD_cEKoCk',
'html5_sources': ['http://example.org/video.mp4'],
}
# path to subs_3_yD_cEKoCk.srt.sjson file
self.file_name = 'subs_3_yD_cEKoCk.srt.sjson'
# pylint: disable=no-value-for-parameter
self.test_dir = path(__file__).abspath().dirname().dirname().dirname().dirname().dirname()
self.file_path = self.test_dir + '/common/test/data/uploads/' + self.file_name
@ddt.data(TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE)
def test_editor_saved_when_html5_sub_not_exist(self, default_store):
"""
When a YouTube sub exists but no HTML5 sub is present for
html5_sources, the editor_saved function generates a new HTML5 sub
for the video.
"""
self.MODULESTORE = default_store # pylint: disable=invalid-name
self.initialize_module(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
with open(self.file_path, "r") as myfile:
save_to_store(myfile.read(), self.file_name, 'text/sjson', item.location)
item.sub = "3_yD_cEKoCk"
# subs_video.srt.sjson does not exist before calling editor_saved function
with self.assertRaises(NotFoundError):
Transcript.get_asset(item.location, 'subs_video.srt.sjson')
old_metadata = own_metadata(item)
# calling editor_saved will generate new file subs_video.srt.sjson for html5_sources
item.editor_saved(self.user, old_metadata, None)
self.assertIsInstance(Transcript.get_asset(item.location, 'subs_3_yD_cEKoCk.srt.sjson'), StaticContent)
self.assertIsInstance(Transcript.get_asset(item.location, 'subs_video.srt.sjson'), StaticContent)
@ddt.data(TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE)
def test_editor_saved_when_youtube_and_html5_subs_exist(self, default_store):
"""
When both the YouTube sub and the HTML5 sub already exist, no new
sub is generated by the editor_saved function.
"""
self.MODULESTORE = default_store
self.initialize_module(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
with open(self.file_path, "r") as myfile:
save_to_store(myfile.read(), self.file_name, 'text/sjson', item.location)
save_to_store(myfile.read(), 'subs_video.srt.sjson', 'text/sjson', item.location)
item.sub = "3_yD_cEKoCk"
# subs_3_yD_cEKoCk.srt.sjson and subs_video.srt.sjson already exist
self.assertIsInstance(Transcript.get_asset(item.location, self.file_name), StaticContent)
self.assertIsInstance(Transcript.get_asset(item.location, 'subs_video.srt.sjson'), StaticContent)
old_metadata = own_metadata(item)
with patch('xmodule.video_module.video_module.manage_video_subtitles_save') as manage_video_subtitles_save:
item.editor_saved(self.user, old_metadata, None)
self.assertFalse(manage_video_subtitles_save.called)
@ddt.ddt
class TestVideoDescriptorStudentViewJson(TestCase):
"""
Tests for the student_view_data method on VideoDescriptor.
"""
TEST_DURATION = 111.0
TEST_PROFILE = "mobile"
TEST_SOURCE_URL = "http://www.example.com/source.mp4"
TEST_LANGUAGE = "ge"
TEST_ENCODED_VIDEO = {
'profile': TEST_PROFILE,
'bitrate': 333,
'url': 'http://example.com/video',
'file_size': 222,
}
TEST_EDX_VIDEO_ID = 'test_edx_video_id'
TEST_YOUTUBE_ID = 'test_youtube_id'
TEST_YOUTUBE_EXPECTED_URL = 'https://www.youtube.com/watch?v=test_youtube_id'
def setUp(self):
super(TestVideoDescriptorStudentViewJson, self).setUp()
video_declaration = "<video display_name='Test Video' youtube_id_1_0=\'" + self.TEST_YOUTUBE_ID + "\'>"
sample_xml = ''.join([
video_declaration,
"<source src='", self.TEST_SOURCE_URL, "'/> ",
"<transcript language='", self.TEST_LANGUAGE, "' src='german_translation.srt' /> ",
"</video>"]
)
self.transcript_url = "transcript_url"
self.video = instantiate_descriptor(data=sample_xml)
self.video.runtime.handler_url = Mock(return_value=self.transcript_url)
def setup_val_video(self, associate_course_in_val=False):
"""
Creates a video entry in VAL.
Arguments:
associate_course_in_val - If True, associates the test course with the video in VAL.
"""
create_profile('mobile')
create_video({
'edx_video_id': self.TEST_EDX_VIDEO_ID,
'client_video_id': 'test_client_video_id',
'duration': self.TEST_DURATION,
'status': 'dummy',
'encoded_videos': [self.TEST_ENCODED_VIDEO],
'courses': [self.video.location.course_key] if associate_course_in_val else [],
})
self.val_video = get_video_info(self.TEST_EDX_VIDEO_ID) # pylint: disable=attribute-defined-outside-init
def get_result(self, allow_cache_miss=True):
"""
Returns the result from calling the video's student_view_data method.
Arguments:
allow_cache_miss is passed in the context to the student_view_data method.
"""
context = {
"profiles": [self.TEST_PROFILE],
"allow_cache_miss": "True" if allow_cache_miss else "False"
}
return self.video.student_view_data(context)
def verify_result_with_fallback_and_youtube(self, result):
"""
Verifies the result is as expected when returning "fallback" video data (not from VAL).
"""
self.assertDictEqual(
result,
{
"only_on_web": False,
"duration": None,
"transcripts": {self.TEST_LANGUAGE: self.transcript_url},
"encoded_videos": {
"fallback": {"url": self.TEST_SOURCE_URL, "file_size": 0},
"youtube": {"url": self.TEST_YOUTUBE_EXPECTED_URL, "file_size": 0}
},
}
)
def verify_result_with_youtube_url(self, result):
"""
Verifies the result is as expected when only the YouTube URL is returned (no fallback source).
"""
self.assertDictEqual(
result,
{
"only_on_web": False,
"duration": None,
"transcripts": {self.TEST_LANGUAGE: self.transcript_url},
"encoded_videos": {"youtube": {"url": self.TEST_YOUTUBE_EXPECTED_URL, "file_size": 0}},
}
)
def verify_result_with_val_profile(self, result):
"""
Verifies the result is as expected when returning video data from VAL.
"""
self.assertDictContainsSubset(
result.pop("encoded_videos")[self.TEST_PROFILE],
self.TEST_ENCODED_VIDEO,
)
self.assertDictEqual(
result,
{
"only_on_web": False,
"duration": self.TEST_DURATION,
"transcripts": {self.TEST_LANGUAGE: self.transcript_url},
}
)
def test_only_on_web(self):
self.video.only_on_web = True
result = self.get_result()
self.assertDictEqual(result, {"only_on_web": True})
def test_no_edx_video_id(self):
result = self.get_result()
self.verify_result_with_fallback_and_youtube(result)
def test_no_edx_video_id_and_no_fallback(self):
video_declaration = "<video display_name='Test Video' youtube_id_1_0=\'{}\'>".format(self.TEST_YOUTUBE_ID)
# the video has no source listed, only a youtube link, so no fallback url will be provided
sample_xml = ''.join([
video_declaration,
"<transcript language='", self.TEST_LANGUAGE, "' src='german_translation.srt' /> ",
"</video>"
])
self.transcript_url = "transcript_url"
self.video = instantiate_descriptor(data=sample_xml)
self.video.runtime.handler_url = Mock(return_value=self.transcript_url)
result = self.get_result()
self.verify_result_with_youtube_url(result)
@ddt.data(True, False)
def test_with_edx_video_id_video_associated_in_val(self, allow_cache_miss):
"""
Tests retrieving a video that is stored in VAL and associated with a course in VAL.
"""
self.video.edx_video_id = self.TEST_EDX_VIDEO_ID
self.setup_val_video(associate_course_in_val=True)
# the video is associated in VAL so no cache miss should ever happen but test retrieval in both contexts
result = self.get_result(allow_cache_miss)
self.verify_result_with_val_profile(result)
@ddt.data(True, False)
def test_with_edx_video_id_video_unassociated_in_val(self, allow_cache_miss):
"""
Tests retrieving a video that is stored in VAL but not associated with a course in VAL.
"""
self.video.edx_video_id = self.TEST_EDX_VIDEO_ID
self.setup_val_video(associate_course_in_val=False)
result = self.get_result(allow_cache_miss)
if allow_cache_miss:
self.verify_result_with_val_profile(result)
else:
self.verify_result_with_fallback_and_youtube(result)
@ddt.data(True, False)
def test_with_edx_video_id_video_not_in_val(self, allow_cache_miss):
"""
Tests retrieving a video that is not stored in VAL.
"""
self.video.edx_video_id = self.TEST_EDX_VIDEO_ID
# The video is not in VAL so in contexts that do and don't allow cache misses we should always get a fallback
result = self.get_result(allow_cache_miss)
self.verify_result_with_fallback_and_youtube(result)
@attr(shard=1)
class VideoDescriptorTest(TestCase, VideoDescriptorTestBase):
"""
Tests for video descriptor that requires access to django settings.
"""
def setUp(self):
super(VideoDescriptorTest, self).setUp()
self.descriptor.runtime.handler_url = MagicMock()
def test_get_context(self):
"""
Test get_context.
This test is located here and not in xmodule.tests because get_context calls editable_metadata_fields,
which, in turn, uses settings.LANGUAGES from the Django settings.
"""
correct_tabs = [
{
'name': "Basic",
'template': "video/transcripts.html",
'current': True
},
{
'name': 'Advanced',
'template': 'tabs/metadata-edit-tab.html'
}
]
rendered_context = self.descriptor.get_context()
self.assertListEqual(rendered_context['tabs'], correct_tabs)
def test_export_val_data(self):
self.descriptor.edx_video_id = 'test_edx_video_id'
create_profile('mobile')
create_video({
'edx_video_id': self.descriptor.edx_video_id,
'client_video_id': 'test_client_video_id',
'duration': 111,
'status': 'dummy',
'encoded_videos': [{
'profile': 'mobile',
'url': 'http://example.com/video',
'file_size': 222,
'bitrate': 333,
}],
})
actual = self.descriptor.definition_to_xml(resource_fs=None)
expected_str = """
<video download_video="false" url_name="SampleProblem">
<video_asset client_video_id="test_client_video_id" duration="111.0">
<encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
</video_asset>
</video>
"""
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
def test_export_val_data_not_found(self):
self.descriptor.edx_video_id = 'nonexistent'
actual = self.descriptor.definition_to_xml(resource_fs=None)
expected_str = """<video download_video="false" url_name="SampleProblem"/>"""
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
def test_import_val_data(self):
create_profile('mobile')
module_system = DummySystem(load_error_modules=True)
xml_data = """
<video edx_video_id="test_edx_video_id">
<video_asset client_video_id="test_client_video_id" duration="111.0">
<encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
</video_asset>
</video>
"""
id_generator = Mock()
id_generator.target_course_id = "test_course_id"
video = VideoDescriptor.from_xml(xml_data, module_system, id_generator)
self.assertEqual(video.edx_video_id, 'test_edx_video_id')
video_data = get_video_info(video.edx_video_id)
self.assertEqual(video_data['client_video_id'], 'test_client_video_id')
self.assertEqual(video_data['duration'], 111)
self.assertEqual(video_data['status'], 'imported')
self.assertEqual(video_data['courses'], [id_generator.target_course_id])
self.assertEqual(video_data['encoded_videos'][0]['profile'], 'mobile')
self.assertEqual(video_data['encoded_videos'][0]['url'], 'http://example.com/video')
self.assertEqual(video_data['encoded_videos'][0]['file_size'], 222)
self.assertEqual(video_data['encoded_videos'][0]['bitrate'], 333)
def test_import_val_data_invalid(self):
create_profile('mobile')
module_system = DummySystem(load_error_modules=True)
# Negative file_size is invalid
xml_data = """
<video edx_video_id="test_edx_video_id">
<video_asset client_video_id="test_client_video_id" duration="111.0">
<encoded_video profile="mobile" url="http://example.com/video" file_size="-222" bitrate="333"/>
</video_asset>
</video>
"""
with self.assertRaises(ValCannotCreateError):
VideoDescriptor.from_xml(xml_data, module_system, id_generator=Mock())
with self.assertRaises(ValVideoNotFoundError):
get_video_info("test_edx_video_id")
class TestVideoWithBumper(TestVideo):
"""
Tests rendered content in presence of video bumper.
"""
CATEGORY = "video"
METADATA = {}
FEATURES = settings.FEATURES
@patch('xmodule.video_module.bumper_utils.get_bumper_settings')
def test_is_bumper_enabled(self, get_bumper_settings):
"""
Check that the bumper is shown when ENABLE_VIDEO_BUMPER is True and not shown when it is False.
Assume that the bumper settings are correct.
"""
self.FEATURES.update({
"SHOW_BUMPER_PERIODICITY": 1,
"ENABLE_VIDEO_BUMPER": True,
})
get_bumper_settings.return_value = {
"video_id": "edx_video_id",
"transcripts": {},
}
with override_settings(FEATURES=self.FEATURES):
self.assertTrue(bumper_utils.is_bumper_enabled(self.item_descriptor))
self.FEATURES.update({"ENABLE_VIDEO_BUMPER": False})
with override_settings(FEATURES=self.FEATURES):
self.assertFalse(bumper_utils.is_bumper_enabled(self.item_descriptor))
@patch('xmodule.video_module.bumper_utils.is_bumper_enabled')
@patch('xmodule.video_module.bumper_utils.get_bumper_settings')
@patch('edxval.api.get_urls_for_profiles')
def test_bumper_metadata(self, get_url_for_profiles, get_bumper_settings, is_bumper_enabled):
"""
Test content with rendered bumper metadata.
"""
get_url_for_profiles.return_value = {
"desktop_mp4": "http://test_bumper.mp4",
"desktop_webm": "",
}
get_bumper_settings.return_value = {
"video_id": "edx_video_id",
"transcripts": {},
}
is_bumper_enabled.return_value = True
content = self.item_descriptor.render(STUDENT_VIEW).content
sources = [u'example.mp4', u'example.webm']
expected_context = {
'branding_info': None,
'license': None,
'bumper_metadata': json.dumps(OrderedDict({
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
"showCaptions": "true",
"sources": ["http://test_bumper.mp4"],
'streams': '',
"transcriptLanguage": "en",
"transcriptLanguages": {"en": "English"},
"transcriptTranslationUrl": video_utils.set_query_parameter(
self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'), 'is_bumper', 1
),
"transcriptAvailableTranslationsUrl": video_utils.set_query_parameter(
self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'), 'is_bumper', 1
),
})),
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
"saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state",
"autoplay": False,
"streams": "0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg",
"sub": "a_sub_file.srt.sjson",
"sources": sources,
"captionDataDir": None,
"showCaptions": "true",
"generalSpeed": 1.0,
"speed": None,
"savedVideoPosition": 0.0,
"start": 3603.0,
"end": 3610.0,
"transcriptLanguage": "en",
"transcriptLanguages": OrderedDict({"en": "English", "uk": u"Українська"}),
"ytTestTimeout": 1500,
"ytApiUrl": "https://www.youtube.com/iframe_api",
"ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
"ytKey": None,
"transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
"transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
"autohideHtml5": False,
"recordedYoutubeIsAvailable": True,
})),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': json.dumps(OrderedDict({
"url": "http://img.youtube.com/vi/ZwkTiUPN0mg/0.jpg",
"type": "youtube"
}))
}
expected_content = self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
self.assertEqual(content, expected_content)
|
robertdenton/twitter-list-to-csv
|
refs/heads/master
|
example-config.py
|
1
|
# Get your credentials at https://apps.twitter.com/
consumer_key = 'asdf'
consumer_secret = 'asdf'
access_token = 'asdf'
access_token_secret = 'asdf'
|
chainer/chainer
|
refs/heads/master
|
chainer/functions/normalization/__init__.py
|
12133432
| |
heisencoder/route-finder
|
refs/heads/master
|
third_party/lib/python/django/core/checks/compatibility/__init__.py
|
12133432
| |
reddymeghraj/showroom
|
refs/heads/master
|
erpnext/stock/doctype/price_list/test_price_list.py
|
121
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
# test_ignore = ["Item"]
test_records = frappe.get_test_records('Price List')
|
guard163/tarantool
|
refs/heads/master
|
doc/www/pelicanconf.py
|
8
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import re
import urlparse
AUTHOR = u'Tarantool'
SITENAME = u'Tarantool - a NoSQL database running in a Lua application server'
SITEURL = 'http://tarantool.org'
PATH = 'content'
THEME = "theme"
TIMEZONE = 'Europe/Moscow'
DEFAULT_LANG = u'en'
PLUGINS = ['plugins.beautifulsite']
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
DEFAULT_PAGINATION = False
BSITE_PATH = ['newsite']
ARTICLE_EXCLUDES = ['doc', 'newsite']
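# Extra Jinja2 filters for the theme templates: 're_replace' performs a regex
# substitution, and 'url_split' returns a URL's network location with any
# 'www.' prefix stripped.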
JINJA_FILTERS = {
're_replace': (lambda s, i, o: re.sub(i, o, s)),
'url_split': (lambda s: re.sub(r'www\.', '', urlparse.urlsplit(s).netloc))
}
INDEX_SAVE_AS = ''
ARCHIVES_SAVE_AS = ''
AUTHORS_SAVE_AS = ''
CATEGORIES_SAVE_AS = ''
TAGS_SAVE_AS = ''
TAG_SAVE_AS = ''
STATIC_PATHS = [
'robots.txt',
'ycsb',
'js/highcharts.js',
'js/ie8.js',
'js/index_tabs.js',
'js/bench_tabs.js',
'js/main.js',
'js/old_tabs.js',
'js/select.js',
'js/filesize.min.js'
]
EXTRA_PATH_METADATA = {}
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
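# Illustrative sanity check for the custom Jinja filters defined above (not
# part of the original Tarantool site config); run this file directly to try
# them. The sample inputs are made up purely for demonstration.
if __name__ == '__main__':
    assert JINJA_FILTERS['re_replace']('a-b-c', '-', '_') == 'a_b_c'
    assert JINJA_FILTERS['url_split']('http://www.tarantool.org/doc/') == 'tarantool.org'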
|
bd339/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/websockets/handlers/empty-message_wsh.py
|
284
|
#!/usr/bin/python
from mod_pywebsocket import msgutil
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
line = msgutil.receive_message(request)
if line == "":
msgutil.send_message(request, 'pass')
else:
msgutil.send_message(request, 'fail')
|
microelly2/cadquery-freecad-module
|
refs/heads/master
|
CadQuery/Libs/docutils/examples.py
|
180
|
# $Id: examples.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This module contains practical examples of Docutils client code.
Importing this module from client code is not recommended; its contents are
subject to change in future Docutils releases. Instead, it is recommended
that you copy and paste the parts you need into your own code, modifying as
necessary.
"""
from docutils import core, io
def html_parts(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=True,
initial_header_level=1):
"""
Given an input string, returns a dictionary of HTML document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client.
Parameters:
- `input_string`: A multi-line text string; required.
- `source_path`: Path to the source file or object. Optional, but useful
for diagnostic output (system messages).
- `destination_path`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
- `input_encoding`: The encoding of `input_string`. If it is an encoded
8-bit string, provide the correct encoding. If it is a Unicode string,
use "unicode", the default.
- `doctitle`: Disable the promotion of a lone top-level section title to
document title (and subsequent section title to document subtitle
promotion); enabled by default.
- `initial_header_level`: The initial level for header elements (e.g. 1
for "<h1>").
"""
overrides = {'input_encoding': input_encoding,
'doctitle_xform': doctitle,
'initial_header_level': initial_header_level}
parts = core.publish_parts(
source=input_string, source_path=source_path,
destination_path=destination_path,
writer_name='html', settings_overrides=overrides)
return parts
def html_body(input_string, source_path=None, destination_path=None,
input_encoding='unicode', output_encoding='unicode',
doctitle=True, initial_header_level=1):
"""
Given an input string, returns an HTML fragment as a string.
The return value is the contents of the <body> element.
Parameters (see `html_parts()` for the remainder):
- `output_encoding`: The desired encoding of the output. If a Unicode
string is desired, use the default value of "unicode".
"""
parts = html_parts(
input_string=input_string, source_path=source_path,
destination_path=destination_path,
input_encoding=input_encoding, doctitle=doctitle,
initial_header_level=initial_header_level)
fragment = parts['html_body']
if output_encoding != 'unicode':
fragment = fragment.encode(output_encoding)
return fragment
def internals(input_string, source_path=None, destination_path=None,
input_encoding='unicode', settings_overrides=None):
"""
Return the document tree and publisher, for exploring Docutils internals.
Parameters: see `html_parts()`.
"""
if settings_overrides:
overrides = settings_overrides.copy()
else:
overrides = {}
overrides['input_encoding'] = input_encoding
output, pub = core.publish_programmatically(
source_class=io.StringInput, source=input_string,
source_path=source_path,
destination_class=io.NullOutput, destination=None,
destination_path=destination_path,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='null',
settings=None, settings_spec=None, settings_overrides=overrides,
config_section=None, enable_exit_status=None)
return pub.writer.document, pub
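# A minimal usage sketch (not part of the original module): render a small
# reStructuredText string with the helpers defined above. The sample text is
# made up purely for illustration and assumes docutils is installed.
if __name__ == '__main__':
    sample = u"Title\n=====\n\nSome *emphasised* text in a paragraph."
    print(html_body(sample))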
|
kisel/trex-core
|
refs/heads/master
|
scripts/external_libs/scapy-2.3.1/python3/scapy/layers/x509.py
|
22
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
X.509 certificates.
"""
from scapy.asn1packet import *
from scapy.asn1fields import *
##########
## X509 ##
##########
######[ ASN1 class ]######
class ASN1_Class_X509(ASN1_Class_UNIVERSAL):
name="X509"
CONT0 = 0xa0
CONT1 = 0xa1
CONT2 = 0xa2
CONT3 = 0xa3
class ASN1_X509_CONT0(ASN1_SEQUENCE):
tag = ASN1_Class_X509.CONT0
class ASN1_X509_CONT1(ASN1_SEQUENCE):
tag = ASN1_Class_X509.CONT1
class ASN1_X509_CONT2(ASN1_SEQUENCE):
tag = ASN1_Class_X509.CONT2
class ASN1_X509_CONT3(ASN1_SEQUENCE):
tag = ASN1_Class_X509.CONT3
######[ BER codecs ]#######
class BERcodec_X509_CONT0(BERcodec_SEQUENCE):
tag = ASN1_Class_X509.CONT0
class BERcodec_X509_CONT1(BERcodec_SEQUENCE):
tag = ASN1_Class_X509.CONT1
class BERcodec_X509_CONT2(BERcodec_SEQUENCE):
tag = ASN1_Class_X509.CONT2
class BERcodec_X509_CONT3(BERcodec_SEQUENCE):
tag = ASN1_Class_X509.CONT3
######[ ASN1 fields ]######
class ASN1F_X509_CONT0(ASN1F_SEQUENCE):
ASN1_tag = ASN1_Class_X509.CONT0
class ASN1F_X509_CONT1(ASN1F_SEQUENCE):
ASN1_tag = ASN1_Class_X509.CONT1
class ASN1F_X509_CONT2(ASN1F_SEQUENCE):
ASN1_tag = ASN1_Class_X509.CONT2
class ASN1F_X509_CONT3(ASN1F_SEQUENCE):
ASN1_tag = ASN1_Class_X509.CONT3
######[ X509 packets ]######
class X509RDN(ASN1_Packet):
ASN1_codec = ASN1_Codecs.BER
ASN1_root = ASN1F_SET(
ASN1F_SEQUENCE( ASN1F_OID("oid","2.5.4.6"),
ASN1F_PRINTABLE_STRING("value","")
)
)
class X509v3Ext(ASN1_Packet):
ASN1_codec = ASN1_Codecs.BER
ASN1_root = ASN1F_field("val",ASN1_NULL(0))
class X509Cert(ASN1_Packet):
ASN1_codec = ASN1_Codecs.BER
ASN1_root = ASN1F_SEQUENCE(
ASN1F_SEQUENCE(
ASN1F_optionnal(ASN1F_X509_CONT0(ASN1F_INTEGER("version",3))),
ASN1F_INTEGER("sn",1),
ASN1F_SEQUENCE(ASN1F_OID("sign_algo","1.2.840.113549.1.1.5"),
ASN1F_field("sa_value",ASN1_NULL(0))),
ASN1F_SEQUENCE_OF("issuer",[],X509RDN),
ASN1F_SEQUENCE(ASN1F_UTC_TIME("not_before",ZuluTime(-600)), # ten minutes ago
ASN1F_UTC_TIME("not_after",ZuluTime(+86400))), # for 24h
ASN1F_SEQUENCE_OF("subject",[],X509RDN),
ASN1F_SEQUENCE(
ASN1F_SEQUENCE(ASN1F_OID("pubkey_algo","1.2.840.113549.1.1.1"),
ASN1F_field("pk_value",ASN1_NULL(0))),
ASN1F_BIT_STRING("pubkey","")
),
ASN1F_optionnal(ASN1F_X509_CONT3(ASN1F_SEQUENCE_OF("x509v3ext",[],X509v3Ext))),
),
ASN1F_SEQUENCE(ASN1F_OID("sign_algo2","1.2.840.113549.1.1.5"),
ASN1F_field("sa2_value",ASN1_NULL(0))),
ASN1F_BIT_STRING("signature","")
)
|
tntnatbry/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/pooling_ops_3d_test.py
|
29
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class PoolingTest(test.TestCase):
def _VerifyValues(self, pool_func, input_sizes, window, strides, padding,
expected):
"""Verifies the output values of the pooling function.
Args:
pool_func: Function to be called: co.MaxPool, co.AvgPool.
input_sizes: Input tensor dimensions.
window: Tuple of kernel dims: planes, rows, cols.
strides: Tuple of strides for dims: planes, rows, cols.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=True) as sess:
t = constant_op.constant(x, shape=input_sizes)
t = pool_func(
t,
ksize=[1, window[0], window[1], window[2], 1],
strides=[1, strides[0], strides[1], strides[2], 1],
padding=padding)
vals = sess.run(t)
# Verifies values.
actual = vals.flatten()
self.assertAllClose(expected, actual)
def testAvgPool3dValidPadding(self):
expected_output = [20.5, 21.5, 22.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID",
expected=expected_output)
def testAvgPool3dSamePadding(self):
expected_output = [20.5, 21.5, 22.5, 26.5, 27.5, 28.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 2, 2, 4, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME",
expected=expected_output)
def testAvgPool3dSamePaddingDifferentStrides(self):
expected_output = [1.5, 4.5, 7.5, 17.5, 20.5, 23.5, 33.5, 36.5, 39.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=(1, 2, 3),
strides=(2, 3, 1),
padding="SAME",
expected=expected_output)
def testMaxPool3dValidPadding(self):
expected_output = [40.0, 41.0, 42.0]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID",
expected=expected_output)
def testMaxPool3dSamePadding(self):
expected_output = [31., 32., 33., 34., 35., 36.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 2, 2, 3, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME",
expected=expected_output)
def testMaxPool3dSamePaddingDifferentStrides(self):
expected_output = [2., 5., 8., 18., 21., 24., 34., 37., 40.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=(1, 2, 3),
strides=(2, 3, 1),
padding="SAME",
expected=expected_output)
# Test pooling on a larger input, with different stride and kernel
# size for the 'z' dimension.
# Simulate max pooling in numpy to get the expected output.
input_data = np.arange(1, 5 * 27 * 27 * 64 + 1).reshape((5, 27, 27, 64))
input_data = np.pad(input_data, [[0, 0], [0, 1], [0, 1], [0, 0]],
mode="constant")
expected_output = input_data[:, 1::2, 1::2, :]
expected_output[:, -1, :, :] = input_data[:, -2, 1::2, :]
expected_output[:, :, -1, :] = input_data[:, 1::2, -2, :]
expected_output[:, -1, -1, :] = input_data[:, -2, -2, :]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 27, 27, 64],
window=(1, 2, 2),
strides=(1, 2, 2),
padding="SAME",
expected=expected_output.flatten())
def testKernelSmallerThanStride(self):
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[58, 61, 79, 82, 205, 208, 226, 229])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[29.5, 32.5, 50.5, 53.5, 176.5, 179.5, 197.5, 200.5])
def _ConstructAndTestGradient(self,
pool_func,
input_sizes,
output_sizes,
window,
strides,
padding,
x_init_value=None):
"""Verifies the gradients of the avg pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window: Tuple of kernel dims: planes, rows, cols.
strides: Tuple of strides for dims: planes, rows, cols.
padding: Padding type.
x_init_value: Values to be passed to the gradient checker.
"""
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=True):
input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
err_margin = 1e-3
if pool_func == nn_ops.avg_pool3d:
func_name = "avg_pool3d"
else:
if x_init_value is None:
x_init_value = np.asfarray(
np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool3d"
t = pool_func(
input_tensor,
ksize=[1, window[0], window[1], window[2], 1],
strides=[1, strides[0], strides[1], strides[2], 1],
padding=padding,
name=func_name)
err = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t,
output_sizes,
x_init_value=x_init_value,
delta=1e-2)
print("%s gradient error = " % func_name, err)
self.assertLess(err, err_margin)
def testMaxPoolGradValidPadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 1],
output_sizes=[1, 3, 3, 3, 1],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="VALID")
def testMaxPoolGradValidPadding2_1_6_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[2, 3, 3, 6, 3],
output_sizes=[2, 2, 2, 5, 3],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="VALID")
def testMaxPoolGradValidPadding2_1_7_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[2, 3, 5, 7, 3],
output_sizes=[2, 2, 4, 6, 3],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="VALID")
def testMaxPoolGradValidPadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[2, 2, 2, 2, 3],
output_sizes=[2, 1, 1, 1, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID")
def testMaxPoolGradSamePadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[2, 3, 2, 4, 1],
output_sizes=[2, 3, 2, 4, 1],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="SAME")
def testMaxPoolGradSamePadding2_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[2, 3, 2, 4, 1],
output_sizes=[2, 3, 2, 4, 1],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="SAME")
def testMaxPoolGradSamePadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[2, 5, 2, 4, 3],
output_sizes=[2, 3, 1, 2, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME")
def testMaxPoolGradSamePadding3_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 7, 1],
output_sizes=[1, 3, 3, 7, 1],
window=(3, 3, 3),
strides=(1, 1, 1),
padding="SAME")
def testAvgPoolGradValidPadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[2, 3, 3, 3, 3],
output_sizes=[2, 3, 3, 3, 3],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="VALID")
def testAvgPoolGradValidPadding2_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[2, 3, 3, 3, 3],
output_sizes=[2, 2, 2, 2, 3],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="VALID")
def testAvgPoolGradValidPadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[2, 2, 2, 2, 3],
output_sizes=[2, 1, 1, 1, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID")
def testAvgPoolGradSamePadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[2, 3, 2, 4, 3],
output_sizes=[2, 3, 2, 4, 3],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="SAME")
def testAvgPoolGradSamePadding2_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 2, 2, 2, 1],
output_sizes=[1, 2, 2, 2, 1],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="SAME")
def testAvgPoolGradSamePadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[2, 5, 2, 4, 3],
output_sizes=[2, 3, 1, 2, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME")
def testAvgPoolGradSamePadding3_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 6, 7, 1],
output_sizes=[1, 3, 6, 7, 1],
window=(3, 3, 3),
strides=(1, 1, 1),
padding="SAME")
if __name__ == "__main__":
test.main()
|
spblightadv/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/build/gyp/test/mac/gyptest-postbuild-static-library.py
|
349
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that postbuilds on static libraries work, and that sourceless
libraries don't cause failures at gyp time.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['make', 'xcode'])
CHDIR = 'postbuild-static-library'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', 'my_lib', chdir=CHDIR)
# Building my_sourceless_lib doesn't work with make. gyp should probably
# forbid sourceless static libraries, since they're pretty pointless.
# But they shouldn't cause gyp time exceptions.
test.built_file_must_exist('postbuild-file', chdir=CHDIR)
test.pass_test()
|
mikelambert/urllib3
|
refs/heads/master
|
urllib3/__init__.py
|
2
|
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = 'dev'
__all__ = (
'HTTPConnectionPool',
'HTTPSConnectionPool',
'PoolManager',
'ProxyManager',
'HTTPResponse',
'Retry',
'Timeout',
'add_stderr_logger',
'connection_from_url',
'disable_warnings',
'encode_multipart_formdata',
'get_host',
'make_headers',
'proxy_from_url',
)
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added a stderr logging handler to logger: %s' % __name__)
return handler
# ... Clean up.
del NullHandler
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter('ignore', category)
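# Hedged usage sketch (not part of upstream urllib3): wire up the debug logger
# defined above, silence warnings, and issue a request. The URL is only an
# example and the call requires network access.
if __name__ == '__main__':
    add_stderr_logger(logging.DEBUG)
    disable_warnings()
    http = PoolManager()
    response = http.request('GET', 'http://example.com/')
    print(response.status)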
|
talon-one/talon_one.py
|
refs/heads/master
|
test/test_error_source.py
|
1
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.error_source import ErrorSource # noqa: E501
from talon_one.rest import ApiException
class TestErrorSource(unittest.TestCase):
"""ErrorSource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ErrorSource
include_optional is a boolean; when False only required
params are included, when True both required and
optional params are included."""
# model = talon_one.models.error_source.ErrorSource() # noqa: E501
if include_optional :
return ErrorSource(
pointer = '0',
parameter = '0',
line = '0',
resource = '0'
)
else :
return ErrorSource(
)
def testErrorSource(self):
"""Test ErrorSource"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
mbad/kitabu
|
refs/heads/master
|
kitabu/migrations/__init__.py
|
12133432
| |
go-bears/rally
|
refs/heads/master
|
tests/unit/plugins/openstack/scenarios/sahara/__init__.py
|
12133432
| |
rodrigob/keras
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
petteyg/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/relativeImportSourceWithSpacesInsideMovedModule/before/src/pkg/subpkg1/__init__.py
|
12133432
| |
mudbungie/NetExplorer
|
refs/heads/master
|
env/lib/python3.4/site-packages/networkx/algorithms/components/weakly_connected.py
|
42
|
# -*- coding: utf-8 -*-
"""Weakly connected components.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils.decorators import not_implemented_for
__authors__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Christopher Ellison'])
__all__ = [
'number_weakly_connected_components',
'weakly_connected_components',
'weakly_connected_component_subgraphs',
'is_weakly_connected',
]
@not_implemented_for('undirected')
def weakly_connected_components(G):
"""Generate weakly connected components of G.
Parameters
----------
G : NetworkX graph
A directed graph
Returns
-------
comp : generator of sets
A generator of sets of nodes, one for each weakly connected
component of G.
Examples
--------
Generate a sorted list of weakly connected components, largest first.
>>> G = nx.path_graph(4, create_using=nx.DiGraph())
>>> G.add_path([10, 11, 12])
>>> [len(c) for c in sorted(nx.weakly_connected_components(G),
... key=len, reverse=True)]
[4, 3]
If you only want the largest component, it's more efficient to
use max instead of sort.
>>> largest_cc = max(nx.weakly_connected_components(G), key=len)
See Also
--------
strongly_connected_components
Notes
-----
For directed graphs only.
"""
seen = set()
for v in G:
if v not in seen:
c = set(_plain_bfs(G, v))
yield c
seen.update(c)
@not_implemented_for('undirected')
def number_weakly_connected_components(G):
"""Return the number of weakly connected components in G.
Parameters
----------
G : NetworkX graph
A directed graph.
Returns
-------
n : integer
Number of weakly connected components
See Also
--------
connected_components
Notes
-----
For directed graphs only.
"""
return len(list(weakly_connected_components(G)))
@not_implemented_for('undirected')
def weakly_connected_component_subgraphs(G, copy=True):
"""Generate weakly connected components as subgraphs.
Parameters
----------
G : NetworkX graph
A directed graph.
copy: bool (default=True)
If True make a copy of the graph attributes
Returns
-------
comp : generator
A generator of graphs, one for each weakly connected component of G.
Examples
--------
Generate a sorted list of weakly connected components, largest first.
>>> G = nx.path_graph(4, create_using=nx.DiGraph())
>>> G.add_path([10, 11, 12])
>>> [len(c) for c in sorted(nx.weakly_connected_component_subgraphs(G),
... key=len, reverse=True)]
[4, 3]
If you only want the largest component, it's more efficient to
use max instead of sort.
>>> Gc = max(nx.weakly_connected_component_subgraphs(G), key=len)
See Also
--------
strongly_connected_components
connected_components
Notes
-----
For directed graphs only.
Graph, node, and edge attributes are copied to the subgraphs by default.
"""
for comp in weakly_connected_components(G):
if copy:
yield G.subgraph(comp).copy()
else:
yield G.subgraph(comp)
@not_implemented_for('undirected')
def is_weakly_connected(G):
"""Test directed graph for weak connectivity.
A directed graph is weakly connected if, and only if, the graph
is connected when the direction of the edge between nodes is ignored.
Parameters
----------
G : NetworkX Graph
A directed graph.
Returns
-------
connected : bool
True if the graph is weakly connected, False otherwise.
See Also
--------
is_strongly_connected
is_semiconnected
is_connected
Notes
-----
For directed graphs only.
"""
if len(G) == 0:
raise nx.NetworkXPointlessConcept(
"""Connectivity is undefined for the null graph.""")
return len(list(weakly_connected_components(G))[0]) == len(G)
def _plain_bfs(G, source):
"""A fast BFS node generator
The direction of the edge between nodes is ignored.
For directed graphs only.
"""
Gsucc = G.succ
Gpred = G.pred
seen = set()
nextlevel = {source}
while nextlevel:
thislevel = nextlevel
nextlevel = set()
for v in thislevel:
if v not in seen:
yield v
seen.add(v)
nextlevel.update(Gsucc[v])
nextlevel.update(Gpred[v])
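# Small illustrative check (not part of networkx): a directed path is weakly
# connected even though it is not strongly connected.
if __name__ == '__main__':
    G = nx.DiGraph([(0, 1), (1, 2)])
    print(is_weakly_connected(G))                 # True
    print(number_weakly_connected_components(G))  # 1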
|
LilithWittmann/froide
|
refs/heads/master
|
froide/publicbody/south_migrations/0007_auto__del_field_publicbody_topic_slug.py
|
6
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from froide.helper.auth_migration_util import USER_DB_NAME
APP_MODEL, APP_MODEL_NAME = 'account.User', 'account.user'
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'PublicBody.topic_slug'
db.delete_column('publicbody_publicbody', 'topic_slug')
db.delete_column('publicbody_publicbody', 'topic_name')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'PublicBody.topic_slug'
raise RuntimeError("Cannot reverse this migration. 'PublicBody.topic_slug' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
APP_MODEL_NAME: {
'Meta': {'object_name': 'User', 'db_table': "'%s'" % USER_DB_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'publicbody.foilaw': {
'Meta': {'object_name': 'FoiLaw'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'letter_end': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'letter_start': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'max_response_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_response_time_unit': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '3'}),
'refusal_reasons': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['sites.Site']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'publicbody.publicbody': {
'Meta': {'object_name': 'PublicBody'},
'_created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'public_body_creators'", 'null': 'True', 'to': "orm['%s']" % APP_MODEL}),
'_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'public_body_updaters'", 'null': 'True', 'to': "orm['%s']" % APP_MODEL}),
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'classification': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'classification_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'contact': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'depth': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'laws': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['publicbody.FoiLaw']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'number_of_requests': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'children'", 'null': 'True', 'blank': 'True', 'to': "orm['publicbody.PublicBody']"}),
'root': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'descendants'", 'null': 'True', 'blank': 'True', 'to': "orm['publicbody.PublicBody']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['sites.Site']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.PublicBodyTopic']", 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'website_dump': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'publicbody.publicbodytopic': {
'Meta': {'object_name': 'PublicBodyTopic'},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['publicbody']
|
zimmermegan/MARDA
|
refs/heads/master
|
nltk-3.0.3/nltk/corpus/reader/nombank.py
|
10
|
# Natural Language Toolkit: NomBank Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Authors: Paul Bedaride <paul.bedaride@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import unicode_literals
from nltk.tree import Tree
from xml.etree import ElementTree
from nltk.internals import raise_unorderable_types
from nltk.compat import total_ordering, python_2_unicode_compatible, string_types
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
class NombankCorpusReader(CorpusReader):
"""
Corpus reader for the nombank corpus, which augments the Penn
Treebank with information about the predicate argument structure
of every noun instance. The corpus consists of two parts: the
predicate-argument annotations themselves, and a set of "frameset
files" which define the argument labels used by the annotations,
on a per-noun basis. Each "frameset file" contains one or more
predicates, such as ``'turn'`` or ``'turn_on'``, each of which is
divided into coarse-grained word senses called "rolesets". For
each "roleset", the frameset file provides descriptions of the
argument roles, along with examples.
"""
def __init__(self, root, nomfile, framefiles='',
nounsfile=None, parse_fileid_xform=None,
parse_corpus=None, encoding='utf8'):
"""
:param root: The root directory for this corpus.
:param nomfile: The name of the file containing the predicate-
argument annotations (relative to ``root``).
:param framefiles: A list or regexp specifying the frameset
fileids for this corpus.
:param parse_fileid_xform: A transform that should be applied
to the fileids in this corpus. This should be a function
of one argument (a fileid) that returns a string (the new
fileid).
:param parse_corpus: The corpus containing the parse trees
corresponding to this corpus. These parse trees are
necessary to resolve the tree pointers used by nombank.
"""
# If framefiles is specified as a regexp, expand it.
if isinstance(framefiles, string_types):
framefiles = find_corpus_fileids(root, framefiles)
framefiles = list(framefiles)
# Initialize the corpus reader.
CorpusReader.__init__(self, root, [nomfile, nounsfile] + framefiles,
encoding)
# Record our frame fileids & nom file.
self._nomfile = nomfile
self._framefiles = framefiles
self._nounsfile = nounsfile
self._parse_fileid_xform = parse_fileid_xform
self._parse_corpus = parse_corpus
def raw(self, fileids=None):
"""
:return: the text contents of the given fileids, as a single string.
"""
if fileids is None: fileids = self._fileids
elif isinstance(fileids, compat.string_types): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def instances(self, baseform=None):
"""
:return: a corpus view that acts as a list of
``NombankInstance`` objects, one for each noun in the corpus.
"""
kwargs = {}
if baseform is not None:
kwargs['instance_filter'] = lambda inst: inst.baseform==baseform
return StreamBackedCorpusView(self.abspath(self._nomfile),
lambda stream: self._read_instance_block(stream, **kwargs),
encoding=self.encoding(self._nomfile))
def lines(self):
"""
:return: a corpus view that acts as a list of strings, one for
each line in the predicate-argument annotation file.
"""
return StreamBackedCorpusView(self.abspath(self._nomfile),
read_line_block,
encoding=self.encoding(self._nomfile))
def roleset(self, roleset_id):
"""
:return: the xml description for the given roleset.
"""
baseform = roleset_id.split('.')[0]
baseform = baseform.replace('perc-sign','%')
baseform = baseform.replace('oneslashonezero', '1/10').replace('1/10','1-slash-10')
framefile = 'frames/%s.xml' % baseform
if framefile not in self._framefiles:
raise ValueError('Frameset file for %s not found' %
roleset_id)
# n.b.: The encoding for XML fileids is specified by the file
# itself; so we ignore self._encoding here.
etree = ElementTree.parse(self.abspath(framefile).open()).getroot()
for roleset in etree.findall('predicate/roleset'):
if roleset.attrib['id'] == roleset_id:
return roleset
else:
raise ValueError('Roleset %s not found in %s' %
(roleset_id, framefile))
def rolesets(self, baseform=None):
"""
:return: list of xml descriptions for rolesets.
"""
if baseform is not None:
framefile = 'frames/%s.xml' % baseform
if framefile not in self._framefiles:
raise ValueError('Frameset file for %s not found' %
baseform)
framefiles = [framefile]
else:
framefiles = self._framefiles
rsets = []
for framefile in framefiles:
# n.b.: The encoding for XML fileids is specified by the file
# itself; so we ignore self._encoding here.
etree = ElementTree.parse(self.abspath(framefile).open()).getroot()
rsets.append(etree.findall('predicate/roleset'))
return LazyConcatenation(rsets)
def nouns(self):
"""
:return: a corpus view that acts as a list of all noun lemmas
in this corpus (from the nombank.1.0.words file).
"""
return StreamBackedCorpusView(self.abspath(self._nounsfile),
read_line_block,
encoding=self.encoding(self._nounsfile))
def _read_instance_block(self, stream, instance_filter=lambda inst: True):
block = []
# Read 100 at a time.
for i in range(100):
line = stream.readline().strip()
if line:
inst = NombankInstance.parse(
line, self._parse_fileid_xform,
self._parse_corpus)
if instance_filter(inst):
block.append(inst)
return block
######################################################################
#{ Nombank Instance & related datatypes
######################################################################
@python_2_unicode_compatible
class NombankInstance(object):
def __init__(self, fileid, sentnum, wordnum, baseform, sensenumber,
predicate, predid, arguments, parse_corpus=None):
self.fileid = fileid
"""The name of the file containing the parse tree for this
instance's sentence."""
self.sentnum = sentnum
"""The sentence number of this sentence within ``fileid``.
Indexing starts from zero."""
self.wordnum = wordnum
"""The word number of this instance's predicate within its
containing sentence. Word numbers are indexed starting from
zero, and include traces and other empty parse elements."""
self.baseform = baseform
"""The baseform of the predicate."""
self.sensenumber = sensenumber
"""The sense number of the predicate."""
self.predicate = predicate
"""A ``NombankTreePointer`` indicating the position of this
instance's predicate within its containing sentence."""
self.predid = predid
"""Identifier of the predicate."""
self.arguments = tuple(arguments)
"""A list of tuples (argloc, argid), specifying the location
and identifier for each of the predicate's argument in the
containing sentence. Argument identifiers are strings such as
``'ARG0'`` or ``'ARGM-TMP'``. This list does *not* contain
the predicate."""
self.parse_corpus = parse_corpus
"""A corpus reader for the parse trees corresponding to the
instances in this nombank corpus."""
@property
def roleset(self):
"""The name of the roleset used by this instance's predicate.
Use ``nombank.roleset() <NombankCorpusReader.roleset>`` to
look up information about the roleset."""
r = self.baseform.replace('%', 'perc-sign')
r = r.replace('1/10', '1-slash-10').replace('1-slash-10', 'oneslashonezero')
return '%s.%s'%(r, self.sensenumber)
def __repr__(self):
return ('<NombankInstance: %s, sent %s, word %s>' %
(self.fileid, self.sentnum, self.wordnum))
def __str__(self):
s = '%s %s %s %s %s' % (self.fileid, self.sentnum, self.wordnum,
self.baseform, self.sensenumber)
items = self.arguments + ((self.predicate, 'rel'),)
for (argloc, argid) in sorted(items):
s += ' %s-%s' % (argloc, argid)
return s
def _get_tree(self):
if self.parse_corpus is None: return None
if self.fileid not in self.parse_corpus.fileids(): return None
return self.parse_corpus.parsed_sents(self.fileid)[self.sentnum]
tree = property(_get_tree, doc="""
The parse tree corresponding to this instance, or None if
the corresponding tree is not available.""")
@staticmethod
def parse(s, parse_fileid_xform=None, parse_corpus=None):
pieces = s.split()
if len(pieces) < 6:
raise ValueError('Badly formatted nombank line: %r' % s)
# Divide the line into its basic pieces.
(fileid, sentnum, wordnum,
baseform, sensenumber) = pieces[:5]
args = pieces[5:]
rel = [args.pop(i) for i,p in enumerate(args) if '-rel' in p]
if len(rel) != 1:
raise ValueError('Badly formatted nombank line: %r' % s)
# Apply the fileid selector, if any.
if parse_fileid_xform is not None:
fileid = parse_fileid_xform(fileid)
# Convert sentence & word numbers to ints.
sentnum = int(sentnum)
wordnum = int(wordnum)
# Parse the predicate location.
predloc, predid = rel[0].split('-', 1)
predicate = NombankTreePointer.parse(predloc)
# Parse the arguments.
arguments = []
for arg in args:
argloc, argid = arg.split('-', 1)
arguments.append( (NombankTreePointer.parse(argloc), argid) )
# Put it all together.
return NombankInstance(fileid, sentnum, wordnum, baseform, sensenumber,
predicate, predid, arguments, parse_corpus)
class NombankPointer(object):
"""
A pointer used by nombank to identify one or more constituents in
a parse tree. ``NombankPointer`` is an abstract base class with
three concrete subclasses:
- ``NombankTreePointer`` is used to point to single constituents.
- ``NombankSplitTreePointer`` is used to point to 'split'
constituents, which consist of a sequence of two or more
``NombankTreePointer`` pointers.
- ``NombankChainTreePointer`` is used to point to entire trace
chains in a tree. It consists of a sequence of pieces, which
can be ``NombankTreePointer`` or ``NombankSplitTreePointer`` pointers.
"""
def __init__(self):
if self.__class__ == NombankPointer:
raise NotImplementedError()
@python_2_unicode_compatible
class NombankChainTreePointer(NombankPointer):
def __init__(self, pieces):
self.pieces = pieces
"""A list of the pieces that make up this chain. Elements may
be either ``NombankSplitTreePointer`` or
``NombankTreePointer`` pointers."""
def __str__(self):
return '*'.join('%s' % p for p in self.pieces)
def __repr__(self):
return '<NombankChainTreePointer: %s>' % self
def select(self, tree):
if tree is None: raise ValueError('Parse tree not available')
return Tree('*CHAIN*', [p.select(tree) for p in self.pieces])
@python_2_unicode_compatible
class NombankSplitTreePointer(NombankPointer):
def __init__(self, pieces):
self.pieces = pieces
"""A list of the pieces that make up this chain. Elements are
all ``NombankTreePointer`` pointers."""
def __str__(self):
return ','.join('%s' % p for p in self.pieces)
def __repr__(self):
return '<NombankSplitTreePointer: %s>' % self
def select(self, tree):
if tree is None: raise ValueError('Parse tree not available')
return Tree('*SPLIT*', [p.select(tree) for p in self.pieces])
@total_ordering
@python_2_unicode_compatible
class NombankTreePointer(NombankPointer):
"""
wordnum:height*wordnum:height*...
wordnum:height,
"""
def __init__(self, wordnum, height):
self.wordnum = wordnum
self.height = height
@staticmethod
def parse(s):
# Deal with chains (xx*yy*zz)
pieces = s.split('*')
if len(pieces) > 1:
return NombankChainTreePointer([NombankTreePointer.parse(elt)
for elt in pieces])
# Deal with split args (xx,yy,zz)
pieces = s.split(',')
if len(pieces) > 1:
return NombankSplitTreePointer([NombankTreePointer.parse(elt)
for elt in pieces])
# Deal with normal pointers.
pieces = s.split(':')
if len(pieces) != 2: raise ValueError('bad nombank pointer %r' % s)
return NombankTreePointer(int(pieces[0]), int(pieces[1]))
def __str__(self):
return '%s:%s' % (self.wordnum, self.height)
def __repr__(self):
return 'NombankTreePointer(%d, %d)' % (self.wordnum, self.height)
def __eq__(self, other):
while isinstance(other, (NombankChainTreePointer,
NombankSplitTreePointer)):
other = other.pieces[0]
if not isinstance(other, NombankTreePointer):
return self is other
return (self.wordnum == other.wordnum and self.height == other.height)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
while isinstance(other, (NombankChainTreePointer,
NombankSplitTreePointer)):
other = other.pieces[0]
if not isinstance(other, NombankTreePointer):
return id(self) < id(other)
return (self.wordnum, -self.height) < (other.wordnum, -other.height)
def select(self, tree):
if tree is None: raise ValueError('Parse tree not available')
return tree[self.treepos(tree)]
def treepos(self, tree):
"""
Convert this pointer to a standard 'tree position' pointer,
given that it points to the given tree.
"""
if tree is None: raise ValueError('Parse tree not available')
stack = [tree]
treepos = []
wordnum = 0
while True:
#print treepos
#print stack[-1]
# tree node:
if isinstance(stack[-1], Tree):
# Select the next child.
if len(treepos) < len(stack):
treepos.append(0)
else:
treepos[-1] += 1
# Update the stack.
if treepos[-1] < len(stack[-1]):
stack.append(stack[-1][treepos[-1]])
else:
# End of node's child list: pop up a level.
stack.pop()
treepos.pop()
# word node:
else:
if wordnum == self.wordnum:
return tuple(treepos[:len(treepos)-self.height-1])
else:
wordnum += 1
stack.pop()
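# Hedged illustration (not part of NLTK): the three pointer notations handled
# by NombankTreePointer.parse() above, shown on made-up pointer strings.
if __name__ == '__main__':
    print(NombankTreePointer.parse('3:0'))      # plain pointer  -> 3:0
    print(NombankTreePointer.parse('3:0*5:1'))  # trace chain    -> 3:0*5:1
    print(NombankTreePointer.parse('3:0,5:1'))  # split pointer  -> 3:0,5:1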
|
KDB2/OpenReliability
|
refs/heads/master
|
veusz/widgets/colorbar.py
|
4
|
# Copyright (C) 2007 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""A colorbar widget for the image widget. Should show the scale of
the image."""
from __future__ import division
from .. import qtall as qt4
import numpy as N
from .. import document
from .. import setting
from .. import utils
from . import widget
from . import axis
def _(text, disambiguation=None, context='ColorBar'):
"""Translate text."""
return qt4.QCoreApplication.translate(context, text, disambiguation)
class ColorBar(axis.Axis):
"""Color bar for showing scale of image.
This naturally is descended from an axis
"""
typename='colorbar'
allowusercreation = True
description = _('Image color bar')
isaxis = False
def __init__(self, parent, name=None):
"""Initialise object and create axes."""
axis.Axis.__init__(self, parent, name=name)
if type(self) == ColorBar:
self.readDefaults()
@classmethod
def addSettings(klass, s):
"""Construct list of settings."""
axis.Axis.addSettings(s)
s.add( setting.WidgetChoice('widgetName', '',
descr=_('Corresponding widget'),
widgettypes=('image', 'xy', 'nonorthpoint'),
usertext = _('Widget')), 0 )
s.get('log').readonly = True
s.get('datascale').readonly = True
s.add( setting.AlignHorzWManual( 'horzPosn',
'right',
descr = _('Horizontal position'),
usertext=_('Horz posn'),
formatting=True) )
s.add( setting.AlignVertWManual( 'vertPosn',
'bottom',
descr = _('Vertical position'),
usertext=_('Vert posn'),
formatting=True) )
s.add( setting.DistanceOrAuto('width', 'Auto',
descr = _('Width of colorbar'),
usertext=_('Width'),
formatting=True) )
s.add( setting.DistanceOrAuto('height', 'Auto',
descr = _('Height of colorbar'),
usertext=_('Height'),
formatting=True) )
s.add( setting.Float( 'horzManual',
0.,
descr = _('Manual horizontal fractional position'),
usertext=_('Horz manual'),
formatting=True) )
s.add( setting.Float( 'vertManual',
0.,
descr = _('Manual vertical fractional position'),
usertext=_('Vert manual'),
formatting=True) )
s.add( setting.Line('Border', descr = _('Colorbar border line'),
usertext=_('Border')),
pixmap='settings_border')
s.add( setting.SettingBackwardCompat('image', 'widgetName', None) )
@classmethod
def allowedParentTypes(klass):
from . import graph, grid, nonorthgraph
return (graph.Graph, grid.Grid, nonorthgraph.NonOrthGraph)
@property
def userdescription(self):
return _("widget='%s', label='%s'") % (
self.settings.widgetName, self.settings.label)
def chooseName(self):
"""Get name of widget."""
# override axis naming of x and y
return widget.Widget.chooseName(self)
def _axisDraw(self, posn, parentposn, outerbounds, painter, phelper):
"""Do actual drawing."""
s = self.settings
# get height of label font
bounds = self.computeBounds(parentposn, phelper)
font = s.get('Label').makeQFont(phelper)
painter.setFont(font)
fontheight = utils.FontMetrics(font, painter.device()).height()
horz = s.direction == 'horizontal'
# use above to estimate width and height if necessary
w = s.get('width')
if w.isAuto():
if horz:
totalwidth = bounds[2] - bounds[0] - 2*fontheight
else:
totalwidth = fontheight
else:
totalwidth = w.convert(painter)
h = s.get('height')
if h.isAuto():
if horz:
totalheight = fontheight
else:
totalheight = bounds[3] - bounds[1] - 2*fontheight
else:
totalheight = h.convert(painter)
# work out horizontal position
h = s.horzPosn
if h == 'left':
bounds[0] += fontheight
bounds[2] = bounds[0] + totalwidth
elif h == 'right':
bounds[2] -= fontheight
bounds[0] = bounds[2] - totalwidth
elif h == 'centre':
delta = (bounds[2]-bounds[0]-totalwidth)/2.
bounds[0] += delta
bounds[2] -= delta
elif h == 'manual':
bounds[0] += (bounds[2]-bounds[0])*s.horzManual
bounds[2] = bounds[0] + totalwidth
# work out vertical position
v = s.vertPosn
if v == 'top':
bounds[1] += fontheight
bounds[3] = bounds[1] + totalheight
elif v == 'bottom':
bounds[3] -= fontheight
bounds[1] = bounds[3] - totalheight
elif v == 'centre':
delta = (bounds[3]-bounds[1]-totalheight)/2.
bounds[1] += delta
bounds[3] -= delta
elif v == 'manual':
bounds[1] += (bounds[3]-bounds[1])*s.vertManual
bounds[3] = bounds[1] + totalheight
# FIXME: this is ugly - update bounds in helper state
phelper.states[(self,0)].bounds = bounds
# do no painting if hidden or no image
imgwidget = s.get('widgetName').findWidget()
if s.hide:
return bounds
self.updateAxisLocation(bounds)
# update image if necessary with new settings
if imgwidget is not None:
minval, maxval, axisscale, cmapname, trans, invert = \
imgwidget.getColorbarParameters()
cmap = self.document.evaluate.getColormap(cmapname, invert)
img = utils.makeColorbarImage(
minval, maxval, axisscale, cmap, trans,
direction=s.direction)
else:
# couldn't find widget
minval, maxval, axisscale = 0., 1., 'linear'
img = None
s.get('log').setSilent(axisscale == 'log')
self.setAutoRange([minval, maxval])
self.computePlottedRange(force=True)
# now draw image on axis...
minpix, maxpix = self.graphToPlotterCoords(
bounds, N.array([minval, maxval]) )
routside = qt4.QRectF(
bounds[0], bounds[1],
bounds[2]-bounds[0], bounds[3]-bounds[1] )
# really draw the img
if img is not None:
# coordinates to draw image and to clip rectangle
if s.direction == 'horizontal':
c = [ minpix, bounds[1], maxpix, bounds[3] ]
cl = [ self.coordParr1, bounds[1], self.coordParr2, bounds[3] ]
else:
c = [ bounds[0], maxpix, bounds[2], minpix ]
cl = [ bounds[0], self.coordParr1, bounds[2], self.coordParr2 ]
r = qt4.QRectF(c[0], c[1], c[2]-c[0], c[3]-c[1])
rclip = qt4.QRectF(cl[0], cl[1], cl[2]-cl[0], cl[3]-cl[1])
painter.save()
painter.setClipRect(rclip & routside)
painter.drawImage(r, img)
painter.restore()
# if there's a border
if not s.Border.hide:
painter.setPen( s.get('Border').makeQPen(painter) )
painter.setBrush( qt4.QBrush() )
painter.drawRect( routside )
# actually draw axis
axis.Axis._axisDraw(self, bounds, parentposn, None, painter,
phelper)
# allow the factory to instantiate a colorbar
document.thefactory.register( ColorBar )
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/types/ImportDecoratedFunctionType/a.py
|
27
|
from ext import func
value = func("sd")
dec_func = func
|
admcrae/tensorflow
|
refs/heads/master
|
tensorflow/python/framework/contrib_test.py
|
38
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that the contrib module shows up properly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from tensorflow.python.platform import test
class ContribTest(test.TestCase):
def testContrib(self):
# pylint: disable=g-import-not-at-top
import tensorflow as tf
_ = tf.contrib.layers # `tf.contrib` is loaded lazily on first use.
assert inspect.ismodule(tf.contrib)
def testLayers(self):
# pylint: disable=g-import-not-at-top
import tensorflow as tf
assert inspect.ismodule(tf.contrib.layers)
def testLinearOptimizer(self):
# pylint: disable=g-import-not-at-top
import tensorflow as tf
assert inspect.ismodule(tf.contrib.linear_optimizer)
if __name__ == '__main__':
test.main()
|
wenkaiqiu/petal
|
refs/heads/master
|
uniform_model/devices/device_group.py
|
2
|
import logging
logging.basicConfig(format='%(asctime)s <%(name)s> %(message)s')
logger = logging.getLogger('uniform_model.devices.group')
logger.setLevel(logging.DEBUG)
class DeviceGroup:
def __init__(self, *args):
logger.info(f"group devices: {args}")
self.group = list(args)
def __str__(self):
return f'<Group: {", ".join(map(str, self.group))}>'
def __repr__(self):
return f'<Group: {", ".join(map(str, self.group))}>'
|
ericfc/django
|
refs/heads/master
|
tests/invalid_models_tests/test_backend_specific.py
|
121
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from types import MethodType
from django.core.checks import Error
from django.db import connection, models
from .base import IsolatedModelsTestCase
class BackendSpecificChecksTests(IsolatedModelsTestCase):
def test_check_field(self):
""" Test if backend specific checks are performed. """
error = Error('an error', hint=None)
def mock(self, field, **kwargs):
return [error]
class Model(models.Model):
field = models.IntegerField()
field = Model._meta.get_field('field')
# Mock connection.validation.check_field method.
v = connection.validation
old_check_field = v.check_field
v.check_field = MethodType(mock, v)
try:
errors = field.check()
finally:
# Unmock connection.validation.check_field method.
v.check_field = old_check_field
self.assertEqual(errors, [error])
|
Jumpscale/jumpscale6_core
|
refs/heads/master
|
apps/gridportal/base/Grid/.macros/wiki/vdisks/1_vdisks.py
|
1
|
def main(j, args, params, tags, tasklet):
params.merge(args)
doc = params.doc
actor = j.apps.actorsloader.getActor("system", "gridmanager")
machineid = args.getTag('machineid')
out = []
#this makes sure bootstrap datatables functionality is used
out.append("{{datatables_use}}}}\n")
fields = ['id', 'nid', 'devicename', 'description', 'active', 'sizeondisk', 'free', 'path']
out.append('||id||node||devicename||description||active||free||path||')
vdisks = actor.getVDisks(machineid=int(machineid))
if not vdisks:
params.result = ('No disks found', doc)
return params
for vdisk in vdisks:
line = [""]
for field in fields:
# add links
if field == 'id':
line.append('[%(id)s|/grid/vdisk?id=%(id)s&gid=%(gid)s]' % vdisk)
elif field == 'nid':
line.append('[%(nid)s|/grid/node?id=%(nid)s&grid=%(gid)s]' % vdisk)
elif field == 'sizeondisk':
continue
elif field == 'free':
diskfree = vdisk[field]
disksize = vdisk['sizeondisk']
if disksize:
diskusage = 100 - int(100.0 * diskfree / disksize)
else:
diskusage = 0
line.append('%s%%' % diskusage)
else:
line.append(str(vdisk[field]))
line.append("")
#out.append("|[%s|/grid/node?id=%s]|%s|%s|%s|" % (node["id"], node["id"], node["name"], ipaddr, roles))
out.append("|".join(line))
params.result = ('\n'.join(out), doc)
return params
def match(j, args, params, tags, tasklet):
return True
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
cpython/275_test_exception_variations.py
|
214
|
from test.test_support import run_unittest
import unittest
class ExceptionTestCase(unittest.TestCase):
def test_try_except_else_finally(self):
hit_except = False
hit_else = False
hit_finally = False
try:
raise Exception, 'nyaa!'
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertTrue(hit_except)
self.assertTrue(hit_finally)
self.assertFalse(hit_else)
def test_try_except_else_finally_no_exception(self):
hit_except = False
hit_else = False
hit_finally = False
try:
pass
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertFalse(hit_except)
self.assertTrue(hit_finally)
self.assertTrue(hit_else)
def test_try_except_finally(self):
hit_except = False
hit_finally = False
try:
raise Exception, 'yarr!'
except:
hit_except = True
finally:
hit_finally = True
self.assertTrue(hit_except)
self.assertTrue(hit_finally)
def test_try_except_finally_no_exception(self):
hit_except = False
hit_finally = False
try:
pass
except:
hit_except = True
finally:
hit_finally = True
self.assertFalse(hit_except)
self.assertTrue(hit_finally)
def test_try_except(self):
hit_except = False
try:
raise Exception, 'ahoy!'
except:
hit_except = True
self.assertTrue(hit_except)
def test_try_except_no_exception(self):
hit_except = False
try:
pass
except:
hit_except = True
self.assertFalse(hit_except)
def test_try_except_else(self):
hit_except = False
hit_else = False
try:
raise Exception, 'foo!'
except:
hit_except = True
else:
hit_else = True
self.assertFalse(hit_else)
self.assertTrue(hit_except)
def test_try_except_else_no_exception(self):
hit_except = False
hit_else = False
try:
pass
except:
hit_except = True
else:
hit_else = True
self.assertFalse(hit_except)
self.assertTrue(hit_else)
def test_try_finally_no_exception(self):
hit_finally = False
try:
pass
finally:
hit_finally = True
self.assertTrue(hit_finally)
def test_nested(self):
hit_finally = False
hit_inner_except = False
hit_inner_finally = False
try:
try:
raise Exception, 'inner exception'
except:
hit_inner_except = True
finally:
hit_inner_finally = True
finally:
hit_finally = True
self.assertTrue(hit_inner_except)
self.assertTrue(hit_inner_finally)
self.assertTrue(hit_finally)
def test_nested_else(self):
hit_else = False
hit_finally = False
hit_except = False
hit_inner_except = False
hit_inner_else = False
try:
try:
pass
except:
hit_inner_except = True
else:
hit_inner_else = True
raise Exception, 'outer exception'
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertFalse(hit_inner_except)
self.assertTrue(hit_inner_else)
self.assertFalse(hit_else)
self.assertTrue(hit_finally)
self.assertTrue(hit_except)
def test_main():
run_unittest(ExceptionTestCase)
if __name__ == '__main__':
test_main()
|
osvalr/odoo
|
refs/heads/8.0
|
openerp/addons/test_impex/tests/test_export.py
|
211
|
# -*- coding: utf-8 -*-
import itertools
import openerp.modules.registry
import openerp
from openerp.tests import common
class CreatorCase(common.TransactionCase):
model_name = False
def __init__(self, *args, **kwargs):
super(CreatorCase, self).__init__(*args, **kwargs)
self.model = None
def setUp(self):
super(CreatorCase, self).setUp()
self.model = self.registry(self.model_name)
def make(self, value):
id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': value})
return self.model.browse(self.cr, openerp.SUPERUSER_ID, [id])[0]
def export(self, value, fields=('value',), context=None):
record = self.make(value)
return record._BaseModel__export_rows([f.split('/') for f in fields])
class test_boolean_field(CreatorCase):
model_name = 'export.boolean'
def test_true(self):
self.assertEqual(
self.export(True),
[[u'True']])
def test_false(self):
""" ``False`` value to boolean fields is unique in being exported as a
(unicode) string, not a boolean
"""
self.assertEqual(
self.export(False),
[[u'False']])
class test_integer_field(CreatorCase):
model_name = 'export.integer'
def test_empty(self):
self.assertEqual(self.model.search(self.cr, openerp.SUPERUSER_ID, []), [],
"Test model should have no records")
def test_0(self):
self.assertEqual(
self.export(0),
[[u'0']])
def test_basic_value(self):
self.assertEqual(
self.export(42),
[[u'42']])
def test_negative(self):
self.assertEqual(
self.export(-32),
[[u'-32']])
def test_huge(self):
self.assertEqual(
self.export(2**31-1),
[[unicode(2**31-1)]])
class test_float_field(CreatorCase):
model_name = 'export.float'
def test_0(self):
self.assertEqual(
self.export(0.0),
[[u'0.0']])
def test_epsilon(self):
self.assertEqual(
self.export(0.000000000027),
[[u'2.7e-11']])
def test_negative(self):
self.assertEqual(
self.export(-2.42),
[[u'-2.42']])
def test_positive(self):
self.assertEqual(
self.export(47.36),
[[u'47.36']])
def test_big(self):
self.assertEqual(
self.export(87654321.4678),
[[u'87654321.4678']])
class test_decimal_field(CreatorCase):
model_name = 'export.decimal'
def test_0(self):
self.assertEqual(
self.export(0.0),
[[u'0.0']])
def test_epsilon(self):
""" epsilon gets sliced to 0 due to precision
"""
self.assertEqual(
self.export(0.000000000027),
[[u'0.0']])
def test_negative(self):
self.assertEqual(
self.export(-2.42),
[[u'-2.42']])
def test_positive(self):
self.assertEqual(
self.export(47.36),
[[u'47.36']])
def test_big(self):
self.assertEqual(
self.export(87654321.4678), [[u'87654321.468']])
class test_string_field(CreatorCase):
model_name = 'export.string.bounded'
def test_empty(self):
self.assertEqual(
self.export(""),
[['']])
def test_within_bounds(self):
self.assertEqual(
self.export("foobar"),
[[u"foobar"]])
def test_out_of_bounds(self):
self.assertEqual(
self.export("C for Sinking, "
"Java for Drinking, "
"Smalltalk for Thinking. "
"...and Power to the Penguin!"),
[[u"C for Sinking, J"]])
class test_unbound_string_field(CreatorCase):
model_name = 'export.string'
def test_empty(self):
self.assertEqual(
self.export(""),
[['']])
def test_small(self):
self.assertEqual(
self.export("foobar"),
[[u"foobar"]])
def test_big(self):
self.assertEqual(
self.export("We flew down weekly to meet with IBM, but they "
"thought the way to measure software was the amount "
"of code we wrote, when really the better the "
"software, the fewer lines of code."),
[[u"We flew down weekly to meet with IBM, but they thought the "
u"way to measure software was the amount of code we wrote, "
u"when really the better the software, the fewer lines of "
u"code."]])
class test_text(CreatorCase):
model_name = 'export.text'
def test_empty(self):
self.assertEqual(
self.export(""),
[['']])
def test_small(self):
self.assertEqual(
self.export("foobar"),
[[u"foobar"]])
def test_big(self):
self.assertEqual(
self.export("So, `bind' is `let' and monadic programming is"
" equivalent to programming in the A-normal form. That"
" is indeed all there is to monads"),
[[u"So, `bind' is `let' and monadic programming is equivalent to"
u" programming in the A-normal form. That is indeed all there"
u" is to monads"]])
class test_date(CreatorCase):
model_name = 'export.date'
def test_empty(self):
self.assertEqual(
self.export(False),
[['']])
def test_basic(self):
self.assertEqual(
self.export('2011-11-07'),
[[u'2011-11-07']])
class test_datetime(CreatorCase):
model_name = 'export.datetime'
def test_empty(self):
self.assertEqual(
self.export(False),
[['']])
def test_basic(self):
self.assertEqual(
self.export('2011-11-07 21:05:48'),
[[u'2011-11-07 21:05:48']])
def test_tz(self):
""" Export ignores the timezone and always exports to UTC
.. note:: on the other hand, export uses user lang for name_get
"""
# NOTE: ignores user timezone, always exports to UTC
self.assertEqual(
self.export('2011-11-07 21:05:48', context={'tz': 'Pacific/Norfolk'}),
[[u'2011-11-07 21:05:48']])
class test_selection(CreatorCase):
model_name = 'export.selection'
translations_fr = [
("Qux", "toto"),
("Bar", "titi"),
("Foo", "tete"),
]
def test_empty(self):
self.assertEqual(
self.export(False),
[[False]])
def test_value(self):
""" selections export the *label* for their value
"""
self.assertEqual(
self.export(2),
[[u"Bar"]])
def test_localized_export(self):
self.registry('res.lang').create(self.cr, openerp.SUPERUSER_ID, {
'name': u'Français',
'code': 'fr_FR',
'translatable': True,
'date_format': '%d.%m.%Y',
'decimal_point': ',',
'thousands_sep': ' ',
})
Translations = self.registry('ir.translation')
for source, value in self.translations_fr:
Translations.create(self.cr, openerp.SUPERUSER_ID, {
'name': 'export.selection,value',
'lang': 'fr_FR',
'type': 'selection',
'src': source,
'value': value
})
self.assertEqual(
self.export(2, context={'lang': 'fr_FR'}),
[[u'Bar']])
class test_selection_function(CreatorCase):
model_name = 'export.selection.function'
def test_empty(self):
self.assertEqual(
self.export(False),
[['']])
def test_value(self):
# FIXME: selection functions export the *value* itself
self.assertEqual(
self.export(1),
[[1]])
self.assertEqual(
self.export(3),
[[3]])
# fucking hell
self.assertEqual(
self.export(0),
[['']])
class test_m2o(CreatorCase):
model_name = 'export.many2one'
def test_empty(self):
self.assertEqual(
self.export(False),
[[False]])
def test_basic(self):
""" Exported value is the name_get of the related object
"""
integer_id = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
name = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id]))[integer_id]
self.assertEqual(
self.export(integer_id),
[[name]])
def test_path(self):
""" Can recursively export fields of m2o via path
"""
integer_id = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
self.assertEqual(
self.export(integer_id, fields=['value/.id', 'value/value']),
[[unicode(integer_id), u'42']])
def test_external_id(self):
integer_id = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
# Expecting the m2o target model name in the external id,
# not this model's name
external_id = u'__export__.export_integer_%d' % integer_id
self.assertEqual(
self.export(integer_id, fields=['value/id']),
[[external_id]])
class test_o2m(CreatorCase):
model_name = 'export.one2many'
commands = [
(0, False, {'value': 4, 'str': 'record1'}),
(0, False, {'value': 42, 'str': 'record2'}),
(0, False, {'value': 36, 'str': 'record3'}),
(0, False, {'value': 4, 'str': 'record4'}),
(0, False, {'value': 13, 'str': 'record5'}),
]
names = [
u'export.one2many.child:%d' % d['value']
for c, _, d in commands
]
def test_empty(self):
self.assertEqual(
self.export(False),
[[False]])
def test_single(self):
self.assertEqual(
self.export([(0, False, {'value': 42})]),
# name_get result
[[u'export.one2many.child:42']])
def test_single_subfield(self):
self.assertEqual(
self.export([(0, False, {'value': 42})],
fields=['value', 'value/value']),
[[u'export.one2many.child:42', u'42']])
def test_integrate_one_in_parent(self):
self.assertEqual(
self.export([(0, False, {'value': 42})],
fields=['const', 'value/value']),
[[u'4', u'42']])
def test_multiple_records(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value/value']),
[
[u'4', u'4'],
[u'', u'42'],
[u'', u'36'],
[u'', u'4'],
[u'', u'13'],
])
def test_multiple_records_name(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value']),
[[
u'4', u','.join(self.names)
]])
def test_multiple_records_id(self):
export = self.export(self.commands, fields=['const', 'value/.id'])
O2M_c = self.registry('export.one2many.child')
ids = O2M_c.browse(self.cr, openerp.SUPERUSER_ID,
O2M_c.search(self.cr, openerp.SUPERUSER_ID, []))
self.assertEqual(
export,
[
['4', str(ids[0].id)],
['', str(ids[1].id)],
['', str(ids[2].id)],
['', str(ids[3].id)],
['', str(ids[4].id)],
])
def test_multiple_records_with_name_before(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value', 'value/value']),
[[ # exports sub-fields of very first o2m
u'4', u','.join(self.names), u'4'
]])
def test_multiple_records_with_name_after(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value/value', 'value']),
[ # completely ignores name_get request
[u'4', u'4', ''],
['', u'42', ''],
['', u'36', ''],
['', u'4', ''],
['', u'13', ''],
])
def test_multiple_subfields_neighbour(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value/str','value/value']),
[
[u'4', u'record1', u'4'],
['', u'record2', u'42'],
['', u'record3', u'36'],
['', u'record4', u'4'],
['', u'record5', u'13'],
])
def test_multiple_subfields_separated(self):
self.assertEqual(
self.export(self.commands, fields=['value/str', 'const', 'value/value']),
[
[u'record1', u'4', u'4'],
[u'record2', '', u'42'],
[u'record3', '', u'36'],
[u'record4', '', u'4'],
[u'record5', '', u'13'],
])
class test_o2m_multiple(CreatorCase):
model_name = 'export.one2many.multiple'
def make(self, value=None, **values):
if value is not None: values['value'] = value
id = self.model.create(self.cr, openerp.SUPERUSER_ID, values)
return self.model.browse(self.cr, openerp.SUPERUSER_ID, [id])[0]
def export(self, value=None, fields=('child1', 'child2',), context=None, **values):
record = self.make(value, **values)
return record._BaseModel__export_rows([f.split('/') for f in fields])
def test_empty(self):
self.assertEqual(
self.export(child1=False, child2=False),
[[False, False]])
def test_single_per_side(self):
self.assertEqual(
self.export(child1=False, child2=[(0, False, {'value': 42})]),
[[False, u'export.one2many.child.2:42']])
self.assertEqual(
self.export(child1=[(0, False, {'value': 43})], child2=False),
[[u'export.one2many.child.1:43', False]])
self.assertEqual(
self.export(child1=[(0, False, {'value': 43})],
child2=[(0, False, {'value': 42})]),
[[u'export.one2many.child.1:43', u'export.one2many.child.2:42']])
def test_single_integrate_subfield(self):
fields = ['const', 'child1/value', 'child2/value']
self.assertEqual(
self.export(child1=False, child2=[(0, False, {'value': 42})],
fields=fields),
[[u'36', False, u'42']])
self.assertEqual(
self.export(child1=[(0, False, {'value': 43})], child2=False,
fields=fields),
[[u'36', u'43', False]])
self.assertEqual(
self.export(child1=[(0, False, {'value': 43})],
child2=[(0, False, {'value': 42})],
fields=fields),
[[u'36', u'43', u'42']])
def test_multiple(self):
""" With two "concurrent" o2ms, exports the first line combined, then
exports the rows for the first o2m, then the rows for the second o2m.
"""
fields = ['const', 'child1/value', 'child2/value']
child1 = [(0, False, {'value': v, 'str': 'record%.02d' % index})
for index, v in zip(itertools.count(), [4, 42, 36, 4, 13])]
child2 = [(0, False, {'value': v, 'str': 'record%.02d' % index})
for index, v in zip(itertools.count(10), [8, 12, 8, 55, 33, 13])]
self.assertEqual(
self.export(child1=child1, child2=False, fields=fields),
[
[u'36', u'4', False],
['', u'42', ''],
['', u'36', ''],
['', u'4', ''],
['', u'13', ''],
])
self.assertEqual(
self.export(child1=False, child2=child2, fields=fields),
[
[u'36', False, u'8'],
['', '', u'12'],
['', '', u'8'],
['', '', u'55'],
['', '', u'33'],
['', '', u'13'],
])
self.assertEqual(
self.export(child1=child1, child2=child2, fields=fields),
[
[u'36', u'4', u'8'],
['', u'42', ''],
['', u'36', ''],
['', u'4', ''],
['', u'13', ''],
['', '', u'12'],
['', '', u'8'],
['', '', u'55'],
['', '', u'33'],
['', '', u'13'],
])
class test_m2m(CreatorCase):
model_name = 'export.many2many'
commands = [
(0, False, {'value': 4, 'str': 'record000'}),
(0, False, {'value': 42, 'str': 'record001'}),
(0, False, {'value': 36, 'str': 'record010'}),
(0, False, {'value': 4, 'str': 'record011'}),
(0, False, {'value': 13, 'str': 'record100'}),
]
names = [
u'export.many2many.other:%d' % d['value']
for c, _, d in commands
]
def test_empty(self):
self.assertEqual(
self.export(False),
[[False]])
def test_single(self):
self.assertEqual(
self.export([(0, False, {'value': 42})]),
# name_get result
[[u'export.many2many.other:42']])
def test_single_subfield(self):
self.assertEqual(
self.export([(0, False, {'value': 42})],
fields=['value', 'value/value']),
[[u'export.many2many.other:42', u'42']])
def test_integrate_one_in_parent(self):
self.assertEqual(
self.export([(0, False, {'value': 42})],
fields=['const', 'value/value']),
[[u'4', u'42']])
def test_multiple_records(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value/value']),
[
[u'4', u'4'],
[u'', u'42'],
[u'', u'36'],
[u'', u'4'],
[u'', u'13'],
])
def test_multiple_records_name(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value']),
[[ # FIXME: hardcoded comma, import uses config.csv_internal_sep
# resolution: remove configurable csv_internal_sep
u'4', u','.join(self.names)
]])
# essentially same as o2m, so boring
class test_function(CreatorCase):
model_name = 'export.function'
def test_value(self):
""" Exports value normally returned by accessing the function field
"""
self.assertEqual(
self.export(42),
[[u'3']])
|
perfidia/afefuc
|
refs/heads/master
|
src/gui/MainWindowWrapper.py
|
2
|
'''
Created on Apr 25, 2013
@author: Bartosz Alchimowicz
'''
import os
from PyQt4 import QtCore, QtGui
from generated.ui.MainWindow import Ui_MainWindow
from gui.ActorsTabWrapper import ActorsTabWrapper
from gui.PropertiesTabWrapper import PropertiesTabWrapper
from gui.UseCasesTabWrapper import UseCasesTabWrapper
from gui.PrioritiesTabWrapper import PrioritiesTabWrapper
from gui.GoalLevelsTabWrapper import GoalLevelsTabWrapper
from gui.BusinessObjectsTabWrapper import BusinessObjectsTabWrapper
from gui.BusinessRulesTabWrapper import BusinessRulesTabWrapper
from gui.TestCasesTabWrapper import TestCasesTabWrapper
from gui.GlossaryTabWrapper import GlossaryTabWrapper
from gui.WikiExportWrapper import WikiExportWrapper
from gui.SeleniumExportWrapper import SeleniumExportWrapper
import format.writer.xml
import format.reader.xml
from format.writer.selenium import selenium
from testcases.highlighter import highlighter
import format.model
from utils.clipboard import Clipboard
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class MainWindowWrapper(QtGui.QMainWindow):
def __init__(self, parent=None, application=None):
QtGui.QWidget.__init__(self, parent)
self.mainWindow = Ui_MainWindow()
self.afefuc = {'project': None, 'clipboard': Clipboard(application)}
self.filename = None
def show(self):
super(QtGui.QMainWindow, self).show()
self.mainWindow.setupUi(self)
self.afefuc["project"] = format.model.Project()
self.mainWindow.tabWidget.hide()
screen = QtGui.QApplication.desktop().screenGeometry()
self.move(screen.center() - self.rect().center())
self.activateWindow()
self.mainWindow.actionNew.triggered.connect(self.clickedNew)
self.mainWindow.actionOpen.triggered.connect(self.clickedOpen)
self.mainWindow.actionSave.triggered.connect(self.clickedSave)
self.mainWindow.actionSaveAs.triggered.connect(self.clickedSaveAs)
self.mainWindow.actionClose.triggered.connect(self.clickedClose)
self.mainWindow.actionQuit.triggered.connect(self.clickedQuit)
self.mainWindow.actionDump.triggered.connect(self.clickedDump)
self.mainWindow.actionWikiExport.triggered.connect(self.clickedExportWiki)
self.mainWindow.actionSeleniumExport.triggered.connect(self.clickedExportSelenium)
self.propertiesTab = PropertiesTabWrapper(self, self.afefuc)
self.propertiesTab.show()
self.prioritiesTab = PrioritiesTabWrapper(self, self.afefuc)
self.prioritiesTab.show()
self.goalLevelTab = GoalLevelsTabWrapper(self, self.afefuc)
self.goalLevelTab.show()
self.businessObjectsTab = BusinessObjectsTabWrapper(self, self.afefuc)
self.businessObjectsTab.show()
self.businessRulesTab = BusinessRulesTabWrapper(self, self.afefuc)
self.businessRulesTab.show()
self.actorsTab = ActorsTabWrapper(self, self.afefuc)
self.actorsTab.show()
self.usecasesTab = UseCasesTabWrapper(self, self.afefuc)
self.usecasesTab.show()
self.testcasesTab = TestCasesTabWrapper(self, self.afefuc)
self.testcasesTab.show()
self.glossaryTab = GlossaryTabWrapper(self, self.afefuc)
self.glossaryTab.show()
if os.path.isfile("../private/data.py"):
import sys
print "using debug data"
sys.path.append('../private')
import data
self.afefuc["project"] = data.project
self.load()
self.mainWindow.tabWidget.show()
def load(self):
self.propertiesTab.load()
self.prioritiesTab.load()
self.goalLevelTab.load()
self.businessObjectsTab.load()
self.businessRulesTab.load()
self.actorsTab.load()
self.usecasesTab.load()
self.testcasesTab.load()
self.glossaryTab.load()
def clickedNew(self):
self.clickedClose()
self.mainWindow.tabWidget.show()
self.afefuc["project"] = format.model.Project()
self.load()
def clickedOpen(self):
filename = QtGui.QFileDialog.getOpenFileName(self, "Open", "", "AFEFUC (*.auc)")
if filename:
filename = unicode(filename.toUtf8(), "utf-8")
self.clickedClose()
self.filename = filename
self.afefuc['project'] = format.reader.xml.read(self.filename)
self.load()
self.mainWindow.tabWidget.show()
def clickedSave(self, saveAs = False):
self.afefuc['project'].name = unicode(self.propertiesTab.tab.projectNameEdit.text().toUtf8(), "utf-8")
self.afefuc['project'].version = unicode(self.propertiesTab.tab.versionEdit.text().toUtf8(), "utf-8")
self.afefuc['project'].abbreviation = unicode(self.propertiesTab.tab.abbreviationEdit.text().toUtf8(), "utf-8")
self.afefuc['project'].problem_description = unicode(self.propertiesTab.tab.problemEdit.toPlainText().toUtf8(), "utf-8")
self.afefuc['project'].system_description = unicode(self.propertiesTab.tab.systemEdit.toPlainText().toUtf8(), "utf-8")
if self.afefuc['project'] and (not self.filename or saveAs == True):
self.filename = QtGui.QFileDialog.getSaveFileName(self, "Save as", "", "AFEFUC (*.auc)")
if self.filename:
if self.filename[-4:] != '.auc':
self.filename += ".auc"
format.writer.xml.write(self.filename, self.afefuc['project'])
def clickedSaveAs(self):
self.clickedSave(saveAs = True)
def clickedClose(self):
self.filename = None
self.afefuc['project']= None
self.mainWindow.tabWidget.hide()
def clickedQuit(self):
self.close()
def clickedExportWiki(self):
if self.afefuc['project']:
WikiExportWrapper(self, self.afefuc).show()
def clickedExportSelenium(self):
if self.afefuc['project']:
SeleniumExportWrapper(self, self.afefuc).show()
def clickedDump(self):
format.writer.xml.write("tmp.auc", self.afefuc['project'])
import objgraph
objgraph.show_refs(self.afefuc['project'], refcounts=True, filename='project-refs.png')
#objgraph.show_backrefs(self.afefuc['project'], filename='project-backref.png')
|
tinloaf/home-assistant
|
refs/heads/dev
|
homeassistant/components/weblink.py
|
9
|
"""
Support for links to external web pages.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/weblink/
"""
import logging
import voluptuous as vol
from homeassistant.const import (CONF_NAME, CONF_ICON, CONF_URL)
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_ENTITIES = 'entities'
CONF_RELATIVE_URL_ERROR_MSG = "Invalid relative URL. Absolute path required."
CONF_RELATIVE_URL_REGEX = r'\A/'
DOMAIN = 'weblink'
ENTITIES_SCHEMA = vol.Schema({
# pylint: disable=no-value-for-parameter
vol.Required(CONF_URL): vol.Any(
vol.Match(CONF_RELATIVE_URL_REGEX, msg=CONF_RELATIVE_URL_ERROR_MSG),
vol.Url()),
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_ICON): cv.icon,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_ENTITIES): [ENTITIES_SCHEMA],
}),
}, extra=vol.ALLOW_EXTRA)
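# Illustrative only: the names and URLs below are assumptions, not taken from
# this component. A mapping that CONFIG_SCHEMA accepts could look like:
#   {'weblink': {'entities': [
#       {'name': 'Router', 'url': 'http://192.168.1.1', 'icon': 'mdi:router-wireless'},
#       {'name': 'Dashboard', 'url': '/local/dashboard.html'},
#   ]}}
# The second entry relies on CONF_RELATIVE_URL_REGEX, which accepts absolute paths.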
def setup(hass, config):
"""Set up the weblink component."""
links = config.get(DOMAIN)
for link in links.get(CONF_ENTITIES):
Link(hass, link.get(CONF_NAME), link.get(CONF_URL),
link.get(CONF_ICON))
return True
class Link(Entity):
"""Representation of a link."""
def __init__(self, hass, name, url, icon):
"""Initialize the link."""
self.hass = hass
self._name = name
self._url = url
self._icon = icon
self.entity_id = DOMAIN + '.%s' % slugify(name)
self.schedule_update_ha_state()
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def name(self):
"""Return the name of the URL."""
return self._name
@property
def state(self):
"""Return the URL."""
return self._url
|
DiptoDas8/Biponi
|
refs/heads/master
|
lib/python2.7/site-packages/setuptools/site-patch.py
|
720
|
def __boot():
import sys
import os
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys,'path_importer_cache',{})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
#print "searching",stdpath,sys.path
for item in stdpath:
if item==mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
import imp # Avoid import loop in Python >= 3.3
stream, path, descr = imp.find_module('site',[item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site',stream,path,descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
#print "loaded", __file__
known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys,'__egginsert',0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d, nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p, np = makepath(item)
if np==nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
|
LiveZenLK/CeygateERP
|
refs/heads/master
|
addons/mail/controllers/bus.py
|
4
|
# -*- coding: utf-8 -*-
import base64
import openerp
from openerp import SUPERUSER_ID
from openerp.http import request
class MailChatController(openerp.addons.bus.controllers.main.BusController):
def _default_request_uid(self):
""" For Anonymous people, they receive the access right of SUPERUSER_ID since they have NO access (auth=none)
!!! Each time a method from this controller is call, there is a check if the user (who can be anonymous and Sudo access)
can access to the resource.
"""
return request.session.uid and request.session.uid or SUPERUSER_ID
# --------------------------
# Extends BUS Controller Poll
# --------------------------
def _poll(self, dbname, channels, last, options):
if request.session.uid:
partner_id = request.env.user.partner_id.id
if partner_id:
for mail_channel in request.env['mail.channel'].search([('channel_partner_ids', 'in', [partner_id])]):
channels.append((request.db, 'mail.channel', mail_channel.id))
# personal and needaction channel
channels.append((request.db, 'res.partner', partner_id))
channels.append((request.db, 'ir.needaction', partner_id))
return super(MailChatController, self)._poll(dbname, channels, last, options)
# --------------------------
# Anonymous routes (Common Methods)
# --------------------------
@openerp.http.route('/mail/chat_post', type="json", auth="none")
def mail_chat_post(self, uuid, message_content, **kwargs):
request_uid = self._default_request_uid()
# find the author from the user session, which can be None
author_id = False # message_post accept 'False' author_id, but not 'None'
if request.session.uid:
author_id = request.env['res.users'].sudo().browse(request.session.uid).partner_id.id
# post a message without adding followers to the channel. email_from=False avoids deriving the author from email data
mail_channel = request.env["mail.channel"].sudo(request_uid).search([('uuid', '=', uuid)], limit=1)
message = mail_channel.sudo(request_uid).with_context(mail_create_nosubscribe=True).message_post(author_id=author_id, email_from=False, body=message_content, message_type='comment', subtype='mail.mt_comment', content_subtype='plaintext', **kwargs)
return message and message.id or False
@openerp.http.route(['/mail/chat_history'], type="json", auth="none")
def mail_chat_history(self, uuid, last_id=False, limit=20):
request_uid = self._default_request_uid()
channel = request.env["mail.channel"].sudo(request_uid).search([('uuid', '=', uuid)], limit=1)
return channel.sudo(request_uid).channel_fetch_message(last_id, limit)
@openerp.http.route('/mail/chat_init', type="json", auth="none")
def mail_chat_init(self):
result = {
'emoji': request.env['mail.shortcode'].sudo().search_read([('shortcode_type', '=', 'image')], ['source', 'substitution', 'description'])
}
# include the previous notifications, only for identified user
if request.session.uid:
request_uid = self._default_request_uid()
result['notifications'] = request.env['mail.channel'].sudo(request_uid).get_init_notifications()
return result
|
rnixx/python-for-android
|
refs/heads/master
|
tests/test_androidmodule_ctypes_finder.py
|
4
|
# This test is still expected to support Python 2, as it tests
# on-Android functionality that we still maintain
try: # Python 3+
from unittest import mock
from unittest.mock import MagicMock
except ImportError: # Python 2
import mock
from mock import MagicMock
import os
import shutil
import sys
import tempfile
# Import the tested android._ctypes_library_finder module,
# making sure android._android won't crash us!
# (since android._android is android-only / not compilable on desktop)
android_module_folder = os.path.abspath(os.path.join(
os.path.dirname(__file__),
"..", "pythonforandroid", "recipes", "android", "src"
))
sys.path.insert(0, android_module_folder)
sys.modules['android._android'] = MagicMock()
import android._ctypes_library_finder
sys.path.remove(android_module_folder)
@mock.patch.dict('sys.modules', jnius=MagicMock())
def test_get_activity_lib_dir():
import jnius # should get us our fake module
# Short test that it works when activity doesn't exist:
jnius.autoclass = MagicMock()
jnius.autoclass.return_value = None
assert android._ctypes_library_finder.get_activity_lib_dir(
"JavaClass"
) is None
assert mock.call("JavaClass") in jnius.autoclass.call_args_list
# Comprehensive test that verifies getApplicationInfo() call:
activity = MagicMock()
app_context = activity.getApplicationContext()
app_context.getPackageName.return_value = "test.package"
app_info = app_context.getPackageManager().getApplicationInfo()
app_info.nativeLibraryDir = '/testpath'
def pick_class(name):
cls = MagicMock()
if name == "JavaClass":
cls.mActivity = activity
elif name == "android.content.pm.PackageManager":
# Manager class:
cls.GET_SHARED_LIBRARY_FILES = 1024
return cls
jnius.autoclass = MagicMock(side_effect=pick_class)
assert android._ctypes_library_finder.get_activity_lib_dir(
"JavaClass"
) == "/testpath"
assert mock.call("JavaClass") in jnius.autoclass.call_args_list
assert mock.call("test.package", 1024) in (
app_context.getPackageManager().getApplicationInfo.call_args_list
)
@mock.patch.dict('sys.modules', jnius=MagicMock())
def test_find_library():
test_d = tempfile.mkdtemp(prefix="p4a-android-ctypes-test-libdir-")
try:
with open(os.path.join(test_d, "mymadeuplib.so.5"), "w"):
pass
import jnius # should get us our fake module
# Test with mActivity returned:
jnius.autoclass = MagicMock()
jnius.autoclass().mService = None
app_context = jnius.autoclass().mActivity.getApplicationContext()
app_info = app_context.getPackageManager().getApplicationInfo()
app_info.nativeLibraryDir = '/doesnt-exist-testpath'
assert android._ctypes_library_finder.find_library(
"mymadeuplib"
) is None
assert mock.call("org.kivy.android.PythonActivity") in (
jnius.autoclass.call_args_list
)
app_info.nativeLibraryDir = test_d
assert os.path.normpath(android._ctypes_library_finder.find_library(
"mymadeuplib"
)) == os.path.normpath(os.path.join(test_d, "mymadeuplib.so.5"))
# Test with mService returned:
jnius.autoclass = MagicMock()
jnius.autoclass().mActivity = None
app_context = jnius.autoclass().mService.getApplicationContext()
app_info = app_context.getPackageManager().getApplicationInfo()
app_info.nativeLibraryDir = '/doesnt-exist-testpath'
assert android._ctypes_library_finder.find_library(
"mymadeuplib"
) is None
app_info.nativeLibraryDir = test_d
assert os.path.normpath(android._ctypes_library_finder.find_library(
"mymadeuplib"
)) == os.path.normpath(os.path.join(test_d, "mymadeuplib.so.5"))
finally:
shutil.rmtree(test_d)
def test_does_libname_match_filename():
assert android._ctypes_library_finder.does_libname_match_filename(
"mylib", "mylib.so"
)
assert not android._ctypes_library_finder.does_libname_match_filename(
"mylib", "amylib.so"
)
assert not android._ctypes_library_finder.does_libname_match_filename(
"mylib", "mylib.txt"
)
assert not android._ctypes_library_finder.does_libname_match_filename(
"mylib", "mylib"
)
assert android._ctypes_library_finder.does_libname_match_filename(
"mylib", "libmylib.test.so.1.2.3"
)
assert not android._ctypes_library_finder.does_libname_match_filename(
"mylib", "libtest.mylib.so"
)
assert android._ctypes_library_finder.does_libname_match_filename(
"mylib", "mylib.so.5"
)
|
mlperf/training_results_v0.6
|
refs/heads/master
|
Google/benchmarks/gnmt/implementations/tpu-v3-512-gnmt/nmt/estimator.py
|
5
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimator functions supporting running on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import tensorflow as tf
import low_level_runner
import metric
import model
from utils import iterator_utils
from utils import vocab_utils
def make_model_fn(hparams):
"""Construct a GNMT model function for training."""
def _model_fn(features, labels, mode, params):
"""Model function."""
del labels, params
# Create a GNMT model for training.
gnmt_model = model.BaseModel(hparams, mode=mode, features=features)
if mode == tf.contrib.learn.ModeKeys.INFER:
predicted_ids = gnmt_model.predicted_ids
# make sure outputs is of shape [batch_size, time] or [beam_width,
# batch_size, time] when using beam search.
predicted_ids = tf.transpose(predicted_ids, [2, 1, 0])
# Get the top predictions from beam search.
predicted_ids = tf.gather_nd(predicted_ids, [0])
predictions = {"predictions": predicted_ids}
return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions)
elif mode == tf.contrib.learn.ModeKeys.TRAIN:
loss = tf.zeros([], dtype=tf.float32)
train_op = gnmt_model.update
else:
raise ValueError("Unknown mode in model_fn: %s" % mode)
if hparams.use_tpu:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, train_op=train_op)
else:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
return _model_fn
def make_input_fn(hparams, mode):
"""Construct a input function for training."""
def _input_fn(params):
"""Input function."""
if mode == tf.contrib.learn.ModeKeys.TRAIN:
src_file = "%s.%s" % (hparams.train_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.train_prefix, hparams.tgt)
else:
src_file = "%s.%s" % (hparams.test_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.test_prefix, hparams.tgt)
src_vocab_file = hparams.src_vocab_file
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file)
if mode == tf.contrib.learn.ModeKeys.TRAIN:
if "context" in params:
batch_size = params["batch_size"]
global_batch_size = batch_size
num_hosts = params["context"].num_hosts
# TODO(dehao): update to use current_host once available in API.
current_host = params["context"].current_input_fn_deployment()[1]
else:
if "dataset_index" in params:
current_host = params["dataset_index"]
num_hosts = params["dataset_num_shards"]
batch_size = params["batch_size"]
global_batch_size = hparams.batch_size
else:
num_hosts = 1
current_host = 0
batch_size = hparams.batch_size
global_batch_size = batch_size
if not hparams.use_preprocessed_data:
src_dataset = tf.data.TextLineDataset(src_file)
tgt_dataset = tf.data.TextLineDataset(tgt_file)
return iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size=batch_size,
global_batch_size=global_batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=hparams.src_max_len,
tgt_max_len=hparams.tgt_max_len,
output_buffer_size=None,
skip_count=None,
num_shards=num_hosts,
shard_index=current_host,
reshuffle_each_iteration=True,
filter_oversized_sequences=True)
else:
return iterator_utils.get_preprocessed_iterator(
hparams.train_prefix + "*",
batch_size=batch_size,
random_seed=hparams.random_seed,
max_seq_len=hparams.src_max_len,
num_buckets=hparams.num_buckets,
shard_index=current_host,
num_shards=num_hosts)
else:
if "dataset_index" in params:
current_host = params["dataset_index"]
num_hosts = params["dataset_num_shards"]
else:
num_hosts = 1
current_host = 0
if "infer_batch_size" in params:
batch_size = params["infer_batch_size"]
else:
batch_size = hparams.infer_batch_size
src_dataset = tf.data.TextLineDataset(src_file)
src_dataset = src_dataset.repeat().batch(
hparams.infer_batch_size // num_hosts).shard(
num_hosts, current_host).apply(tf.contrib.data.unbatch())
return iterator_utils.get_infer_iterator(
src_dataset,
src_vocab_table,
batch_size=batch_size,
eos=hparams.eos,
sos=hparams.sos,
src_max_len=hparams.src_max_len_infer)
def _synthetic_input_fn(params):
"""Fake inputs for debugging and benchmarking."""
del params
batch_size = hparams.batch_size
src_max_len = hparams.src_max_len
tgt_max_len = hparams.tgt_max_len
features = {
"source":
tf.random_uniform(
dtype=tf.int32,
minval=1,
maxval=10,
seed=1,
shape=(batch_size, src_max_len)),
"target_input":
tf.random_uniform(
dtype=tf.int32,
minval=1,
maxval=10,
seed=2,
shape=(batch_size, tgt_max_len)),
"target_output":
tf.random_uniform(
dtype=tf.int32,
minval=1,
maxval=10,
seed=3,
shape=(batch_size, tgt_max_len)),
"source_sequence_length":
tf.constant([src_max_len] * batch_size),
"target_sequence_length":
tf.constant([tgt_max_len] * batch_size)
}
return features
if hparams.use_synthetic_data and mode == tf.contrib.learn.ModeKeys.TRAIN:
return _synthetic_input_fn
else:
return _input_fn
def _get_tgt_sos_eos_id(hparams):
with tf.Session() as sess:
_, tgt_vocab_table = vocab_utils.create_vocab_tables(
hparams.src_vocab_file)
tgt_sos_id = tf.cast(
tgt_vocab_table.lookup(tf.constant(hparams.sos)), tf.int32)
tgt_eos_id = tf.cast(
tgt_vocab_table.lookup(tf.constant(hparams.eos)), tf.int32)
sess.run(tf.tables_initializer())
tgt_sos_id = sess.run(tgt_sos_id, {})
tgt_eos_id = sess.run(tgt_eos_id, {})
return tgt_sos_id, tgt_eos_id
def create_train_runner(hparams, eval_steps=0):
steps_per_epoch = int(hparams.num_examples_per_epoch/hparams.batch_size)
return low_level_runner.LowLevelRunner(
train_iterations=steps_per_epoch,
eval_steps=eval_steps,
hparams=hparams,
per_host_v1=True)
def create_eval_runner(hparams):
hparams.tgt_sos_id, hparams.tgt_eos_id = 1, 2
eval_steps = int(math.ceil(
hparams.examples_to_infer / hparams.infer_batch_size))
return low_level_runner.LowLevelRunner(
eval_steps=eval_steps, hparams=hparams, train_iterations=0)
def create_eval_runner_and_build_graph(hparams, model_fn):
runner = create_eval_runner(hparams)
input_fn = make_input_fn(hparams, tf.contrib.learn.ModeKeys.INFER)
params = {
"infer_batch_size": int(hparams.infer_batch_size / hparams.num_shards)
}
runner.initialize(None, input_fn, params)
runner.build_model(model_fn, params)
return runner
def train_fn(hparams):
"""Train function."""
hparams.tgt_sos_id, hparams.tgt_eos_id = _get_tgt_sos_eos_id(hparams)
model_fn = make_model_fn(hparams)
runner = create_train_runner(hparams)
input_fn = make_input_fn(hparams, tf.contrib.learn.ModeKeys.TRAIN)
runner.initialize(input_fn, None, {})
runner.build_model(model_fn, {})
runner.train(0, hparams.num_train_steps)
return 0.0
def train_and_eval_with_low_level_api(hparams):
"""Train and evaluation function."""
# pylint: disable=protected-access
hparams.tgt_sos_id, hparams.tgt_eos_id = 1, 2
model_fn = make_model_fn(hparams)
eval_steps = int(
math.ceil(hparams.examples_to_infer / hparams.infer_batch_size))
runner = create_train_runner(hparams, eval_steps)
train_input_fn = make_input_fn(hparams, tf.contrib.learn.ModeKeys.TRAIN)
eval_input_fn = make_input_fn(hparams, tf.contrib.learn.ModeKeys.INFER)
params = {
"infer_batch_size": int(hparams.infer_batch_size / hparams.num_shards)
}
runner.initialize(train_input_fn, eval_input_fn, params)
runner.build_model(model_fn, params)
return runner.train_and_predict()
def eval_fn(hparams):
"""Inference function."""
hparams.tgt_sos_id, hparams.tgt_eos_id = _get_tgt_sos_eos_id(hparams)
model_fn = make_model_fn(hparams)
eval_runner = create_eval_runner_and_build_graph(hparams, model_fn)
predictions = list(eval_runner.predict())
checkpoint_path = tf.train.latest_checkpoint(hparams.out_dir)
current_step = int(os.path.basename(checkpoint_path).split("-")[1])
return metric.get_metric(hparams, predictions, current_step)
|
TRUFIL/erpnext
|
refs/heads/develop
|
erpnext/buying/doctype/request_for_quotation/test_request_for_quotation.py
|
13
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from erpnext.templates.pages.rfq import check_supplier_has_docname_access
from frappe.utils import nowdate
class TestRequestforQuotation(unittest.TestCase):
def test_quote_status(self):
from erpnext.buying.doctype.request_for_quotation.request_for_quotation import make_supplier_quotation
rfq = make_request_for_quotation()
self.assertEquals(rfq.get('suppliers')[0].quote_status, 'Pending')
self.assertEquals(rfq.get('suppliers')[1].quote_status, 'Pending')
# Submit the first supplier quotation
sq = make_supplier_quotation(rfq.name, rfq.get('suppliers')[0].supplier)
sq.submit()
# No Quote first supplier quotation
rfq.get('suppliers')[1].no_quote = 1
rfq.get('suppliers')[1].quote_status = 'No Quote'
rfq.update_rfq_supplier_status() #rfq.get('suppliers')[1].supplier)
self.assertEquals(rfq.get('suppliers')[0].quote_status, 'Received')
self.assertEquals(rfq.get('suppliers')[1].quote_status, 'No Quote')
def test_make_supplier_quotation(self):
from erpnext.buying.doctype.request_for_quotation.request_for_quotation import make_supplier_quotation
rfq = make_request_for_quotation()
sq = make_supplier_quotation(rfq.name, rfq.get('suppliers')[0].supplier)
sq.submit()
sq1 = make_supplier_quotation(rfq.name, rfq.get('suppliers')[1].supplier)
sq1.submit()
self.assertEquals(sq.supplier, rfq.get('suppliers')[0].supplier)
self.assertEquals(sq.get('items')[0].request_for_quotation, rfq.name)
self.assertEquals(sq.get('items')[0].item_code, "_Test Item")
self.assertEquals(sq.get('items')[0].qty, 5)
self.assertEquals(sq1.supplier, rfq.get('suppliers')[1].supplier)
self.assertEquals(sq1.get('items')[0].request_for_quotation, rfq.name)
self.assertEquals(sq1.get('items')[0].item_code, "_Test Item")
self.assertEquals(sq1.get('items')[0].qty, 5)
def test_make_supplier_quotation_with_special_characters(self):
from erpnext.buying.doctype.request_for_quotation.request_for_quotation import make_supplier_quotation
frappe.delete_doc_if_exists("Supplier", "_Test Supplier '1", force=1)
supplier = frappe.new_doc("Supplier")
supplier.supplier_name = "_Test Supplier '1"
supplier.supplier_type = "_Test Supplier Type"
supplier.insert()
rfq = make_request_for_quotation(supplier_wt_appos)
sq = make_supplier_quotation(rfq.name, supplier_wt_appos[0].get("supplier"))
sq.submit()
frappe.form_dict = frappe.local("form_dict")
frappe.form_dict.name = rfq.name
self.assertEqual(
check_supplier_has_docname_access(supplier_wt_appos[0].get('supplier')),
True
)
# reset form_dict
frappe.form_dict.name = None
def test_make_supplier_quotation_from_portal(self):
from erpnext.buying.doctype.request_for_quotation.request_for_quotation import create_supplier_quotation
rfq = make_request_for_quotation()
rfq.get('items')[0].rate = 100
rfq.supplier = rfq.suppliers[0].supplier
supplier_quotation_name = create_supplier_quotation(rfq)
supplier_quotation_doc = frappe.get_doc('Supplier Quotation', supplier_quotation_name)
self.assertEquals(supplier_quotation_doc.supplier, rfq.get('suppliers')[0].supplier)
self.assertEquals(supplier_quotation_doc.get('items')[0].request_for_quotation, rfq.name)
self.assertEquals(supplier_quotation_doc.get('items')[0].item_code, "_Test Item")
self.assertEquals(supplier_quotation_doc.get('items')[0].qty, 5)
self.assertEquals(supplier_quotation_doc.get('items')[0].amount, 500)
def make_request_for_quotation(supplier_data=None):
"""
:param supplier_data: List containing supplier data
"""
supplier_data = supplier_data if supplier_data else get_supplier_data()
rfq = frappe.new_doc('Request for Quotation')
rfq.transaction_date = nowdate()
rfq.status = 'Draft'
rfq.company = '_Test Company'
rfq.message_for_supplier = 'Please supply the specified items at the best possible rates.'
for data in supplier_data:
rfq.append('suppliers', data)
rfq.append("items", {
"item_code": "_Test Item",
"description": "_Test Item",
"uom": "_Test UOM",
"qty": 5,
"warehouse": "_Test Warehouse - _TC",
"schedule_date": nowdate()
})
rfq.submit()
return rfq
def get_supplier_data():
return [{
"supplier": "_Test Supplier",
"supplier_name": "_Test Supplier"
},
{
"supplier": "_Test Supplier 1",
"supplier_name": "_Test Supplier 1"
}]
supplier_wt_appos = [{
"supplier": "_Test Supplier '1",
"supplier_name": "_Test Supplier '1",
}]
|
tkovalsky/folio
|
refs/heads/master
|
folio/users/views.py
|
95
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = 'username'
slug_url_kwarg = 'username'
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['name', ]
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = 'username'
slug_url_kwarg = 'username'
|
pilou-/ansible-modules-core
|
refs/heads/devel
|
cloud/google/gc_storage.py
|
21
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gc_storage
version_added: "1.4"
short_description: This module manages objects/buckets in Google Cloud Storage.
description:
- This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for information about setting the default project.
options:
bucket:
description:
- Bucket name.
required: true
object:
description:
- Keyname of the object inside the bucket. Can also be used to create "virtual directories" (see examples).
required: false
default: null
src:
description:
- The source file path when performing a PUT operation.
required: false
default: null
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
required: false
force:
description:
- Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
required: false
default: true
aliases: [ 'overwrite' ]
permission:
description:
- This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'authenticated-read'.
required: false
default: private
headers:
version_added: "2.0"
description:
- Headers to attach to object.
required: false
default: '{}'
expiration:
description:
- Time limit (in seconds) for the URL generated and returned by GCS when performing a mode=put or mode=get_url operation. This URL is only available when public-read is the acl for the object.
required: false
default: null
mode:
description:
- Switches the module behaviour between upload, download, get_url (return download url), get_str (download object as string), create (bucket) and delete (bucket).
required: true
default: null
choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]
gcs_secret_key:
description:
- GCS secret key. If not set then the value of the GCS_SECRET_KEY environment variable is used.
required: true
default: null
gcs_access_key:
description:
- GCS access key. If not set then the value of the GCS_ACCESS_KEY environment variable is used.
required: true
default: null
requirements:
- "python >= 2.6"
- "boto >= 2.9"
author: "Benno Joy (@bennojoy)"
'''
EXAMPLES = '''
# upload some content
- gc_storage: bucket=mybucket object=key.txt src=/usr/local/myfile.txt mode=put permission=public-read
# upload some headers
- gc_storage: bucket=mybucket object=key.txt src=/usr/local/myfile.txt headers='{"Content-Encoding": "gzip"}'
# download some content
- gc_storage: bucket=mybucket object=key.txt dest=/usr/local/myfile.txt mode=get
# Download an object as a string to use else where in your playbook
- gc_storage: bucket=mybucket object=key.txt mode=get_str
# Create an empty bucket
- gc_storage: bucket=mybucket mode=create
# Create a bucket with key as directory
- gc_storage: bucket=mybucket object=/my/directory/path mode=create
# Delete a bucket and all contents
- gc_storage: bucket=mybucket mode=delete
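# Get a download url for an object (illustrative example; get_url and expiration are described in the options above)
- gc_storage: bucket=mybucket object=key.txt mode=get_url expiration=600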
'''
import os
import urlparse
import hashlib
try:
import boto
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def grant_check(module, gs, obj):
try:
acp = obj.get_acl()
if module.params.get('permission') == 'public-read':
grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
if not grant:
obj.set_acl('public-read')
module.exit_json(changed=True, result="The objects permission as been set to public-read")
if module.params.get('permission') == 'authenticated-read':
grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
if not grant:
obj.set_acl('authenticated-read')
module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
return True
def key_check(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
key_check = bucket.get_key(obj)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if key_check:
grant_check(module, gs, key_check)
return True
else:
return False
def keysum(module, gs, bucket, obj):
bucket = gs.lookup(bucket)
key_check = bucket.get_key(obj)
if not key_check:
return None
md5_remote = key_check.etag[1:-1]
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
if etag_multipart is True:
module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
return md5_remote
def bucket_check(module, gs, bucket):
try:
result = gs.lookup(bucket)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if result:
grant_check(module, gs, result)
return True
else:
return False
def create_bucket(module, gs, bucket):
try:
bucket = gs.create_bucket(bucket)
bucket.set_acl(module.params.get('permission'))
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if bucket:
return True
def delete_bucket(module, gs, bucket):
try:
bucket = gs.lookup(bucket)
bucket_contents = bucket.list()
for key in bucket_contents:
bucket.delete_key(key.name)
bucket.delete()
return True
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def delete_key(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
bucket.delete_key(obj)
module.exit_json(msg="Object deleted from bucket ", changed=True)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def create_dirkey(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_string('')
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def upload_file_check(src):
if os.path.exists(src):
file_exists = True
else:
file_exists = False
if os.path.isdir(src):
module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True)
return file_exists
def path_check(path):
if os.path.exists(path):
return True
else:
return False
def transform_headers(headers):
"""
Boto url-encodes values unless we convert the value to `str`, so doing
this prevents 'max-age=100000' from being converted to "max-age%3D100000".
:param headers: Headers to convert
:type headers: dict
:rtype: dict
"""
for key, value in headers.items():
headers[key] = str(value)
return headers
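# Illustrative sketch (not part of the module's public interface): passing
# headers such as {'Cache-Control': 100000} through transform_headers() yields
# {'Cache-Control': '100000'}, i.e. the same dict with every value coerced to
# str so boto does not URL-encode characters like '=' in header values.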
def upload_gsfile(module, gs, bucket, obj, src, expiry):
try:
bucket = gs.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_filename(
filename=src,
headers=transform_headers(module.params.get('headers'))
)
key.set_acl(module.params.get('permission'))
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
except gs.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_gsfile(module, gs, bucket, obj, dest):
try:
bucket = gs.lookup(bucket)
key = bucket.lookup(obj)
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except gs.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_gsstr(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
key = bucket.lookup(obj)
contents = key.get_contents_as_string()
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except gs.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def get_download_url(module, gs, bucket, obj, expiry):
try:
bucket = gs.lookup(bucket)
key = bucket.lookup(obj)
url = key.generate_url(expiry)
module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def handle_get(module, gs, bucket, obj, overwrite, dest):
md5_remote = keysum(module, gs, bucket, obj)
md5_local = module.md5(dest)
if md5_local == md5_remote:
module.exit_json(changed=False)
if md5_local != md5_remote and not overwrite:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
else:
download_gsfile(module, gs, bucket, obj, dest)
def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
    # Let's check to see if the bucket exists to get ground truth.
bucket_rc = bucket_check(module, gs, bucket)
key_rc = key_check(module, gs, bucket, obj)
    # Let's check the key state. Does it exist, and if it does, compute the etag md5sum.
if bucket_rc and key_rc:
md5_remote = keysum(module, gs, bucket, obj)
md5_local = module.md5(src)
if md5_local == md5_remote:
module.exit_json(msg="Local and remote object are identical", changed=False)
if md5_local != md5_remote and not overwrite:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
else:
upload_gsfile(module, gs, bucket, obj, src, expiration)
if not bucket_rc:
create_bucket(module, gs, bucket)
upload_gsfile(module, gs, bucket, obj, src, expiration)
# If bucket exists but key doesn't, just upload.
if bucket_rc and not key_rc:
upload_gsfile(module, gs, bucket, obj, src, expiration)
def handle_delete(module, gs, bucket, obj):
if bucket and not obj:
if bucket_check(module, gs, bucket):
module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=delete_bucket(module, gs, bucket))
else:
module.exit_json(msg="Bucket does not exist.", changed=False)
if bucket and obj:
if bucket_check(module, gs, bucket):
if key_check(module, gs, bucket, obj):
module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj))
else:
module.exit_json(msg="Object does not exists.", changed=False)
else:
module.exit_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
def handle_create(module, gs, bucket, obj):
if bucket and not obj:
if bucket_check(module, gs, bucket):
module.exit_json(msg="Bucket already exists.", changed=False)
else:
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket))
if bucket and obj:
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if bucket_check(module, gs, bucket):
if key_check(module, gs, bucket, dirobj):
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
else:
create_dirkey(module, gs, bucket, dirobj)
else:
create_bucket(module, gs, bucket)
create_dirkey(module, gs, bucket, dirobj)
def main():
module = AnsibleModule(
argument_spec = dict(
bucket = dict(required=True),
object = dict(default=None),
src = dict(default=None),
dest = dict(default=None),
expiration = dict(default=600, aliases=['expiry']),
mode = dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
permission = dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
headers = dict(type='dict', default={}),
gs_secret_key = dict(no_log=True, required=True),
gs_access_key = dict(required=True),
overwrite = dict(default=True, type='bool', aliases=['force']),
),
)
if not HAS_BOTO:
module.fail_json(msg='boto 2.9+ required for this module')
bucket = module.params.get('bucket')
obj = module.params.get('object')
src = module.params.get('src')
dest = module.params.get('dest')
if dest:
dest = os.path.expanduser(dest)
mode = module.params.get('mode')
expiry = module.params.get('expiration')
gs_secret_key = module.params.get('gs_secret_key')
gs_access_key = module.params.get('gs_access_key')
overwrite = module.params.get('overwrite')
if mode == 'put':
        if not src or not obj:
module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters")
if mode == 'get':
        if not dest or not obj:
module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters")
if obj:
obj = os.path.expanduser(module.params['object'])
try:
gs = boto.connect_gs(gs_access_key, gs_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
if mode == 'get':
if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
module.fail_json(msg="Target bucket/key cannot be found", failed=True)
if not path_check(dest):
download_gsfile(module, gs, bucket, obj, dest)
else:
handle_get(module, gs, bucket, obj, overwrite, dest)
if mode == 'put':
if not path_check(src):
module.fail_json(msg="Local object for PUT does not exist", failed=True)
handle_put(module, gs, bucket, obj, overwrite, src, expiry)
# Support for deleting an object if we have both params.
if mode == 'delete':
handle_delete(module, gs, bucket, obj)
if mode == 'create':
handle_create(module, gs, bucket, obj)
if mode == 'get_url':
if bucket and obj:
if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
get_download_url(module, gs, bucket, obj, expiry)
else:
module.fail_json(msg="Key/Bucket does not exist", failed=True)
else:
module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
# --------------------------- Get the String contents of an Object -------------------------
if mode == 'get_str':
if bucket and obj:
if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
download_gsstr(module, gs, bucket, obj)
else:
module.fail_json(msg="Key/Bucket does not exist", failed=True)
else:
module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
mdanielwork/intellij-community
|
refs/heads/master
|
python/testData/testRunner/env/nose/test_with_slow/test_with_slow.py
|
30
|
from nose.plugins.attrib import attr
def test_fast():
pass
@attr('slow')
def test_Slow():
pass
|
akhilaananthram/nupic
|
refs/heads/master
|
examples/opf/experiments/anomaly/spatial/2field_few_skewed/description.py
|
160
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
            # at each step; 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
            # Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
            # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [claModelControlEnableSPLearningCb, claModelControlEnableTPLearningCb],
# 'setup' : [claModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
|
MartinDelzant/scikit-learn
|
refs/heads/master
|
sklearn/utils/sparsetools/_graph_validation.py
|
364
|
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import csr_matrix, isspmatrix, isspmatrix_csc
from ._graph_tools import csgraph_to_dense, csgraph_from_dense,\
csgraph_masked_from_dense, csgraph_from_masked
DTYPE = np.float64
def validate_graph(csgraph, directed, dtype=DTYPE,
csr_output=True, dense_output=True,
copy_if_dense=False, copy_if_sparse=False,
null_value_in=0, null_value_out=np.inf,
infinity_null=True, nan_null=True):
"""Routine for validation and conversion of csgraph inputs"""
if not (csr_output or dense_output):
raise ValueError("Internal: dense or csr output must be true")
# if undirected and csc storage, then transposing in-place
# is quicker than later converting to csr.
if (not directed) and isspmatrix_csc(csgraph):
csgraph = csgraph.T
if isspmatrix(csgraph):
if csr_output:
csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse)
else:
csgraph = csgraph_to_dense(csgraph, null_value=null_value_out)
elif np.ma.is_masked(csgraph):
if dense_output:
mask = csgraph.mask
csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense)
csgraph[mask] = null_value_out
else:
csgraph = csgraph_from_masked(csgraph)
else:
if dense_output:
csgraph = csgraph_masked_from_dense(csgraph,
copy=copy_if_dense,
null_value=null_value_in,
nan_null=nan_null,
infinity_null=infinity_null)
mask = csgraph.mask
csgraph = np.asarray(csgraph.data, dtype=DTYPE)
csgraph[mask] = null_value_out
else:
csgraph = csgraph_from_dense(csgraph, null_value=null_value_in,
infinity_null=infinity_null,
nan_null=nan_null)
if csgraph.ndim != 2:
raise ValueError("compressed-sparse graph must be two dimensional")
if csgraph.shape[0] != csgraph.shape[1]:
raise ValueError("compressed-sparse graph must be shape (N, N)")
return csgraph
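# Illustrative sketch (assumed input): for a dense symmetric 2x2 array such as
# np.array([[0., 1.], [1., 0.]]), validate_graph(graph, directed=False) is
# expected to return a dense float64 array in which the null (zero) entries
# have been replaced by np.inf, the default null_value_out.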
|
mbox/django
|
refs/heads/master
|
django/contrib/staticfiles/utils.py
|
114
|
import os
import fnmatch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
"""
    Return True or False depending on whether the ``path`` matches any
    pattern in ``patterns``.
"""
if patterns is None:
patterns = []
for pattern in patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
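# Illustrative sketch: matches_patterns('css/base.css', ['*.css']) returns True,
# while matches_patterns('css/base.css', ['*.js']) returns False; with
# patterns=None the pattern list is treated as empty and the result is False.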
def get_files(storage, ignore_patterns=None, location=''):
"""
Recursively walk the storage directories yielding the paths
of all files that should be copied.
"""
if ignore_patterns is None:
ignore_patterns = []
directories, files = storage.listdir(location)
for fn in files:
if matches_patterns(fn, ignore_patterns):
continue
if location:
fn = os.path.join(location, fn)
yield fn
for dir in directories:
if matches_patterns(dir, ignore_patterns):
continue
if location:
dir = os.path.join(location, dir)
for fn in get_files(storage, ignore_patterns, dir):
yield fn
def check_settings(base_url=None):
"""
Checks if the staticfiles settings have sane values.
"""
if base_url is None:
base_url = settings.STATIC_URL
if not base_url:
raise ImproperlyConfigured(
"You're using the staticfiles app "
"without having set the required STATIC_URL setting.")
if settings.MEDIA_URL == base_url:
raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
"settings must have different values")
if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and
(settings.MEDIA_ROOT == settings.STATIC_ROOT)):
raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
"settings must have different values")
|
cbitstech/Purple-Robot-Django
|
refs/heads/master
|
migrations/0034_auto__add_purplerobotdevicenote.py
|
1
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PurpleRobotDeviceNote'
db.create_table(u'purple_robot_app_purplerobotdevicenote', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('device', self.gf('django.db.models.fields.related.ForeignKey')(related_name='notes', to=orm['purple_robot_app.PurpleRobotDevice'])),
('note', self.gf('django.db.models.fields.TextField')(max_length=1024)),
('added', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'purple_robot_app', ['PurpleRobotDeviceNote'])
def backwards(self, orm):
# Deleting model 'PurpleRobotDeviceNote'
db.delete_table(u'purple_robot_app_purplerobotdevicenote')
models = {
u'purple_robot_app.purplerobotalert': {
'Meta': {'object_name': 'PurpleRobotAlert'},
'action_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'dismissed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'generated': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manually_dismissed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'probe': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'severity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
u'purple_robot_app.purplerobotconfiguration': {
'Meta': {'object_name': 'PurpleRobotConfiguration'},
'added': ('django.db.models.fields.DateTimeField', [], {}),
'contents': ('django.db.models.fields.TextField', [], {'max_length': '1048576'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '1024'})
},
u'purple_robot_app.purplerobotdevice': {
'Meta': {'object_name': 'PurpleRobotDevice'},
'config_last_fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'config_last_user_agent': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'configuration': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'devices'", 'null': 'True', 'to': u"orm['purple_robot_app.PurpleRobotConfiguration']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1048576', 'null': 'True', 'blank': 'True'}),
'device_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'devices'", 'null': 'True', 'to': u"orm['purple_robot_app.PurpleRobotDeviceGroup']"}),
'device_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256', 'db_index': 'True'}),
'hash_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'performance_metadata': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'max_length': '1048576'})
},
u'purple_robot_app.purplerobotdevicegroup': {
'Meta': {'object_name': 'PurpleRobotDeviceGroup'},
'configuration': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'groups'", 'null': 'True', 'to': u"orm['purple_robot_app.PurpleRobotConfiguration']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1048576', 'null': 'True', 'blank': 'True'}),
'group_id': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'purple_robot_app.purplerobotdevicenote': {
'Meta': {'object_name': 'PurpleRobotDeviceNote'},
'added': ('django.db.models.fields.DateTimeField', [], {}),
'device': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': u"orm['purple_robot_app.PurpleRobotDevice']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '1024'})
},
u'purple_robot_app.purplerobotevent': {
'Meta': {'object_name': 'PurpleRobotEvent'},
'event': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logged': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'payload': ('django.db.models.fields.TextField', [], {'max_length': '8388608', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'})
},
u'purple_robot_app.purplerobotexportjob': {
'Meta': {'object_name': 'PurpleRobotExportJob'},
'destination': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'export_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'probes': ('django.db.models.fields.TextField', [], {'max_length': '8196', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '512'}),
'users': ('django.db.models.fields.TextField', [], {'max_length': '8196', 'null': 'True', 'blank': 'True'})
},
u'purple_robot_app.purplerobotpayload': {
'Meta': {'object_name': 'PurpleRobotPayload'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'errors': ('django.db.models.fields.TextField', [], {'max_length': '65536', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payload': ('django.db.models.fields.TextField', [], {'max_length': '8388608'}),
'process_tags': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'})
},
u'purple_robot_app.purplerobotreading': {
'Meta': {'object_name': 'PurpleRobotReading', 'index_together': "[['probe', 'user_id'], ['logged', 'user_id'], ['probe', 'logged', 'user_id']]"},
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logged': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'payload': ('django.db.models.fields.TextField', [], {'max_length': '8388608'}),
'probe': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'})
},
u'purple_robot_app.purplerobotreport': {
'Meta': {'object_name': 'PurpleRobotReport'},
'generated': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'probe': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'report_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'purple_robot_app.purplerobottest': {
'Meta': {'object_name': 'PurpleRobotTest'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'frequency': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {}),
'probe': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'report': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
}
}
complete_apps = ['purple_robot_app']
|
paplorinc/intellij-community
|
refs/heads/master
|
python/testData/refactoring/makeFunctionTopLevel/methodSelfUsedAsOperand.py
|
44
|
class C(int):
def me<caret>thod(self):
print(self + 42)
|
grpc/grpc
|
refs/heads/master
|
bazel/test/python_test_repo/helloworld.py
|
13
|
# Copyright 2019 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter client."""
import contextlib
import datetime
import logging
import unittest
import grpc
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from concurrent import futures
import helloworld_pb2
import helloworld_pb2_grpc
_HOST = 'localhost'
_SERVER_ADDRESS = '{}:0'.format(_HOST)
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, context):
request_in_flight = datetime.datetime.now() - \
request.request_initiation.ToDatetime()
request_duration = duration_pb2.Duration()
request_duration.FromTimedelta(request_in_flight)
return helloworld_pb2.HelloReply(
message='Hello, %s!' % request.name,
request_duration=request_duration,
)
@contextlib.contextmanager
def _listening_server():
server = grpc.server(futures.ThreadPoolExecutor())
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
port = server.add_insecure_port(_SERVER_ADDRESS)
server.start()
try:
yield port
finally:
server.stop(0)
class ImportTest(unittest.TestCase):
def test_import(self):
with _listening_server() as port:
with grpc.insecure_channel('{}:{}'.format(_HOST, port)) as channel:
stub = helloworld_pb2_grpc.GreeterStub(channel)
request_timestamp = timestamp_pb2.Timestamp()
request_timestamp.GetCurrentTime()
response = stub.SayHello(helloworld_pb2.HelloRequest(
name='you',
request_initiation=request_timestamp,
),
wait_for_ready=True)
self.assertEqual(response.message, "Hello, you!")
self.assertGreater(response.request_duration.nanos, 0)
if __name__ == '__main__':
logging.basicConfig()
unittest.main()
|
paulot/NodeVector
|
refs/heads/master
|
node-0.12.0/deps/v8/tools/testrunner/__init__.py
|
651
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
ryfeus/lambda-packs
|
refs/heads/master
|
LightGBM_sklearn_scipy_numpy/source/numpy/core/_internal.py
|
9
|
"""
A place for code to be called from core C-code.
Some things are more easily handled in Python.
"""
from __future__ import division, absolute_import, print_function
import re
import sys
from numpy.compat import basestring
from .multiarray import dtype, array, ndarray
try:
import ctypes
except ImportError:
ctypes = None
from .numerictypes import object_
if (sys.byteorder == 'little'):
_nbo = b'<'
else:
_nbo = b'>'
def _makenames_list(adict, align):
allfields = []
fnames = list(adict.keys())
for fname in fnames:
obj = adict[fname]
n = len(obj)
if not isinstance(obj, tuple) or n not in [2, 3]:
raise ValueError("entry not a 2- or 3- tuple")
if (n > 2) and (obj[2] == fname):
continue
num = int(obj[1])
if (num < 0):
raise ValueError("invalid offset.")
format = dtype(obj[0], align=align)
if (n > 2):
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict, align)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if (len(res) > 2):
titles.append(res[2])
else:
titles.append(None)
return dtype({"names": names,
"formats": formats,
"offsets": offsets,
"titles": titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.metadata is None:
return descriptor.str
else:
new = descriptor.metadata.copy()
if new:
return (descriptor.str, new)
else:
return descriptor.str
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('', '|V%d' % num))
offset += num
elif field[1] < offset:
raise ValueError(
"dtype.descr is not defined for types with overlapping or "
"out-of-order fields")
if len(field) > 3:
name = (field[2], field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
if descriptor.itemsize > offset:
num = descriptor.itemsize - offset
result.append(('', '|V%d' % num))
return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
return ndarray.__new__(subtype, shape, dtype)
# format_re was originally from numarray by J. Todd Miller
format_re = re.compile(br'(?P<order1>[<>|=]?)'
br'(?P<repeats> *[(]?[ ,0-9L]*[)]? *)'
br'(?P<order2>[<>|=]?)'
br'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(br'\s*,\s*')
space_re = re.compile(br'\s+$')
# astr is a string (perhaps comma separated)
_convorder = {b'=': _nbo}
def _commastring(astr):
startindex = 0
result = []
while startindex < len(astr):
mo = format_re.match(astr, pos=startindex)
try:
(order1, repeats, order2, dtype) = mo.groups()
except (TypeError, AttributeError):
raise ValueError('format number %d of "%s" is not recognized' %
(len(result)+1, astr))
startindex = mo.end()
# Separator or ending padding
if startindex < len(astr):
if space_re.match(astr, pos=startindex):
startindex = len(astr)
else:
mo = sep_re.match(astr, pos=startindex)
if not mo:
raise ValueError(
'format number %d of "%s" is not recognized' %
(len(result)+1, astr))
startindex = mo.end()
if order2 == b'':
order = order1
elif order1 == b'':
order = order2
else:
order1 = _convorder.get(order1, order1)
order2 = _convorder.get(order2, order2)
if (order1 != order2):
raise ValueError(
'inconsistent byte-order specification %s and %s' %
(order1, order2))
order = order1
if order in [b'|', b'=', _nbo]:
order = b''
dtype = order + dtype
if (repeats == b''):
newitem = dtype
else:
newitem = (dtype, eval(repeats))
result.append(newitem)
return result
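# Illustrative sketch (approximate): _commastring(b'i4, (2,3)f8') is expected to
# yield [b'i4', (b'f8', (2, 3))] -- bare typestrings for scalar fields and
# (typestring, shape) tuples for sub-array fields.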
class dummy_ctype(object):
def __init__(self, cls):
self._cls = cls
def __mul__(self, other):
return self
def __call__(self, *other):
return self._cls(other)
def __eq__(self, other):
return self._cls == other._cls
def __ne__(self, other):
return self._cls != other._cls
def _getintp_ctype():
val = _getintp_ctype.cache
if val is not None:
return val
if ctypes is None:
import numpy as np
val = dummy_ctype(np.intp)
else:
char = dtype('p').char
if (char == 'i'):
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
def __init__(self, array, ptr=None):
if ctypes:
self._ctypes = ctypes
else:
self._ctypes = _missing_ctypes()
self._arr = array
self._data = ptr
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
return self._ctypes.cast(self._data, obj)
def shape_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
def get_data(self):
return self._data
def get_shape(self):
return self.shape_as(_getintp_ctype())
def get_strides(self):
return self.strides_as(_getintp_ctype())
def get_as_parameter(self):
return self._ctypes.c_void_p(self._data)
data = property(get_data, None, doc="c-types data")
shape = property(get_shape, None, doc="c-types shape")
strides = property(get_strides, None, doc="c-types strides")
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
def _newnames(datatype, order):
"""
Given a datatype and an order object, return a new names tuple, with the
order indicated
"""
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
seen = set()
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
if name in seen:
raise ValueError("duplicate field name: %s" % (name,))
else:
raise ValueError("unknown field name: %s" % (name,))
seen.add(name)
return tuple(list(order) + nameslist)
raise ValueError("unsupported order value: %s" % (order,))
def _copy_fields(ary):
"""Return copy of structured array with padding between fields removed.
Parameters
----------
ary : ndarray
Structured array from which to remove padding bytes
Returns
-------
ary_copy : ndarray
Copy of ary with padding bytes removed
"""
dt = ary.dtype
copy_dtype = {'names': dt.names,
'formats': [dt.fields[name][0] for name in dt.names]}
return array(ary, dtype=copy_dtype, copy=True)
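# Illustrative sketch (assumed dtype): for an array whose dtype is
# np.dtype({'names': ['a', 'b'], 'formats': ['u1', 'u1'], 'offsets': [0, 4]}),
# _copy_fields(arr) should return a copy with a packed itemsize of 2, the
# padding bytes between 'a' and 'b' having been removed.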
def _getfield_is_safe(oldtype, newtype, offset):
""" Checks safety of getfield for object arrays.
As in _view_is_safe, we need to check that memory containing objects is not
reinterpreted as a non-object datatype and vice versa.
Parameters
----------
oldtype : data-type
Data type of the original ndarray.
newtype : data-type
Data type of the field being accessed by ndarray.getfield
offset : int
Offset of the field being accessed by ndarray.getfield
Raises
------
TypeError
If the field access is invalid
"""
if newtype.hasobject or oldtype.hasobject:
if offset == 0 and newtype == oldtype:
return
if oldtype.names:
for name in oldtype.names:
if (oldtype.fields[name][1] == offset and
oldtype.fields[name][0] == newtype):
return
raise TypeError("Cannot get/set field of an object array")
return
def _view_is_safe(oldtype, newtype):
""" Checks safety of a view involving object arrays, for example when
doing::
np.zeros(10, dtype=oldtype).view(newtype)
Parameters
----------
oldtype : data-type
Data type of original ndarray
newtype : data-type
Data type of the view
Raises
------
TypeError
If the new type is incompatible with the old type.
"""
# if the types are equivalent, there is no problem.
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
if oldtype == newtype:
return
if newtype.hasobject or oldtype.hasobject:
raise TypeError("Cannot change data-type for object array.")
return
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype
_pep3118_native_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'h',
'H': 'H',
'i': 'i',
'I': 'I',
'l': 'l',
'L': 'L',
'q': 'q',
'Q': 'Q',
'e': 'e',
'f': 'f',
'd': 'd',
'g': 'g',
'Zf': 'F',
'Zd': 'D',
'Zg': 'G',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
_pep3118_standard_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'i2',
'H': 'u2',
'i': 'i4',
'I': 'u4',
'l': 'i4',
'L': 'u4',
'q': 'i8',
'Q': 'u8',
'e': 'f2',
'f': 'f',
'd': 'd',
'Zf': 'F',
'Zd': 'D',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
def _dtype_from_pep3118(spec):
class Stream(object):
def __init__(self, s):
self.s = s
self.byteorder = '@'
def advance(self, n):
res = self.s[:n]
self.s = self.s[n:]
return res
def consume(self, c):
if self.s[:len(c)] == c:
self.advance(len(c))
return True
return False
def consume_until(self, c):
if callable(c):
i = 0
while i < len(self.s) and not c(self.s[i]):
i = i + 1
return self.advance(i)
else:
i = self.s.index(c)
res = self.advance(i)
self.advance(len(c))
return res
@property
def next(self):
return self.s[0]
def __bool__(self):
return bool(self.s)
__nonzero__ = __bool__
stream = Stream(spec)
dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
return dtype
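# Illustrative sketch: on a typical little-endian build, _dtype_from_pep3118('i')
# resolves to dtype('int32'), while _dtype_from_pep3118('ii') resolves to a
# struct-like dtype([('f0', '<i4'), ('f1', '<i4')]) after _fix_names() fills in
# the unnamed fields.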
def __dtype_from_pep3118(stream, is_subdtype):
field_spec = dict(
names=[],
formats=[],
offsets=[],
itemsize=0
)
offset = 0
common_alignment = 1
is_padding = False
# Parse spec
while stream:
value = None
# End of structure, bail out to upper level
if stream.consume('}'):
break
# Sub-arrays (1)
shape = None
if stream.consume('('):
shape = stream.consume_until(')')
shape = tuple(map(int, shape.split(',')))
# Byte order
if stream.next in ('@', '=', '<', '>', '^', '!'):
byteorder = stream.advance(1)
if byteorder == '!':
byteorder = '>'
stream.byteorder = byteorder
# Byte order characters also control native vs. standard type sizes
if stream.byteorder in ('@', '^'):
type_map = _pep3118_native_map
type_map_chars = _pep3118_native_typechars
else:
type_map = _pep3118_standard_map
type_map_chars = _pep3118_standard_typechars
# Item sizes
itemsize_str = stream.consume_until(lambda c: not c.isdigit())
if itemsize_str:
itemsize = int(itemsize_str)
else:
itemsize = 1
# Data types
is_padding = False
if stream.consume('T{'):
value, align = __dtype_from_pep3118(
stream, is_subdtype=True)
elif stream.next in type_map_chars:
if stream.next == 'Z':
typechar = stream.advance(2)
else:
typechar = stream.advance(1)
is_padding = (typechar == 'x')
dtypechar = type_map[typechar]
if dtypechar in 'USV':
dtypechar += '%d' % itemsize
itemsize = 1
numpy_byteorder = {'@': '=', '^': '='}.get(
stream.byteorder, stream.byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
else:
raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
#
# Native alignment may require padding
#
# Here we assume that the presence of a '@' character implicitly implies
# that the start of the array is *already* aligned.
#
extra_offset = 0
if stream.byteorder == '@':
start_padding = (-offset) % align
intra_padding = (-value.itemsize) % align
offset += start_padding
if intra_padding != 0:
if itemsize > 1 or (shape is not None and _prod(shape) > 1):
# Inject internal padding to the end of the sub-item
value = _add_trailing_padding(value, intra_padding)
else:
# We can postpone the injection of internal padding,
# as the item appears at most once
extra_offset += intra_padding
# Update common alignment
common_alignment = _lcm(align, common_alignment)
# Convert itemsize to sub-array
if itemsize != 1:
value = dtype((value, (itemsize,)))
# Sub-arrays (2)
if shape is not None:
value = dtype((value, shape))
# Field name
if stream.consume(':'):
name = stream.consume_until(':')
else:
name = None
if not (is_padding and name is None):
if name is not None and name in field_spec['names']:
raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
% name)
field_spec['names'].append(name)
field_spec['formats'].append(value)
field_spec['offsets'].append(offset)
offset += value.itemsize
offset += extra_offset
field_spec['itemsize'] = offset
# extra final padding for aligned types
if stream.byteorder == '@':
field_spec['itemsize'] += (-offset) % common_alignment
# Check if this was a simple 1-item type, and unwrap it
if (field_spec['names'] == [None]
and field_spec['offsets'][0] == 0
and field_spec['itemsize'] == field_spec['formats'][0].itemsize
and not is_subdtype):
ret = field_spec['formats'][0]
else:
_fix_names(field_spec)
ret = dtype(field_spec)
# Finished
return ret, common_alignment
def _fix_names(field_spec):
""" Replace names which are None with the next unused f%d name """
names = field_spec['names']
for i, name in enumerate(names):
if name is not None:
continue
j = 0
while True:
name = 'f{}'.format(j)
if name not in names:
break
j = j + 1
names[i] = name
def _add_trailing_padding(value, padding):
"""Inject the specified number of padding bytes at the end of a dtype"""
if value.fields is None:
field_spec = dict(
names=['f0'],
formats=[value],
offsets=[0],
itemsize=value.itemsize
)
else:
fields = value.fields
names = value.names
field_spec = dict(
names=names,
formats=[fields[name][0] for name in names],
offsets=[fields[name][1] for name in names],
itemsize=value.itemsize
)
field_spec['itemsize'] += padding
return dtype(field_spec)
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a % b
return a
def _lcm(a, b):
return a // _gcd(a, b) * b
# Exception used in shares_memory()
class TooHardError(RuntimeError):
pass
class AxisError(ValueError, IndexError):
""" Axis supplied was invalid. """
def __init__(self, axis, ndim=None, msg_prefix=None):
# single-argument form just delegates to base class
if ndim is None and msg_prefix is None:
msg = axis
# do the string formatting here, to save work in the C code
else:
msg = ("axis {} is out of bounds for array of dimension {}"
.format(axis, ndim))
if msg_prefix is not None:
msg = "{}: {}".format(msg_prefix, msg)
super(AxisError, self).__init__(msg)
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
""" Format the error message for when __array_ufunc__ gives up. """
args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
['{}={!r}'.format(k, v)
for k, v in kwargs.items()])
args = inputs + kwargs.get('out', ())
types_string = ', '.join(repr(type(arg).__name__) for arg in args)
return ('operand type(s) all returned NotImplemented from '
'__array_ufunc__({!r}, {!r}, {}): {}'
.format(ufunc, method, args_string, types_string))
def _ufunc_doc_signature_formatter(ufunc):
"""
Builds a signature string which resembles PEP 457
This is used to construct the first line of the docstring
"""
# input arguments are simple
if ufunc.nin == 1:
in_args = 'x'
else:
in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin))
# output arguments are both keyword or positional
if ufunc.nout == 0:
out_args = ', /, out=()'
elif ufunc.nout == 1:
out_args = ', /, out=None'
else:
out_args = '[, {positional}], / [, out={default}]'.format(
positional=', '.join(
'out{}'.format(i+1) for i in range(ufunc.nout)),
default=repr((None,)*ufunc.nout)
)
# keyword only args depend on whether this is a gufunc
kwargs = (
", casting='same_kind'"
", order='K'"
", dtype=None"
", subok=True"
"[, signature"
", extobj]"
)
if ufunc.signature is None:
kwargs = ", where=True" + kwargs
# join all the parts together
return '{name}({in_args}{out_args}, *{kwargs})'.format(
name=ufunc.__name__,
in_args=in_args,
out_args=out_args,
kwargs=kwargs
)
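# Illustrative sketch: for np.add (nin=2, nout=1, no gufunc signature) this
# builds a first docstring line along the lines of
#   add(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K',
#       dtype=None, subok=True[, signature, extobj])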
|
Code4SA/mma-dexter
|
refs/heads/master
|
dexter/analysis/utils.py
|
1
|
from __future__ import division
from collections import defaultdict
import logging
import math
logger = logging.getLogger(__name__)
def calculate_entropy(table):
""" Calculate entropy across +table+, which is a map
representing a table: the keys are the columns and the
values are dicts whose keys in turn are the rows.
The entropy is a measure of how different each column
is to the other columns in the table.
Returns a map from column labels to entropy values.
"""
global logger
logger.debug("Calculating entropy")
col_labels = table.keys()
row_labels = set()
for d in table.itervalues():
row_labels.update(d.keys())
row_labels = list(row_labels)
col_sums = {}
row_sums = defaultdict(int)
total = 0
# sum across all directions
for col in col_labels:
# sum down column
col_sums[col] = sum(table[col].itervalues())
# sum across row
for row, n in table[col].iteritems():
row_sums[row] += n
total += n
# calculate entropy per column
entropy = {}
for col in col_labels:
col_total = col_sums[col]
if col_total == 0:
entropy[col] = 0
continue
row_coverage = defaultdict(int)
col_coverage = 0
for row in row_labels:
# how much does this row contribute to the total
row_fraction = row_sums[row] / total
if row_fraction > 0:
# the fraction this row contributes to the column,
# as a fraction of the total row
row_coverage[row] = table[col].get(row, 0) / col_total / row_fraction
else:
row_coverage[row] = 0
col_coverage += row_coverage[row]
k = 1 / col_coverage
total_p = 0
for row in row_labels:
p = k * row_coverage[row]
if p > 0:
p = p * math.log(p)
total_p += p
if len(row_labels) == 1:
# avoid 1/0
log = 1
else:
log = 1 / math.log(len(row_labels))
entropy[col] = -log * total_p
logger.debug("Done")
return entropy
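# Illustrative sketch (assumed labels): the +table+ argument maps column labels
# to {row_label: count} dicts, e.g.
#   table = {'colA': {'r1': 10, 'r2': 0}, 'colB': {'r1': 5, 'r2': 5}}
# calculate_entropy(table) then returns {'colA': <float>, 'colB': <float>},
# one entropy value per column.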
|
hitmoon/MyRop
|
refs/heads/master
|
gadget.py
|
1
|
import re
from capstone import *
import sys
gadget_arm = [
[b"[\x30-\x3f]\xff\x2f\xe1", 4, 4], # blx, arm mode
[b"[\x10-\x1f]\xff\x2f\xe1", 4, 4], # bx, arm mode
[b"[\x00-\xff]\x80\xbd\xe8", 4, 4], # pop {,pc}, arm mode
[b"[\x00-\xff]\x00\x00\xef", 4, 4], # swi, arm mode
]
gadget_thumb = [
[b"[\x80-\xf8]\x47", 2, 2], # blx, thumb mode
[b"[\x00-\x78]\x47", 2, 2], # bx, thumb mode
[b"[\x00-\xff]\xbd", 2, 2], # pop {,pc}, thumb mode
[b"[\x00-\xff]\xdf", 2, 2], # swi, thumb mode
]
class Gadget:
def __init__(self, binary, depth, mode):
self.__binary = binary
self.__depth = depth
self.__mode = mode
def getROPGadgets(self, section):
arch = self.__binary.getArch()
archMode = self.__binary.getArchMode()
gadgets = []
if arch == CS_ARCH_X86:
gadgets = [
[b"\xc3", 1, 1], # ret
[b"\xc2[\x00-\xff]{2}", 3, 1], # ret <imm>
]
elif arch == CS_ARCH_ARM:
if self.__mode == "arm":
gadgets.extend(gadget_arm)
gadgets.extend(gadget_thumb)
elif self.__mode == "thumb":
gadgets.extend(gadget_thumb)
else:
print "Error: Bad arm mode"
sys.exit(-1)
archMode = CS_MODE_ARM
return self.findGadgets(section, gadgets, arch, archMode)
def findGadgets(self, section, gadgets, arch, archMode):
OP = 0
SIZE = 1
ALIGN = 2
ret = []
md = Cs(arch, archMode)
md.detail = True
md.syntax = CS_OPT_SYNTAX_ATT
for gad in gadgets:
allPos = [ m.start() for m in re.finditer(gad[OP], section["opcodes"])]
for pos in allPos:
for i in range(self.__depth):
if (section["vaddr"] + pos - (i * gad[ALIGN])) % gad[ALIGN] == 0: # align right
asms = md.disasm(section["opcodes"][pos - (i * gad[ALIGN]):pos + gad[SIZE]], section["vaddr"] + pos)
gadget = ""
insns = []
for asm in asms:
insns.append(asm)
gadget += (asm.mnemonic + " " + asm.op_str + " ; ").replace("  ", " ")  # collapse the double space left when op_str is empty
#print asm.insn_name()
if len(gadget) > 0:
ret += [ {"vaddr": section["vaddr"] + pos - (i * gad[ALIGN]),
"gadget": gadget,
"codes": section["opcodes"][pos - (i * gad[ALIGN]): pos + gad[SIZE]],
"insn": insns} ]
return ret
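# Hedged usage sketch (illustrative; the shapes of `binary` and `section` are
# inferred from the calls above, not taken from this repository's loaders):
#
#   g = Gadget(binary, depth=3, mode="arm")             # binary provides getArch()/getArchMode()
#   section = {"vaddr": 0x8000, "opcodes": text_bytes}  # raw bytes of an executable section
#   for gad in g.getROPGadgets(section):
#       print "0x%08x: %s" % (gad["vaddr"], gad["gadget"])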
|
waveform80/dbsuite
|
refs/heads/master
|
dbsuite/main/__init__.py
|
1
|
# vim: set et sw=4 sts=4:
# Copyright 2012 Dave Hughes.
#
# This file is part of dbsuite.
#
# dbsuite is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# dbsuite is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# dbsuite. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division,
)
import sys
import os
import optparse
import ConfigParser
import logging
import locale
import textwrap
import traceback
import glob
import dbsuite.plugins
from dbsuite.termsize import terminal_size
from dbsuite import __version__
# Use the user's default locale instead of C
locale.setlocale(locale.LC_ALL, '')
class HelpFormatter(optparse.IndentedHelpFormatter):
# Customize the width of help output
def __init__(self):
width = min(130, terminal_size()[0] - 2)
optparse.IndentedHelpFormatter.__init__(self, max_help_position=width // 3, width=width)
class OptionParser(optparse.OptionParser):
# Customize error handling to raise an exception (default simply prints an
# error and terminates execution)
def error(self, msg):
raise optparse.OptParseError(msg)
class Utility(object):
# Get the default output encoding from the default locale
encoding = locale.getdefaultlocale()[1]
# This class is the abstract base class for each of the command line
# utility classes defined below. It provides some basic facilities like an
# option parser, console pretty-printing, logging and exception handling
def __init__(self, usage=None, version=None, description=None):
super(Utility, self).__init__()
self.wrapper = textwrap.TextWrapper()
self.wrapper.width = min(130, terminal_size()[0] - 2)
if usage is None:
usage = self.__doc__.split('\n')[0]
if version is None:
version = '%%prog %s' % __version__
if description is None:
description = self.wrapper.fill('\n'.join(
line.lstrip()
for line in self.__doc__.split('\n')[1:]
if line.lstrip()
))
self.parser = OptionParser(
usage=usage,
version=version,
description=description,
formatter=HelpFormatter()
)
self.parser.set_defaults(
debug=False,
logfile='',
loglevel=logging.WARNING
)
self.parser.add_option(
'-q', '--quiet', dest='loglevel', action='store_const',
const=logging.ERROR, help='produce less console output')
self.parser.add_option(
'-v', '--verbose', dest='loglevel', action='store_const',
const=logging.INFO, help='produce more console output')
self.parser.add_option(
'-l', '--log-file', dest='logfile',
help='log messages to the specified file')
self.parser.add_option(
'-D', '--debug', dest='debug', action='store_true',
help='enables debug mode (runs under PDB)')
def __call__(self, args=None):
if args is None:
args = sys.argv[1:]
(options, args) = self.parser.parse_args(self.expand_args(args))
console = logging.StreamHandler(sys.stderr)
console.setFormatter(logging.Formatter('%(message)s'))
console.setLevel(options.loglevel)
logging.getLogger().addHandler(console)
if options.logfile:
logfile = logging.FileHandler(options.logfile)
logfile.setFormatter(logging.Formatter('%(asctime)s, %(levelname)s, %(message)s'))
logfile.setLevel(logging.DEBUG)
logging.getLogger().addHandler(logfile)
if options.debug:
console.setLevel(logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
if options.debug:
import pdb
return pdb.runcall(self.main, options, args)
else:
try:
return self.main(options, args) or 0
except:
return self.handle(*sys.exc_info())
def expand_args(self, args):
"""Expands @response files and wildcards in the command line"""
result = []
for arg in args:
if arg[0] == '@' and len(arg) > 1:
arg = os.path.normcase(os.path.realpath(os.path.abspath(os.path.expanduser(arg[1:]))))
try:
with open(arg, 'rU') as resp_file:
for resp_arg in resp_file:
# Only strip the line break (whitespace is significant)
resp_arg = resp_arg.rstrip('\n')
# Only perform globbing on response file values for UNIX
if sys.platform.startswith('win'):
result.append(resp_arg)
else:
result.extend(self.glob_arg(resp_arg))
except IOError, e:
raise optparse.OptionValueError(str(e))
else:
result.append(arg)
# Perform globbing on everything for Windows
if sys.platform.startswith('win'):
result = reduce(lambda a, b: a + b, [self.glob_arg(f) for f in result], [])
return result
def glob_arg(self, arg):
"""Performs shell-style globbing of arguments"""
if set('*?[') & set(arg):
args = glob.glob(os.path.normcase(os.path.realpath(os.path.abspath(os.path.expanduser(arg)))))
if args:
return args
# Return the original parameter in the case where the parameter
# contains no wildcards or globbing returns no results
return [arg]
def handle(self, type, value, tb):
"""Exception hook for non-debug mode."""
if issubclass(type, (SystemExit, KeyboardInterrupt)):
# Just ignore system exit and keyboard interrupt errors (after all,
# they're user generated)
return 130
elif issubclass(type, (IOError, ConfigParser.Error, dbsuite.plugins.PluginError)):
# For simple errors like IOError and PluginError just output the
# message which should be sufficient for the end user (no need to
# confuse them with a full stack trace)
logging.critical(str(value))
return 1
elif issubclass(type, (optparse.OptParseError,)):
# For option parser errors output the error along with a message
# indicating how the help page can be displayed
logging.critical(str(value))
logging.critical('Try the --help option for more information.')
return 2
else:
# Otherwise, log the stack trace and the exception into the log
# file for debugging purposes
for line in traceback.format_exception(type, value, tb):
for s in line.rstrip().split('\n'):
logging.critical(s)
return 1
def pprint(self, s, indent=None, initial_indent='', subsequent_indent=''):
"""Pretty-print routine for console output.
This routine exists to provide pretty-printing capabilities for console
output. It makes use of a TextWrapper instance which is configured on
startup with the width of the console to wrap text nicely.
The s parameter provides the string or list of strings (used as a list
of paragraphs) to print. The indent, initial_indent and
subsequent_indent parameters are passed to the TextWrapper instance to
specify indentations. Either provide indent alone (which will be used
as both initial_indent and subsequent_indent), or provide initial_indent
and subsequent_indent separately.
"""
if indent is None:
self.wrapper.initial_indent = initial_indent
self.wrapper.subsequent_indent = subsequent_indent
else:
self.wrapper.initial_indent = indent
self.wrapper.subsequent_indent = indent
if isinstance(s, basestring):
s = [s]
first = True
for para in s:
if first:
first = False
else:
sys.stdout.write('\n')
para = self.wrapper.fill(para)
if isinstance(para, unicode):
para = para.encode(self.encoding)
sys.stdout.write(para + '\n')
def main(self, options, args):
pass
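# Hedged usage sketch (illustrative; not part of the original module): a
# concrete command-line tool would subclass Utility, supply a docstring (its
# first line becomes the usage string, the remainder the description) and
# override main(), e.g.:
#
#   class MyTool(Utility):
#       """%prog [options] file...
#
#       Pretty-prints the named files to the console.
#       """
#       def main(self, options, args):
#           for arg in args:
#               self.pprint(arg)
#
#   if __name__ == '__main__':
#       sys.exit(MyTool()())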
|
jacksonokuhn/dataverse
|
refs/heads/develop
|
tests/test_create_test_account.py
|
18
|
from selenium import webdriver
import time, unittest, config
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_create_test_account(unittest.TestCase):
def setUp(self):
if (config.local):
self.wd = webdriver.Firefox()
else:
desired_capabilities = webdriver.DesiredCapabilities.FIREFOX
desired_capabilities['version'] = '24'
desired_capabilities['platform'] = 'Linux'
desired_capabilities['name'] = 'test_access'
self.wd = webdriver.Remote(
desired_capabilities=desired_capabilities,
command_executor="http://esodvn:325caef9-81dd-47a5-8b74-433057ce888f@ondemand.saucelabs.com:80/wd/hub"
)
self.wd.implicitly_wait(60)
def test_test_create_test_account(self):
success = True
wd = self.wd
wd.get(config.accessURL)
wd.find_element_by_link_text("Create Account").click()
wd.find_element_by_id("dataverseUserForm:userName").click()
wd.find_element_by_id("dataverseUserForm:userName").clear()
wd.find_element_by_id("dataverseUserForm:userName").send_keys("tester")
wd.find_element_by_id("dataverseUserForm:inputPassword").click()
wd.find_element_by_id("dataverseUserForm:inputPassword").clear()
wd.find_element_by_id("dataverseUserForm:inputPassword").send_keys("tester")
wd.find_element_by_id("dataverseUserForm:retypePassword").click()
wd.find_element_by_id("dataverseUserForm:retypePassword").clear()
wd.find_element_by_id("dataverseUserForm:retypePassword").send_keys("tester")
wd.find_element_by_id("dataverseUserForm:firstName").click()
wd.find_element_by_id("dataverseUserForm:firstName").clear()
wd.find_element_by_id("dataverseUserForm:firstName").send_keys("test")
wd.find_element_by_id("dataverseUserForm:lastName").click()
wd.find_element_by_id("dataverseUserForm:lastName").clear()
wd.find_element_by_id("dataverseUserForm:lastName").send_keys("user")
wd.find_element_by_id("dataverseUserForm:email").click()
wd.find_element_by_id("dataverseUserForm:email").clear()
wd.find_element_by_id("dataverseUserForm:email").send_keys("kcondon@hmdc.harvard.edu")
wd.find_element_by_id("dataverseUserForm:institution").click()
wd.find_element_by_id("dataverseUserForm:institution").clear()
wd.find_element_by_id("dataverseUserForm:institution").send_keys("IQSS")
wd.find_element_by_xpath("//div[@id='dataverseUserForm:j_idt45']/div[3]").click()
wd.find_element_by_xpath("//div[@class='ui-selectonemenu-items-wrapper']//li[.='Staff']").click()
wd.find_element_by_id("dataverseUserForm:phone").click()
wd.find_element_by_id("dataverseUserForm:phone").clear()
wd.find_element_by_id("dataverseUserForm:phone").send_keys("1-222-333-4444")
wd.find_element_by_id("dataverseUserForm:save").click()
time.sleep(1)
if ("This Username is already taken." in wd.find_element_by_tag_name("html").text):
print("Username exists. Exiting.")
return
if not ("Log Out" in wd.find_element_by_tag_name("html").text):
success = False
print("User was not logged in after create account.")
self.assertTrue(success)
def tearDown(self):
if not (config.local):
print("Link to your job: https://saucelabs.com/jobs/%s" % self.wd.session_id)
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
linlife/Zabbix
|
refs/heads/master
|
alert_by_email/zabbix_sendmail.py
|
1
|
#!/usr/bin/python
#coding:utf-8
import smtplib
from email.mime.text import MIMEText
import os
import argparse
import logging
import datetime
mail_host = 'smtp.qq.com'
mail_user = ''
mail_pass = ''
mail_postfix = 'qq.com'
def send_mail(mail_to,subject,content):
me = mail_user+"<"+mail_user+"@"+mail_postfix+">"
msg = MIMEText(content)
msg['Subject'] = subject
msg['From'] = me
msg['to'] = mail_to
global sendstatus
global senderr
try:
smtp = smtplib.SMTP()
smtp.connect('%s:25'%mail_host)
smtp.login(mail_user,mail_pass)
smtp.sendmail(me,mail_to,msg.as_string())
smtp.close()
# print 'send ok'
sendstatus = True
except Exception,e:
senderr=str(e)
# print 'not send '
print senderr
sendstatus = False
def logwrite(sendstatus,mail_to,content):
logpath='/var/log/zabbix/alert'
if not sendstatus:
content = senderr
if not os.path.isdir(logpath):
os.makedirs(logpath)
t=datetime.datetime.now()
daytime=t.strftime('%Y-%m-%d')
daylogfile=logpath+'/'+str(daytime)+'.log'
logging.basicConfig(filename=daylogfile,level=logging.DEBUG)
os.system('chown zabbix.zabbix {0}'.format(daylogfile))
logging.info('*'*130)
logging.debug(str(t)+' mail send to {0},content is :\n {1}'.format(mail_to,content))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Send mail to user for zabbix alerting')
parser.add_argument('mail_to',action="store", help='The address of the E-mail that send to user ')
parser.add_argument('subject',action="store", help='The subject of the E-mail')
parser.add_argument('content',action="store", help='The content of the E-mail')
args = parser.parse_args()
mail_to=args.mail_to
subject=args.subject
content=args.content
send_mail(mail_to,subject,content)
logwrite(sendstatus,mail_to,content)
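# Hedged usage note (illustrative): Zabbix passes three positional arguments
# to alert scripts, so a manual test run would look like:
#   ./zabbix_sendmail.py user@example.com "Test subject" "Test body"
# mail_user and mail_pass above must be filled in before smtp.login() can
# succeed, and the zabbix user needs write access to /var/log/zabbix/alert.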
|
sciCloud/OLiMS
|
refs/heads/master
|
lims/browser/header_table.py
|
2
|
"""ARs and Samples use HeaderTable to display object fields in their custom
view and edit screens.
"""
from dependencies.dependency import getToolByName
from lims.browser import BrowserView
from lims.interfaces import IHeaderTableFieldRenderer
from dependencies.dependency import ViewPageTemplateFile
from dependencies.dependency import PloneMessageFactory as _p
from lims.utils import getHiddenAttributesForClass
from lims.workflow import doActionFor
from lims.utils import t
from lims import bikaMessageFactory as _
from dependencies.dependency import getAdapter
from dependencies.dependency import getSecurityManager
from dependencies.dependency import view
from dependencies.dependency import ComponentLookupError
class HeaderTableView(BrowserView):
template = ViewPageTemplateFile('templates/header_table.pt')
def __call__(self):
self.errors = {}
if 'header_table_submitted' in self.request:
schema = self.context.Schema()
fields = schema.fields()
form = self.request.form
for field in fields:
fieldname = field.getName()
if fieldname in form:
if fieldname + "_uid" in form:
# references (process_form would normally do *_uid trick)
field.getMutator(self.context)(form[fieldname + "_uid"])
else:
# other fields
field.getMutator(self.context)(form[fieldname])
message = _p("Changes saved.")
self.context.plone_utils.addPortalMessage(message, 'info')
return self.template()
def three_column_list(self, input_list):
list_len = len(input_list)
# Calculate the length of the sublists
sublist_len = (list_len % 3 == 0 and list_len / 3 or list_len / 3 + 1)
def _list_end(num):
# Calculate the list end point given the list number
return num == 2 and list_len or (num + 1) * sublist_len
# Generate only filled columns
final = []
for i in range(3):
column = input_list[i * sublist_len:_list_end(i)]
if len(column) > 0:
final.append(column)
return final
def render_field_view(self, field):
fieldname = field.getName()
field = self.context.Schema()[fieldname]
ret = {'fieldName': fieldname, 'mode': 'view'}
try:
adapter = getAdapter(self.context,
interface=IHeaderTableFieldRenderer,
name=fieldname)
except ComponentLookupError:
adapter = None
if adapter:
ret = {'fieldName': fieldname,
'mode': 'structure',
'html': adapter(field)}
else:
if field.getType().find("ool") > -1:
value = field.get(self.context)
ret = {'fieldName': fieldname,
'mode': 'structure',
'html': t(_('Yes')) if value else t(_('No'))
}
elif field.getType().find("Reference") > -1:
# Prioritize method retrieval over schema's field
targets = None
if hasattr(self.context, 'get%s' % fieldname):
fieldaccessor = getattr(self.context, 'get%s' % fieldname)
if callable(fieldaccessor):
targets = fieldaccessor()
if not targets:
targets = field.get(self.context)
if targets:
if not type(targets) == list:
targets = [targets,]
sm = getSecurityManager()
if all([sm.checkPermission(view, ta) for ta in targets]):
a = ["<a href='%s'>%s</a>" % (target.absolute_url(),
target.Title())
for target in targets]
ret = {'fieldName': fieldname,
'mode': 'structure',
'html': ", ".join(a)}
else:
ret = {'fieldName': fieldname,
'mode': 'structure',
'html': ", ".join([ta.Title() for ta in targets])}
else:
ret = {'fieldName': fieldname,
'mode': 'structure',
'html': ''}
elif field.getType().lower().find('datetime') > -1:
value = field.get(self.context)
ret = {'fieldName': fieldname,
'mode': 'structure',
'html': self.ulocalized_time(value, long_format=True)
}
return ret
def sublists(self):
ret = []
prominent = []
for field in self.context.Schema().fields():
fieldname = field.getName()
state = field.widget.isVisible(self.context, 'header_table', default='invisible', field=field)
if state == 'invisible':
continue
elif state == 'prominent':
if field.widget.isVisible(self.context, 'edit', default='invisible', field=field) == 'visible':
prominent.append({'fieldName': fieldname, 'mode': 'edit'})
elif field.widget.isVisible(self.context, 'view', default='invisible', field=field) == 'visible':
prominent.append(self.render_field_view(field))
elif state == 'visible':
if field.widget.isVisible(self.context, 'edit', default='invisible', field=field) == 'visible':
ret.append({'fieldName': fieldname, 'mode': 'edit'})
elif field.widget.isVisible(self.context, 'view', default='invisible', field=field) == 'visible':
ret.append(self.render_field_view(field))
return prominent, self.three_column_list(ret)
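# Illustrative note (not part of the original source): three_column_list()
# splits a flat list into at most three roughly equal columns, e.g.
#   three_column_list([1, 2, 3, 4, 5, 6, 7]) -> [[1, 2, 3], [4, 5, 6], [7]]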
|
ccellis/WHACK2016
|
refs/heads/master
|
flask/lib/python2.7/site-packages/sqlalchemy/ext/declarative/api.py
|
34
|
# ext/declarative/api.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Public API functions and helpers for declarative."""
from ...schema import Table, MetaData, Column
from ...orm import synonym as _orm_synonym, \
comparable_property,\
interfaces, properties, attributes
from ...orm.util import polymorphic_union
from ...orm.base import _mapper_or_none
from ...util import OrderedDict, hybridmethod, hybridproperty
from ... import util
from ... import exc
import weakref
from .base import _as_declarative, \
_declarative_constructor,\
_DeferredMapperConfig, _add_attribute
from .clsregistry import _class_resolver
def instrument_declarative(cls, registry, metadata):
"""Given a class, configure the class declaratively,
using the given registry, which can be any dictionary, and
MetaData object.
"""
if '_decl_class_registry' in cls.__dict__:
raise exc.InvalidRequestError(
"Class %r already has been "
"instrumented declaratively" % cls)
cls._decl_class_registry = registry
cls.metadata = metadata
_as_declarative(cls, cls.__name__, cls.__dict__)
def has_inherited_table(cls):
"""Given a class, return True if any of the classes it inherits from has a
mapped table, otherwise return False.
"""
for class_ in cls.__mro__[1:]:
if getattr(class_, '__table__', None) is not None:
return True
return False
class DeclarativeMeta(type):
def __init__(cls, classname, bases, dict_):
if '_decl_class_registry' not in cls.__dict__:
_as_declarative(cls, classname, cls.__dict__)
type.__init__(cls, classname, bases, dict_)
def __setattr__(cls, key, value):
_add_attribute(cls, key, value)
def synonym_for(name, map_column=False):
"""Decorator, make a Python @property a query synonym for a column.
A decorator version of :func:`~sqlalchemy.orm.synonym`. The function being
decorated is the 'descriptor', otherwise passes its arguments through to
synonym()::
@synonym_for('col')
@property
def prop(self):
return 'special sauce'
The regular ``synonym()`` is also usable directly in a declarative setting
and may be convenient for read/write properties::
prop = synonym('col', descriptor=property(_read_prop, _write_prop))
"""
def decorate(fn):
return _orm_synonym(name, map_column=map_column, descriptor=fn)
return decorate
def comparable_using(comparator_factory):
"""Decorator, allow a Python @property to be used in query criteria.
This is a decorator front end to
:func:`~sqlalchemy.orm.comparable_property` that passes
through the comparator_factory and the function being decorated::
@comparable_using(MyComparatorType)
@property
def prop(self):
return 'special sauce'
The regular ``comparable_property()`` is also usable directly in a
declarative setting and may be convenient for read/write properties::
prop = comparable_property(MyComparatorType)
"""
def decorate(fn):
return comparable_property(comparator_factory, fn)
return decorate
class declared_attr(interfaces._MappedAttribute, property):
"""Mark a class-level method as representing the definition of
a mapped property or special declarative member name.
@declared_attr turns the attribute into a scalar-like
property that can be invoked from the uninstantiated class.
Declarative treats attributes specifically marked with
@declared_attr as returning a construct that is specific
to mapping or declarative table configuration. The name
of the attribute is that of what the non-dynamic version
of the attribute would be.
@declared_attr is more often than not applicable to mixins,
to define relationships that are to be applied to different
implementors of the class::
class ProvidesUser(object):
"A mixin that adds a 'user' relationship to classes."
@declared_attr
def user(self):
return relationship("User")
It also can be applied to mapped classes, such as to provide
a "polymorphic" scheme for inheritance::
class Employee(Base):
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
@declared_attr
def __mapper_args__(cls):
if cls.__name__ == 'Employee':
return {
"polymorphic_on":cls.type,
"polymorphic_identity":"Employee"
}
else:
return {"polymorphic_identity":cls.__name__}
.. versionchanged:: 0.8 :class:`.declared_attr` can be used with
non-ORM or extension attributes, such as user-defined attributes
or :func:`.association_proxy` objects, which will be assigned
to the class at class construction time.
"""
def __init__(self, fget, cascading=False):
super(declared_attr, self).__init__(fget)
self.__doc__ = fget.__doc__
self._cascading = cascading
def __get__(desc, self, cls):
reg = cls.__dict__.get('_sa_declared_attr_reg', None)
if reg is None:
manager = attributes.manager_of_class(cls)
if manager is None:
util.warn(
"Unmanaged access of declarative attribute %s from "
"non-mapped class %s" %
(desc.fget.__name__, cls.__name__))
return desc.fget(cls)
if reg is None:
return desc.fget(cls)
elif desc in reg:
return reg[desc]
else:
reg[desc] = obj = desc.fget(cls)
return obj
@hybridmethod
def _stateful(cls, **kw):
return _stateful_declared_attr(**kw)
@hybridproperty
def cascading(cls):
"""Mark a :class:`.declared_attr` as cascading.
This is a special-use modifier which indicates that a column
or MapperProperty-based declared attribute should be configured
distinctly per mapped subclass, within a mapped-inheritance scenario.
Below, both MyClass as well as MySubClass will have a distinct
``id`` Column object established::
class HasSomeAttribute(object):
@declared_attr.cascading
def some_id(cls):
if has_inherited_table(cls):
return Column(
ForeignKey('myclass.id'), primary_key=True)
else:
return Column(Integer, primary_key=True)
return Column('id', Integer, primary_key=True)
class MyClass(HasSomeAttribute, Base):
""
# ...
class MySubClass(MyClass):
""
# ...
The behavior of the above configuration is that ``MySubClass``
will refer to both its own ``id`` column as well as that of
``MyClass`` underneath the attribute named ``some_id``.
.. seealso::
:ref:`declarative_inheritance`
:ref:`mixin_inheritance_columns`
"""
return cls._stateful(cascading=True)
class _stateful_declared_attr(declared_attr):
def __init__(self, **kw):
self.kw = kw
def _stateful(self, **kw):
new_kw = self.kw.copy()
new_kw.update(kw)
return _stateful_declared_attr(**new_kw)
def __call__(self, fn):
return declared_attr(fn, **self.kw)
def declarative_base(bind=None, metadata=None, mapper=None, cls=object,
name='Base', constructor=_declarative_constructor,
class_registry=None,
metaclass=DeclarativeMeta):
"""Construct a base class for declarative class definitions.
The new base class will be given a metaclass that produces
appropriate :class:`~sqlalchemy.schema.Table` objects and makes
the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the
information provided declaratively in the class and any subclasses
of the class.
:param bind: An optional
:class:`~sqlalchemy.engine.Connectable`, will be assigned
the ``bind`` attribute on the :class:`~sqlalchemy.schema.MetaData`
instance.
:param metadata:
An optional :class:`~sqlalchemy.schema.MetaData` instance. All
:class:`~sqlalchemy.schema.Table` objects implicitly declared by
subclasses of the base will share this MetaData. A MetaData instance
will be created if none is provided. The
:class:`~sqlalchemy.schema.MetaData` instance will be available via the
`metadata` attribute of the generated declarative base class.
:param mapper:
An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will
be used to map subclasses to their Tables.
:param cls:
Defaults to :class:`object`. A type to use as the base for the generated
declarative base class. May be a class or tuple of classes.
:param name:
Defaults to ``Base``. The display name for the generated
class. Customizing this is not required, but can improve clarity in
tracebacks and debugging.
:param constructor:
Defaults to
:func:`~sqlalchemy.ext.declarative._declarative_constructor`, an
__init__ implementation that assigns \**kwargs for declared
fields and relationships to an instance. If ``None`` is supplied,
no __init__ will be provided and construction will fall back to
cls.__init__ by way of the normal Python semantics.
:param class_registry: optional dictionary that will serve as the
registry of class names-> mapped classes when string names
are used to identify classes inside of :func:`.relationship`
and others. Allows two or more declarative base classes
to share the same registry of class names for simplified
inter-base relationships.
:param metaclass:
Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__
compatible callable to use as the meta type of the generated
declarative base class.
.. seealso::
:func:`.as_declarative`
"""
lcl_metadata = metadata or MetaData()
if bind:
lcl_metadata.bind = bind
if class_registry is None:
class_registry = weakref.WeakValueDictionary()
bases = not isinstance(cls, tuple) and (cls,) or cls
class_dict = dict(_decl_class_registry=class_registry,
metadata=lcl_metadata)
if constructor:
class_dict['__init__'] = constructor
if mapper:
class_dict['__mapper_cls__'] = mapper
return metaclass(name, bases, class_dict)
def as_declarative(**kw):
"""
Class decorator for :func:`.declarative_base`.
Provides a syntactical shortcut to the ``cls`` argument
sent to :func:`.declarative_base`, allowing the base class
to be converted in-place to a "declarative" base::
from sqlalchemy.ext.declarative import as_declarative
@as_declarative()
class Base(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer, primary_key=True)
class MyMappedClass(Base):
# ...
All keyword arguments passed to :func:`.as_declarative` are passed
along to :func:`.declarative_base`.
.. versionadded:: 0.8.3
.. seealso::
:func:`.declarative_base`
"""
def decorate(cls):
kw['cls'] = cls
kw['name'] = cls.__name__
return declarative_base(**kw)
return decorate
class ConcreteBase(object):
"""A helper class for 'concrete' declarative mappings.
:class:`.ConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
``__declare_last__()`` function, which is essentially
a hook for the :meth:`.after_configured` event.
:class:`.ConcreteBase` produces a mapped
table for the class itself. Compare to :class:`.AbstractConcreteBase`,
which does not.
Example::
from sqlalchemy.ext.declarative import ConcreteBase
class Employee(ConcreteBase, Base):
__tablename__ = 'employee'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee',
'concrete':True}
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
.. seealso::
:class:`.AbstractConcreteBase`
:ref:`concrete_inheritance`
:ref:`inheritance_concrete_helpers`
"""
@classmethod
def _create_polymorphic_union(cls, mappers):
return polymorphic_union(OrderedDict(
(mp.polymorphic_identity, mp.local_table)
for mp in mappers
), 'type', 'pjoin')
@classmethod
def __declare_first__(cls):
m = cls.__mapper__
if m.with_polymorphic:
return
mappers = list(m.self_and_descendants)
pjoin = cls._create_polymorphic_union(mappers)
m._set_with_polymorphic(("*", pjoin))
m._set_polymorphic_on(pjoin.c.type)
class AbstractConcreteBase(ConcreteBase):
"""A helper class for 'concrete' declarative mappings.
:class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
``__declare_last__()`` function, which is essentially
a hook for the :meth:`.after_configured` event.
:class:`.AbstractConcreteBase` does produce a mapped class
for the base class, however it is not persisted to any table; it
is instead mapped directly to the "polymorphic" selectable directly
and is only used for selecting. Compare to :class:`.ConcreteBase`,
which does create a persisted table for the base class.
Example::
from sqlalchemy.ext.declarative import AbstractConcreteBase
class Employee(AbstractConcreteBase, Base):
pass
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
The abstract base class is handled by declarative in a special way;
at class configuration time, it behaves like a declarative mixin
or an ``__abstract__`` base class. Once classes are configured
and mappings are produced, it then gets mapped itself, but
after all of its descendants. This is a unique system of mapping
not found in any other SQLAlchemy system.
Using this approach, we can specify columns and properties
that will take place on mapped subclasses, in the way that
we normally do as in :ref:`declarative_mixins`::
class Company(Base):
__tablename__ = 'company'
id = Column(Integer, primary_key=True)
class Employee(AbstractConcreteBase, Base):
employee_id = Column(Integer, primary_key=True)
@declared_attr
def company_id(cls):
return Column(ForeignKey('company.id'))
@declared_attr
def company(cls):
return relationship("Company")
class Manager(Employee):
__tablename__ = 'manager'
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
When we make use of our mappings however, both ``Manager`` and
``Employee`` will have an independently usable ``.company`` attribute::
session.query(Employee).filter(Employee.company.has(id=5))
.. versionchanged:: 1.0.0 - The mechanics of :class:`.AbstractConcreteBase`
have been reworked to support relationships established directly
on the abstract base, without any special configurational steps.
.. seealso::
:class:`.ConcreteBase`
:ref:`concrete_inheritance`
:ref:`inheritance_concrete_helpers`
"""
__no_table__ = True
@classmethod
def __declare_first__(cls):
cls._sa_decl_prepare_nocascade()
@classmethod
def _sa_decl_prepare_nocascade(cls):
if getattr(cls, '__mapper__', None):
return
to_map = _DeferredMapperConfig.config_for_cls(cls)
# can't rely on 'self_and_descendants' here
# since technically an immediate subclass
# might not be mapped, but a subclass
# may be.
mappers = []
stack = list(cls.__subclasses__())
while stack:
klass = stack.pop()
stack.extend(klass.__subclasses__())
mn = _mapper_or_none(klass)
if mn is not None:
mappers.append(mn)
pjoin = cls._create_polymorphic_union(mappers)
# For columns that were declared on the class, these
# are normally ignored with the "__no_table__" mapping,
# unless they have a different attribute key vs. col name
# and are in the properties argument.
# In that case, ensure we update the properties entry
# to the correct column from the pjoin target table.
declared_cols = set(to_map.declared_columns)
for k, v in list(to_map.properties.items()):
if v in declared_cols:
to_map.properties[k] = pjoin.c[v.key]
to_map.local_table = pjoin
m_args = to_map.mapper_args_fn or dict
def mapper_args():
args = m_args()
args['polymorphic_on'] = pjoin.c.type
return args
to_map.mapper_args_fn = mapper_args
m = to_map.map()
for scls in cls.__subclasses__():
sm = _mapper_or_none(scls)
if sm and sm.concrete and cls in scls.__bases__:
sm._set_concrete_base(m)
class DeferredReflection(object):
"""A helper class for construction of mappings based on
a deferred reflection step.
Normally, declarative can be used with reflection by
setting a :class:`.Table` object using autoload=True
as the ``__table__`` attribute on a declarative class.
The caveat is that the :class:`.Table` must be fully
reflected, or at the very least have a primary key column,
at the point at which a normal declarative mapping is
constructed, meaning the :class:`.Engine` must be available
at class declaration time.
The :class:`.DeferredReflection` mixin moves the construction
of mappers to be at a later point, after a specific
method is called which first reflects all :class:`.Table`
objects created so far. Classes can define it as such::
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import DeferredReflection
Base = declarative_base()
class MyClass(DeferredReflection, Base):
__tablename__ = 'mytable'
Above, ``MyClass`` is not yet mapped. After a series of
classes have been defined in the above fashion, all tables
can be reflected and mappings created using
:meth:`.prepare`::
engine = create_engine("someengine://...")
DeferredReflection.prepare(engine)
The :class:`.DeferredReflection` mixin can be applied to individual
classes, used as the base for the declarative base itself,
or used in a custom abstract class. Using an abstract base
allows only a subset of classes to be prepared for a
particular prepare step, which is necessary for applications
that use more than one engine. For example, if an application
has two engines, you might use two bases, and prepare each
separately, e.g.::
class ReflectedOne(DeferredReflection, Base):
__abstract__ = True
class ReflectedTwo(DeferredReflection, Base):
__abstract__ = True
class MyClass(ReflectedOne):
__tablename__ = 'mytable'
class MyOtherClass(ReflectedOne):
__tablename__ = 'myothertable'
class YetAnotherClass(ReflectedTwo):
__tablename__ = 'yetanothertable'
# ... etc.
Above, the class hierarchies for ``ReflectedOne`` and
``ReflectedTwo`` can be configured separately::
ReflectedOne.prepare(engine_one)
ReflectedTwo.prepare(engine_two)
.. versionadded:: 0.8
"""
@classmethod
def prepare(cls, engine):
"""Reflect all :class:`.Table` objects for all current
:class:`.DeferredReflection` subclasses"""
to_map = _DeferredMapperConfig.classes_for_base(cls)
for thingy in to_map:
cls._sa_decl_prepare(thingy.local_table, engine)
thingy.map()
mapper = thingy.cls.__mapper__
metadata = mapper.class_.metadata
for rel in mapper._props.values():
if isinstance(rel, properties.RelationshipProperty) and \
rel.secondary is not None:
if isinstance(rel.secondary, Table):
cls._reflect_table(rel.secondary, engine)
elif isinstance(rel.secondary, _class_resolver):
rel.secondary._resolvers += (
cls._sa_deferred_table_resolver(engine, metadata),
)
@classmethod
def _sa_deferred_table_resolver(cls, engine, metadata):
def _resolve(key):
t1 = Table(key, metadata)
cls._reflect_table(t1, engine)
return t1
return _resolve
@classmethod
def _sa_decl_prepare(cls, local_table, engine):
# autoload Table, which is already
# present in the metadata. This
# will fill in db-loaded columns
# into the existing Table object.
if local_table is not None:
cls._reflect_table(local_table, engine)
@classmethod
def _reflect_table(cls, table, engine):
Table(table.name,
table.metadata,
extend_existing=True,
autoload_replace=False,
autoload=True,
autoload_with=engine,
schema=table.schema)
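# Hedged usage sketch (illustrative; mirrors the documented API above, and
# assumes the usual `from sqlalchemy import Column, Integer, String` imports):
#
#   Base = declarative_base()
#
#   class User(Base):
#       __tablename__ = 'user'
#       id = Column(Integer, primary_key=True)
#       name = Column(String(50))
#
# Base.metadata.create_all(engine) would then emit CREATE TABLE for 'user'.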
|
ealegol/kolla-newton
|
refs/heads/master
|
kolla/cmd/genpwd.py
|
2
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import random
import string
import uuid
import yaml
from Crypto.PublicKey import RSA
def generate_RSA(bits=4096):
new_key = RSA.generate(bits, os.urandom)
private_key = new_key.exportKey("PEM")
public_key = new_key.publickey().exportKey("OpenSSH")
return private_key, public_key
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--passwords', type=str,
default=os.path.abspath('/etc/kolla/passwords.yml'),
help=('Path to the passwords yml file'))
args = parser.parse_args()
passwords_file = os.path.expanduser(args.passwords)
# These keys should be random uuids
uuid_keys = ['ceph_cluster_fsid', 'rbd_secret_uuid',
'gnocchi_project_id', 'gnocchi_resource_id',
'gnocchi_user_id']
# SSH key pair
ssh_keys = ['kolla_ssh_key', 'nova_ssh_key',
'keystone_ssh_key', 'bifrost_ssh_key']
# If these keys are None, leave them as None
blank_keys = ['docker_registry_password']
# length of password
length = 40
with open(passwords_file, 'r') as f:
passwords = yaml.safe_load(f.read())
for k, v in passwords.items():
if (k in ssh_keys and
(v is None
or v.get('public_key') is None
and v.get('private_key') is None)):
private_key, public_key = generate_RSA()
passwords[k] = {
'private_key': private_key,
'public_key': public_key
}
continue
if v is None:
if k in blank_keys:
continue
if k in uuid_keys:
passwords[k] = str(uuid.uuid4())
else:
passwords[k] = ''.join([
random.SystemRandom().choice(
string.ascii_letters + string.digits)
for n in range(length)
])
with open(passwords_file, 'w') as f:
f.write(yaml.dump(passwords, default_flow_style=False))
if __name__ == '__main__':
main()
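# Hedged example (illustrative; the key names below are assumptions, not taken
# from kolla's shipped passwords.yml): before generation the file might hold
# empty values,
#   database_password:
#   rbd_secret_uuid:
#   kolla_ssh_key:
# and after running this script each would hold, respectively, a random
# 40-character string, a UUID, and a private/public SSH key pair; only keys
# listed in uuid_keys/ssh_keys/blank_keys get special handling.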
|
1suming/readthedocs.org
|
refs/heads/master
|
readthedocs/projects/migrations/0018_add_doctypes.py
|
16
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.documentation_type'
db.add_column('projects_project', 'documentation_type', self.gf('django.db.models.fields.CharField')(default='sphinx', max_length=20), keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.documentation_type'
db.delete_column('projects_project', 'documentation_type')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.file': {
'Meta': {'ordering': "('denormalized_path',)", 'object_name': 'File'},
'content': ('django.db.models.fields.TextField', [], {}),
'denormalized_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'heading': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['projects.File']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'})
},
'projects.filerevision': {
'Meta': {'ordering': "('-revision_number',)", 'object_name': 'FileRevision'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diff': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['projects.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reverted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {})
},
'projects.importedfile': {
'Meta': {'object_name': 'ImportedFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'projects.project': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'default_branch': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_version': ('django.db.models.fields.CharField', [], {'default': "'latest'", 'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'django_packages_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'documentation_type': ('django.db.models.fields.CharField', [], {'default': "'sphinx'", 'max_length': '20'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'requirements_file': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'use_virtualenv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'to': "orm['auth.User']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['projects']
|
axbaretto/beam
|
refs/heads/master
|
sdks/python/.tox/docs/lib/python2.7/site-packages/hamcrest/library/number/ordering_comparison.py
|
6
|
from hamcrest.core.base_matcher import BaseMatcher
import operator
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
class OrderingComparison(BaseMatcher):
def __init__(self, value, comparison_function, comparison_description):
self.value = value
self.comparison_function = comparison_function
self.comparison_description = comparison_description
def _matches(self, item):
return self.comparison_function(item, self.value)
def describe_to(self, description):
description.append_text('a value ') \
.append_text(self.comparison_description) \
.append_text(' ') \
.append_description_of(self.value)
def greater_than(value):
"""Matches if object is greater than a given value.
:param value: The value to compare against.
"""
return OrderingComparison(value, operator.gt, 'greater than')
def greater_than_or_equal_to(value):
"""Matches if object is greater than or equal to a given value.
:param value: The value to compare against.
"""
return OrderingComparison(value, operator.ge, 'greater than or equal to')
def less_than(value):
"""Matches if object is less than a given value.
:param value: The value to compare against.
"""
return OrderingComparison(value, operator.lt, 'less than')
def less_than_or_equal_to(value):
"""Matches if object is less than or equal to a given value.
:param value: The value to compare against.
"""
return OrderingComparison(value, operator.le, 'less than or equal to')
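# Hedged usage sketch (standard PyHamcrest style; assert_that comes from the
# top-level hamcrest package, not this module):
#   from hamcrest import assert_that
#   assert_that(10, greater_than(5))
#   assert_that(3, less_than_or_equal_to(3))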
|
DavidCain/WinterSchool
|
refs/heads/master
|
ws/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-26 23:54
from __future__ import unicode_literals
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
import localflavor.us.models
import phonenumber_field.modelfields
from django.db import migrations, models
import ws.fields
import ws.utils.dates
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name='Car',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'license_plate',
models.CharField(
max_length=31,
validators=[
django.core.validators.RegexValidator(
'^[a-zA-Z0-9 ]*$',
'Only alphanumeric characters and spaces allowed',
)
],
),
),
('state', localflavor.us.models.USStateField(max_length=2)),
('make', models.CharField(max_length=63)),
('model', models.CharField(max_length=63)),
(
'year',
models.PositiveIntegerField(
validators=[
django.core.validators.MaxValueValidator(2020),
django.core.validators.MinValueValidator(1903),
]
),
),
('color', models.CharField(max_length=63)),
],
),
migrations.CreateModel(
name='ClimbingLeaderApplication',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
(
'previous_rating',
models.CharField(
blank=True, help_text='Previous rating (if any)', max_length=255
),
),
(
'year',
models.PositiveIntegerField(
default=ws.utils.dates.ws_year,
help_text='Year this application pertains to.',
validators=[django.core.validators.MinValueValidator(2014)],
),
),
(
'desired_rating',
models.CharField(
choices=[
('Bouldering', 'Bouldering'),
('Single-pitch', 'Single-pitch'),
('Multi-pitch', 'Multi-pitch'),
('Bouldering + Single-pitch', 'Bouldering + Single-pitch'),
('Bouldering + Multi-pitch', 'Bouldering + Multi-pitch'),
],
max_length=32,
),
),
('years_climbing', models.IntegerField()),
('years_climbing_outside', models.IntegerField()),
(
'outdoor_bouldering_grade',
models.CharField(
help_text='At what grade are you comfortable bouldering outside?',
max_length=255,
),
),
(
'outdoor_sport_leading_grade',
models.CharField(
help_text='At what grade are you comfortable leading outside on sport routes?',
max_length=255,
),
),
(
'outdoor_trad_leading_grade',
models.CharField(
help_text='At what grade are you comfortable leading outside on trad routes?',
max_length=255,
),
),
(
'familiarity_spotting',
models.CharField(
choices=[
('none', 'not at all'),
('some', 'some exposure'),
('comfortable', 'comfortable'),
('very comfortable', 'very comfortable'),
],
max_length=16,
verbose_name='Familarity with spotting boulder problems',
),
),
(
'familiarity_bolt_anchors',
models.CharField(
choices=[
('none', 'not at all'),
('some', 'some exposure'),
('comfortable', 'comfortable'),
('very comfortable', 'very comfortable'),
],
max_length=16,
verbose_name="Familiarity with 2-bolt 'sport' anchors",
),
),
(
'familiarity_gear_anchors',
models.CharField(
choices=[
('none', 'not at all'),
('some', 'some exposure'),
('comfortable', 'comfortable'),
('very comfortable', 'very comfortable'),
],
max_length=16,
verbose_name="Familiarity with trad 'gear' anchors",
),
),
(
'familiarity_sr',
models.CharField(
choices=[
('none', 'not at all'),
('some', 'some exposure'),
('comfortable', 'comfortable'),
('very comfortable', 'very comfortable'),
],
max_length=16,
verbose_name='Familiarity with multi-pitch self-rescue',
),
),
(
'spotting_description',
models.TextField(
blank=True,
help_text='Describe how you would spot a climber on a meandering tall bouldering problem.',
),
),
(
'tr_anchor_description',
models.TextField(
blank=True,
help_text='Describe how you would build a top-rope anchor at a sport crag.',
verbose_name='Top rope anchor description',
),
),
(
'rappel_description',
models.TextField(
blank=True,
help_text='Describe how you would set up a safe rappel.',
),
),
(
'gear_anchor_description',
models.TextField(
blank=True,
help_text='Describe what you look for when building a typical gear anchor.',
),
),
('formal_training', models.TextField(blank=True)),
('teaching_experience', models.TextField(blank=True)),
(
'notable_climbs',
models.TextField(
blank=True,
help_text='What are some particularly memorable climbs you have done?',
),
),
(
'favorite_route',
models.TextField(
blank=True,
help_text='Do you have a favorite route? If so, what is it and why?',
),
),
(
'extra_info',
models.TextField(
blank=True,
help_text='Is there anything else you would like us to know?',
),
),
],
options={'ordering': ['time_created'], 'abstract': False},
),
migrations.CreateModel(
name='Discount',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'active',
models.BooleanField(
default=True, help_text='Discount is currently open & active'
),
),
('name', models.CharField(max_length=255)),
('summary', models.CharField(max_length=255)),
('terms', models.TextField(max_length=4095)),
('url', models.URLField(blank=True, null=True)),
(
'ga_key',
models.CharField(
help_text='key for Google spreadsheet with membership information (shared as read-only with the company)',
max_length=63,
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
(
'student_required',
models.BooleanField(
default=False,
help_text='Discount provider requires recipients to be students',
),
),
(
'report_school',
models.BooleanField(
default=False,
help_text='Report MIT affiliation if participant is a student',
),
),
(
'report_student',
models.BooleanField(
default=False,
help_text='Report MIT affiliation and student status to discount provider',
),
),
(
'report_leader',
models.BooleanField(
default=False,
help_text='Report MITOC leader status to discount provider',
),
),
(
'report_access',
models.BooleanField(
default=False,
help_text='Report if participant should have leader, student, or admin level access',
),
),
],
),
migrations.CreateModel(
name='EmergencyContact',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('name', models.CharField(max_length=255)),
(
'cell_phone',
phonenumber_field.modelfields.PhoneNumberField(max_length=128),
),
('relationship', models.CharField(max_length=63)),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='EmergencyInfo',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('allergies', models.CharField(max_length=255)),
('medications', models.CharField(max_length=255)),
(
'medical_history',
models.TextField(
help_text='Anything your trip leader would want to know about.',
max_length=2000,
),
),
(
'emergency_contact',
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to='ws.EmergencyContact',
),
),
],
),
migrations.CreateModel(
name='Feedback',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('showed_up', models.BooleanField(default=True)),
('comments', models.TextField(max_length=2000)),
('time_created', models.DateTimeField(auto_now_add=True)),
],
options={'ordering': ['participant', '-time_created']},
),
migrations.CreateModel(
name='HikingLeaderApplication',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
(
'previous_rating',
models.CharField(
blank=True, help_text='Previous rating (if any)', max_length=255
),
),
(
'year',
models.PositiveIntegerField(
default=ws.utils.dates.ws_year,
help_text='Year this application pertains to.',
validators=[django.core.validators.MinValueValidator(2014)],
),
),
(
'desired_rating',
models.CharField(
choices=[('Leader', 'Leader'), ('Co-Leader', 'Co-Leader')],
help_text='Co-Leader: Can co-lead a 3-season hiking trip with a Leader. Leader: Can run 3-season hiking trips.',
max_length=10,
),
),
(
'mitoc_experience',
models.TextField(
help_text='How long have you been a MITOC member? Please indicate what official MITOC hikes and Circuses you have been on. Include approximate dates and locations, number of participants, trail conditions, type of trip, etc. Give details of whether you participated, led, or co-led these trips. [Optional]: If you like, briefly summarize your experience on unofficial trips or experience outside of New England.',
max_length=5000,
verbose_name='Hiking Experience with MITOC',
),
),
(
'formal_training',
models.TextField(
blank=True,
help_text='Please give details of any medical training and qualifications, with dates. Also include any other formal outdoor education or qualifications.',
max_length=5000,
),
),
(
'leadership_experience',
models.TextField(
blank=True,
help_text="If you've been a leader elsewhere, please describe that here. This could include leadership in other collegiate outing clubs, student sports clubs, NOLS, Outward Bound, or AMC; working as a guide, summer camp counselor, or Scout leader; or organizing hikes with friends.",
max_length=5000,
verbose_name='Group outdoor/leadership experience',
),
),
],
options={'ordering': ['time_created'], 'abstract': False},
),
migrations.CreateModel(
name='LeaderRating',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
(
'activity',
models.CharField(
choices=[
('biking', 'Biking'),
('boating', 'Boating'),
('cabin', 'Cabin'),
('climbing', 'Climbing'),
('hiking', 'Hiking'),
('winter_school', 'Winter School'),
('circus', 'Circus'),
('official_event', 'Official Event'),
('course', 'Course'),
],
max_length=31,
),
),
('rating', models.CharField(max_length=31)),
('notes', models.TextField(blank=True, max_length=500)),
('active', models.BooleanField(default=True)),
],
options={'ordering': ['participant'], 'abstract': False},
),
migrations.CreateModel(
name='LeaderRecommendation',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
(
'activity',
models.CharField(
choices=[
('biking', 'Biking'),
('boating', 'Boating'),
('cabin', 'Cabin'),
('climbing', 'Climbing'),
('hiking', 'Hiking'),
('winter_school', 'Winter School'),
('circus', 'Circus'),
('official_event', 'Official Event'),
('course', 'Course'),
],
max_length=31,
),
),
('rating', models.CharField(max_length=31)),
('notes', models.TextField(blank=True, max_length=500)),
],
options={'ordering': ['participant'], 'abstract': False},
),
migrations.CreateModel(
name='LeaderSignUp',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('notes', models.TextField(blank=True, max_length=1000)),
],
options={'ordering': ['time_created']},
),
migrations.CreateModel(
name='LectureAttendance',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'year',
models.PositiveIntegerField(
default=ws.utils.dates.ws_year,
help_text='Winter School year when lectures were attended.',
validators=[django.core.validators.MinValueValidator(2016)],
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='LotteryInfo',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'car_status',
models.CharField(
choices=[
('none', 'Not driving'),
('own', 'Can drive own car'),
('rent', 'Willing to rent'),
],
default='none',
max_length=7,
),
),
(
'number_of_passengers',
models.PositiveIntegerField(
blank=True,
null=True,
validators=[
django.core.validators.MaxValueValidator(
13, message='Do you drive a bus?'
)
],
),
),
('last_updated', models.DateTimeField(auto_now=True)),
],
options={'ordering': ['car_status', 'number_of_passengers']},
),
migrations.CreateModel(
name='MentorActivity',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('name', models.CharField(max_length=31, unique=True)),
],
),
migrations.CreateModel(
name='Participant',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('user_id', models.IntegerField()),
('name', models.CharField(max_length=255)),
(
'cell_phone',
phonenumber_field.modelfields.PhoneNumberField(
blank=True, max_length=128
),
),
('last_updated', models.DateTimeField(auto_now=True)),
(
'email',
models.EmailField(
help_text="This will be shared with leaders & other participants. <a href='/accounts/email/'>Manage email addresses</a>.",
max_length=254,
unique=True,
),
),
(
'affiliation',
models.CharField(
choices=[
(
'Undergraduate student',
[('MU', 'MIT undergrad'), ('NU', 'Non-MIT undergrad')],
),
(
'Graduate student',
[
('MG', 'MIT grad student'),
('NG', 'Non-MIT grad student'),
],
),
('MA', 'MIT affiliate'),
('NA', 'Non-affiliate'),
],
max_length=2,
),
),
(
'car',
ws.fields.OptionalOneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to='ws.Car',
),
),
('discounts', models.ManyToManyField(blank=True, to='ws.Discount')),
(
'emergency_info',
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to='ws.EmergencyInfo',
),
),
],
options={'ordering': ['name', 'email']},
),
migrations.CreateModel(
name='SignUp',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('notes', models.TextField(blank=True, max_length=1000)),
('order', models.IntegerField(blank=True, null=True)),
('manual_order', models.IntegerField(blank=True, null=True)),
('on_trip', models.BooleanField(default=False)),
(
'participant',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.Participant'
),
),
],
options={'ordering': ['manual_order', 'last_updated']},
),
migrations.CreateModel(
name='Trip',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'activity',
models.CharField(
choices=[
('biking', 'Biking'),
('boating', 'Boating'),
('cabin', 'Cabin'),
('climbing', 'Climbing'),
('hiking', 'Hiking'),
('winter_school', 'Winter School'),
('circus', 'Circus'),
('official_event', 'Official Event'),
('course', 'Course'),
],
default='winter_school',
max_length=31,
),
),
(
'allow_leader_signups',
models.BooleanField(
default=False,
help_text='Allow leaders to sign themselves up as trip leaders. (Leaders can always sign up as participants). Recommended for Circuses!',
),
),
('name', models.CharField(max_length=127)),
('description', models.TextField()),
(
'maximum_participants',
models.PositiveIntegerField(
default=8, verbose_name='Max participants'
),
),
('difficulty_rating', models.CharField(max_length=63)),
(
'level',
models.CharField(
blank=True,
help_text="This trip's A, B, or C designation (plus I/S rating if applicable).",
max_length=255,
null=True,
),
),
(
'prereqs',
models.CharField(
blank=True, max_length=255, verbose_name='Prerequisites'
),
),
('chair_approved', models.BooleanField(default=False)),
(
'notes',
models.TextField(
blank=True,
help_text='Participants must add notes to their signups if you complete this field. This is a great place to ask important questions.',
max_length=2000,
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
('last_edited', models.DateTimeField(auto_now=True)),
('trip_date', models.DateField(default=ws.utils.dates.nearest_sat)),
(
'signups_open_at',
models.DateTimeField(default=django.utils.timezone.now),
),
(
'signups_close_at',
models.DateTimeField(
blank=True, default=ws.utils.dates.wed_morning, null=True
),
),
(
'let_participants_drop',
models.BooleanField(
default=False,
help_text='Allow participants to remove themselves from the trip any time before its start date.',
),
),
(
'honor_participant_pairing',
models.BooleanField(
default=True,
help_text='Try to place paired participants together on the trip.',
),
),
(
'algorithm',
models.CharField(
choices=[
('lottery', 'lottery'),
('fcfs', 'first-come, first-serve'),
],
default='lottery',
max_length=31,
),
),
(
'lottery_task_id',
models.CharField(blank=True, max_length=36, null=True, unique=True),
),
('lottery_log', models.TextField(blank=True, null=True)),
(
'creator',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='created_trips',
to='ws.Participant',
),
),
],
options={'ordering': ['-trip_date', '-time_created']},
),
migrations.CreateModel(
name='TripInfo',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('start_location', models.CharField(max_length=127)),
('start_time', models.CharField(max_length=63)),
(
'turnaround_time',
models.CharField(
blank=True,
help_text="The time at which you'll turn back and head for your car/starting location",
max_length=63,
),
),
(
'return_time',
models.CharField(
help_text='When you expect to return to your car/starting location and be able to call the WIMP',
max_length=63,
),
),
(
'worry_time',
models.CharField(
help_text='Suggested: return time +3 hours. If the WIMP has not heard from you after this time and is unable to make contact with any leaders or participants, the authorities will be called.',
max_length=63,
),
),
(
'itinerary',
models.TextField(
                        help_text='A detailed account of your trip plan. Where will you be going? What route will you be taking? Include trails, peaks, intermediate destinations, back-up plans - anything that would help rescuers find you.'
),
),
(
'drivers',
models.ManyToManyField(
blank=True,
help_text="If a trip participant is driving, but is not on this list, they must first submit <a href='/profile/edit/#car'>information about their car</a>. They should then be added here.",
to='ws.Participant',
),
),
],
),
migrations.CreateModel(
name='WaitList',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'trip',
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to='ws.Trip'
),
),
],
),
migrations.CreateModel(
name='WaitListSignup',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
('manual_order', models.IntegerField(blank=True, null=True)),
(
'signup',
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to='ws.SignUp'
),
),
(
'waitlist',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.WaitList'
),
),
],
options={'ordering': ['-manual_order', 'time_created']},
),
migrations.CreateModel(
name='WinterSchoolLeaderApplication',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
(
'previous_rating',
models.CharField(
blank=True, help_text='Previous rating (if any)', max_length=255
),
),
(
'year',
models.PositiveIntegerField(
default=ws.utils.dates.ws_year,
help_text='Year this application pertains to.',
validators=[django.core.validators.MinValueValidator(2014)],
),
),
('desired_rating', models.CharField(max_length=255)),
(
'taking_wfa',
models.CharField(
choices=[
('Yes', 'Yes'),
('No', 'No'),
('Maybe', "Maybe/don't know"),
],
help_text='Save $100 on the course fee by leading two or more trips!',
max_length=10,
verbose_name='Do you plan on taking the subsidized WFA at MIT?',
),
),
(
'training',
models.TextField(
blank=True,
help_text='Details of any medical, technical, or leadership training and qualifications relevant to the winter environment. State the approximate dates of these activities. Leave blank if not applicable.',
max_length=5000,
verbose_name='Formal training and qualifications',
),
),
(
'winter_experience',
models.TextField(
blank=True,
                        help_text='Details of previous winter outdoors experience. Include the type of trip (x-country skiing, above treeline, snowshoeing, ice climbing, etc), approximate dates and locations, numbers of participants, notable trail and weather conditions. Please also give details of whether you participated, led, or co-led these trips.',
max_length=5000,
),
),
(
'other_experience',
models.TextField(
blank=True,
help_text='Details about any relevant non-winter experience',
max_length=5000,
verbose_name='Other outdoors/leadership experience',
),
),
(
'notes_or_comments',
models.TextField(
blank=True,
help_text='Any relevant details, such as any limitations on availability on Tue/Thurs nights or weekends during IAP.',
max_length=5000,
),
),
(
'mentee_activities',
models.ManyToManyField(
blank=True,
help_text='Please select at least one.',
related_name='mentee_activities',
to='ws.MentorActivity',
verbose_name='For which activities would you like a mentor?',
),
),
(
'mentor_activities',
models.ManyToManyField(
blank=True,
help_text='Please select at least one.',
related_name='activities_mentored',
to='ws.MentorActivity',
verbose_name='Which activities would you like to mentor?',
),
),
(
'participant',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.Participant'
),
),
],
options={'ordering': ['time_created'], 'abstract': False},
),
migrations.CreateModel(
name='WinterSchoolSettings',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('time_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
(
'allow_setting_attendance',
models.BooleanField(
default=False,
verbose_name='Let participants set lecture attendance',
),
),
(
'last_updated_by',
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to='ws.Participant',
),
),
],
options={'abstract': False},
),
migrations.AddField(
model_name='waitlist',
name='unordered_signups',
field=models.ManyToManyField(through='ws.WaitListSignup', to='ws.SignUp'),
),
migrations.AddField(
model_name='trip',
name='info',
field=ws.fields.OptionalOneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to='ws.TripInfo',
),
),
migrations.AddField(
model_name='trip',
name='leaders',
field=models.ManyToManyField(
blank=True, related_name='trips_led', to='ws.Participant'
),
),
migrations.AddField(
model_name='trip',
name='signed_up_participants',
field=models.ManyToManyField(through='ws.SignUp', to='ws.Participant'),
),
migrations.AddField(
model_name='signup',
name='trip',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.Trip'
),
),
migrations.AddField(
model_name='lotteryinfo',
name='paired_with',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name='paired_by',
to='ws.Participant',
),
),
migrations.AddField(
model_name='lotteryinfo',
name='participant',
field=models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to='ws.Participant'
),
),
migrations.AddField(
model_name='lectureattendance',
name='creator',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='lecture_attendances_marked',
to='ws.Participant',
),
),
migrations.AddField(
model_name='lectureattendance',
name='participant',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.Participant'
),
),
migrations.AddField(
model_name='leadersignup',
name='participant',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.Participant'
),
),
migrations.AddField(
model_name='leadersignup',
name='trip',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.Trip'
),
),
migrations.AddField(
model_name='leaderrecommendation',
name='creator',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='recommendations_created',
to='ws.Participant',
),
),
migrations.AddField(
model_name='leaderrecommendation',
name='participant',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.Participant'
),
),
migrations.AddField(
model_name='leaderrating',
name='creator',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='ratings_created',
to='ws.Participant',
),
),
migrations.AddField(
model_name='leaderrating',
name='participant',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.Participant'
),
),
migrations.AddField(
model_name='hikingleaderapplication',
name='participant',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.Participant'
),
),
migrations.AddField(
model_name='feedback',
name='leader',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='authored_feedback',
to='ws.Participant',
),
),
migrations.AddField(
model_name='feedback',
name='participant',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.Participant'
),
),
migrations.AddField(
model_name='feedback',
name='trip',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to='ws.Trip',
),
),
migrations.AddField(
model_name='discount',
name='administrators',
field=models.ManyToManyField(
blank=True,
help_text='Persons selected to administer this discount',
related_name='discounts_administered',
to='ws.Participant',
),
),
migrations.AddField(
model_name='climbingleaderapplication',
name='participant',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='ws.Participant'
),
),
]
|
innerr/stars
|
refs/heads/master
|
_libs/flup/client/scgi_app.py
|
1
|
# Copyright (c) 2006 Allan Saddi <allan@saddi.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
__author__ = 'Allan Saddi <allan@saddi.com>'
__version__ = '$Revision$'
import select
import struct
import socket
import errno
__all__ = ['SCGIApp']
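# SCGI frames the request header block as a netstring: "<length>:<payload>,"
# e.g. encodeNetstring('hello') == '5:hello,'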
def encodeNetstring(s):
return ''.join([str(len(s)), ':', s, ','])
class SCGIApp(object):
def __init__(self, connect=None, host=None, port=None,
filterEnviron=True):
if host is not None:
assert port is not None
connect=(host, port)
assert connect is not None
self._connect = connect
self._filterEnviron = filterEnviron
def __call__(self, environ, start_response):
sock = self._getConnection()
outfile = sock.makefile('w')
infile = sock.makefile('r')
sock.close()
# Filter WSGI environ and send as request headers
if self._filterEnviron:
headers = self._defaultFilterEnviron(environ)
else:
headers = self._lightFilterEnviron(environ)
# TODO: Anything not from environ that needs to be sent also?
content_length = int(environ.get('CONTENT_LENGTH') or 0)
if headers.has_key('CONTENT_LENGTH'):
del headers['CONTENT_LENGTH']
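        # The SCGI spec requires CONTENT_LENGTH to be the first request header and a "SCGI: 1" header to be present.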
headers_out = ['CONTENT_LENGTH', str(content_length), 'SCGI', '1']
for k,v in headers.items():
headers_out.append(k)
headers_out.append(v)
headers_out.append('') # For trailing NUL
outfile.write(encodeNetstring('\x00'.join(headers_out)))
# Transfer wsgi.input to outfile
while True:
chunk_size = min(content_length, 4096)
s = environ['wsgi.input'].read(chunk_size)
content_length -= len(s)
outfile.write(s)
if not s: break
outfile.close()
# Read result from SCGI server
result = []
while True:
buf = infile.read(4096)
if not buf: break
result.append(buf)
infile.close()
result = ''.join(result)
# Parse response headers
status = '200 OK'
headers = []
pos = 0
while True:
eolpos = result.find('\n', pos)
if eolpos < 0: break
line = result[pos:eolpos-1]
pos = eolpos + 1
# strip in case of CR. NB: This will also strip other
# whitespace...
line = line.strip()
# Empty line signifies end of headers
if not line: break
# TODO: Better error handling
header, value = line.split(':', 1)
header = header.strip().lower()
value = value.strip()
if header == 'status':
# Special handling of Status header
status = value
if status.find(' ') < 0:
# Append a dummy reason phrase if one was not provided
status += ' SCGIApp'
else:
headers.append((header, value))
result = result[pos:]
# Set WSGI status, headers, and return result.
start_response(status, headers)
return [result]
def _getConnection(self):
if type(self._connect) is str:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(self._connect)
return sock
_environPrefixes = ['SERVER_', 'HTTP_', 'REQUEST_', 'REMOTE_', 'PATH_',
'CONTENT_']
_environCopies = ['SCRIPT_NAME', 'QUERY_STRING', 'AUTH_TYPE']
_environRenames = {}
def _defaultFilterEnviron(self, environ):
result = {}
for n in environ.keys():
for p in self._environPrefixes:
if n.startswith(p):
result[n] = environ[n]
if n in self._environCopies:
result[n] = environ[n]
if n in self._environRenames:
result[self._environRenames[n]] = environ[n]
return result
def _lightFilterEnviron(self, environ):
result = {}
for n in environ.keys():
if n.upper() == n:
result[n] = environ[n]
return result
if __name__ == '__main__':
from flup.server.ajp import WSGIServer
app = SCGIApp(connect=('localhost', 4000))
#import paste.lint
#app = paste.lint.middleware(app)
WSGIServer(app).run()
|
justathoughtor2/atomicApe
|
refs/heads/encaged
|
cygwin/lib/python2.7/site-packages/astroid/tests/testdata/python2/data/absimp/string.py
|
34
|
from __future__ import absolute_import, print_function
import string
print(string)
|
jannon/slumber
|
refs/heads/master
|
slumber/utils.py
|
10
|
import posixpath
try:
from urllib.parse import urlsplit, urlunsplit
except ImportError:
from urlparse import urlsplit, urlunsplit
def url_join(base, *args):
"""
Helper function to join an arbitrary number of url segments together.
"""
scheme, netloc, path, query, fragment = urlsplit(base)
path = path if len(path) else "/"
path = posixpath.join(path, *[('%s' % x) for x in args])
return urlunsplit([scheme, netloc, path, query, fragment])
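# e.g. url_join("http://example.com/api", "v1", "users") == "http://example.com/api/v1/users"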
def copy_kwargs(dictionary):
kwargs = {}
for key, value in iterator(dictionary):
kwargs[key] = value
return kwargs
def iterator(d):
"""
Helper to get and a proper dict iterator with Py2k and Py3k
"""
try:
return d.iteritems()
except AttributeError:
return d.items()
|
ryfx/gyp
|
refs/heads/master
|
test/sibling/gyptest-all.py
|
100
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import TestGyp
test = TestGyp.TestGyp()
# The xcode-ninja generator handles gypfiles which are not at the
# project root incorrectly.
# cf. https://code.google.com/p/gyp/issues/detail?id=460
if test.format == 'xcode-ninja':
test.skip_test()
test.run_gyp('build/all.gyp', chdir='src')
test.build('build/all.gyp', test.ALL, chdir='src')
chdir = 'src/build'
# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format in ('make', 'ninja', 'cmake'):
chdir = 'src'
if test.format == 'xcode':
chdir = 'src/prog1'
test.run_built_executable('program1',
chdir=chdir,
stdout="Hello from prog1.c\n")
if test.format == 'xcode':
chdir = 'src/prog2'
test.run_built_executable('program2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
|
CraigLoomis/ics_hxActor
|
refs/heads/master
|
python/hxActor/main.py
|
1
|
#!/usr/bin/env python
import logging
import actorcore.ICC
try:
from pfscore import spectroIds
instrument = "PFS"
except ImportError:
instrument = "CHARIS"
class OurActor(actorcore.ICC.ICC):
def __init__(self, name,
productName=None,
camName=None,
imageCamName=None,
debugLevel=30):
""" Setup an Actor instance. See help for actorcore.Actor for details. """
self.instrument = instrument
if instrument == 'PFS':
if imageCamName is None:
imageCamName = camName
self.spectroIds = spectroIds.SpectroIds(partName=imageCamName)
if camName is None:
camName = self.spectroIds.camName
specName = self.spectroIds.specName
name = f"hx_{camName}"
            modelNames = (f"hx_{camName}", f"xcu_{camName}", f"enu_{specName}")
else:
name = "hx"
modelNames = ('hx',)
# This sets up the connections to/from the hub, the logger, and the twisted reactor.
#
actorcore.ICC.ICC.__init__(self, name,
productName=productName,
modelNames=modelNames)
self.everConnected = False
def connectionMade(self):
if self.everConnected is False:
            if self.instrument == 'PFS':
                self.logger.info(f'ids: {self.spectroIds.idDict}')
self.logger.info("Attaching all controllers...")
self.allControllers = [s.strip() for s in self.config.get(self.name, 'startingControllers').split(',')]
self.attachAllControllers()
self.everConnected = True
#
# To work
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--logLevel', default=logging.INFO, type=int, nargs='?',
help='logging level')
parser.add_argument('--cam', default=None, nargs='?',
help='camera name')
parser.add_argument('--imageCam', default=None, nargs='?',
help='camera name for image files')
args = parser.parse_args()
theActor = OurActor(None,
productName='hxActor',
camName=args.cam,
imageCamName=args.imageCam)
theActor.run()
if __name__ == '__main__':
main()
|
Amber-Creative/amber-frappe
|
refs/heads/master
|
frappe/model/docfield.py
|
61
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""docfield utilities"""
import frappe
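# rename() fetches the DocField row, updates tabSingles for single doctypes or alters the
# underlying table column otherwise, then fixes child-table parentfield references.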
def rename(doctype, fieldname, newname):
"""rename docfield"""
df = frappe.db.sql("""select * from tabDocField where parent=%s and fieldname=%s""",
(doctype, fieldname), as_dict=1)
if not df:
return
df = df[0]
if frappe.db.get_value('DocType', doctype, 'issingle'):
update_single(df, newname)
else:
update_table(df, newname)
update_parent_field(df, newname)
def update_single(f, new):
"""update in tabSingles"""
frappe.db.begin()
frappe.db.sql("""update tabSingles set field=%s where doctype=%s and field=%s""",
(new, f['parent'], f['fieldname']))
frappe.db.commit()
def update_table(f, new):
"""update table"""
query = get_change_column_query(f, new)
if query:
frappe.db.sql(query)
def update_parent_field(f, new):
"""update 'parentfield' in tables"""
if f['fieldtype']=='Table':
frappe.db.begin()
frappe.db.sql("""update `tab%s` set parentfield=%s where parentfield=%s""" \
% (f['options'], '%s', '%s'), (new, f['fieldname']))
frappe.db.commit()
def get_change_column_query(f, new):
"""generate change fieldname query"""
desc = frappe.db.sql("desc `tab%s`" % f['parent'])
for d in desc:
if d[0]== f['fieldname']:
return 'alter table `tab%s` change `%s` `%s` %s' % \
(f['parent'], f['fieldname'], new, d[1])
|
devsim/devsim
|
refs/heads/main
|
examples/diode/diode_1d.py
|
1
|
# Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from devsim import *
import devsim.python_packages.simple_physics as simple_physics
import diode_common
#####
# dio1
#
# Make doping a step function
# print dat to text file for viewing in grace
# verify currents analytically
# in dio2 add recombination
#
device="MyDevice"
region="MyRegion"
diode_common.CreateMesh(device=device, region=region)
diode_common.SetParameters(device=device, region=region)
set_parameter(device=device, region=region, name="taun", value=1e-8)
set_parameter(device=device, region=region, name="taup", value=1e-8)
diode_common.SetNetDoping(device=device, region=region)
print_node_values(device=device, region=region, name="NetDoping")
diode_common.InitialSolution(device, region)
# Initial DC solution
solve(type="dc", absolute_error=1.0, relative_error=1e-10, maximum_iterations=30)
diode_common.DriftDiffusionInitialSolution(device, region)
###
### Drift diffusion simulation at equilibrium
###
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=30)
####
#### Ramp the bias to 0.5 Volts
####
v = 0.0
while v < 0.51:
set_parameter(device=device, name=simple_physics.GetContactBiasName("top"), value=v)
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=30)
simple_physics.PrintCurrents(device, "top")
simple_physics.PrintCurrents(device, "bot")
v += 0.1
write_devices(file="diode_1d.dat", type="tecplot")
#import matplotlib
#import matplotlib.pyplot
#x=get_node_model_values(device=device, region=region, name="x")
#ymax = 10
#ymin = 10
#fields = ("Electrons", "Holes", "Donors", "Acceptors")
#for i in fields:
# y=get_node_model_values(device=device, region=region, name=i)
# if (max(y) > ymax):
# ymax = max(y)
# matplotlib.pyplot.semilogy(x, y)
#matplotlib.pyplot.xlabel('x (cm)')
#matplotlib.pyplot.ylabel('Density (#/cm^3)')
#matplotlib.pyplot.legend(fields)
#ymax *= 10
#matplotlib.pyplot.axis([min(x), max(x), ymin, ymax])
#matplotlib.pyplot.savefig("diode_1d_density.eps")
#
#matplotlib.pyplot.clf()
#edge_average_model(device=device, region=region, node_model="x", edge_model="xmid")
#xmid=get_edge_model_values(device=device, region=region, name="xmid")
#efields = ("ElectronCurrent", "HoleCurrent", )
#y=get_edge_model_values(device=device, region=region, name="ElectronCurrent")
#ymin=min(y)
#ymax=max(y)
#for i in efields:
# y=get_edge_model_values(device=device, region=region, name=i)
# if min(y) < ymin:
# ymin = min(y)
# elif max(y) > ymax:
# ymax = max(y)
# matplotlib.pyplot.plot(xmid, y)
#matplotlib.pyplot.xlabel('x (cm)')
#matplotlib.pyplot.ylabel('J (A/cm^2)')
#matplotlib.pyplot.legend(efields)
#matplotlib.pyplot.axis([min(x), max(x), 0.5*ymin, 2*ymax])
#matplotlib.pyplot.savefig("diode_1d_current.eps")
#print ymin
#print ymax
|
DelazJ/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsaggregatemappingwidget.py
|
18
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsAggregateMapping widget and model.
From build dir, run: ctest -R PyQgsAggregateMappingWidget -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '03/06/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (
QgsFields,
QgsField,
QgsFieldConstraints,
)
from qgis.gui import (
QgsAggregateMappingWidget,
QgsAggregateMappingModel,
)
from qgis.PyQt.Qt import Qt
from qgis.PyQt.QtCore import (
QCoreApplication,
QVariant,
QModelIndex,
QItemSelectionModel,
)
from qgis.PyQt.QtGui import (
QColor
)
from qgis.testing import start_app, unittest
class TestPyQgsAggregateMappingModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain(cls.__name__)
QCoreApplication.setApplicationName(cls.__name__)
start_app()
def setUp(self):
"""Run before each test"""
source_fields = QgsFields()
f = QgsField('source_field1', QVariant.String)
self.assertTrue(source_fields.append(f))
f = QgsField('source_field2', QVariant.Int, 'integer', 10, 8)
self.assertTrue(source_fields.append(f))
self.source_fields = source_fields
def _showDialog(self, widget):
"""Used during development"""
from qgis.PyQt.QtWidgets import QDialog, QVBoxLayout
d = QDialog()
l = QVBoxLayout()
l.addWidget(widget)
d.setLayout(l)
d.exec()
def testModel(self):
"""Test the mapping model"""
model = QgsAggregateMappingModel(self.source_fields)
self.assertEqual(model.rowCount(QModelIndex()), 2)
self.assertIsNone(model.data(model.index(9999, 0), Qt.DisplayRole))
self.assertEqual(model.data(model.index(0, 0), Qt.DisplayRole), '"source_field1"')
self.assertEqual(model.data(model.index(0, 1), Qt.DisplayRole), 'concatenate')
self.assertEqual(model.data(model.index(0, 2), Qt.DisplayRole), ',')
self.assertEqual(model.data(model.index(0, 3), Qt.DisplayRole), 'source_field1')
self.assertEqual(model.data(model.index(0, 4), Qt.DisplayRole), QVariant.String)
self.assertEqual(model.data(model.index(0, 5), Qt.DisplayRole), 0)
self.assertEqual(model.data(model.index(0, 6), Qt.DisplayRole), 0)
self.assertEqual(model.data(model.index(1, 0), Qt.DisplayRole), '"source_field2"')
self.assertEqual(model.data(model.index(1, 1), Qt.DisplayRole), 'sum')
self.assertEqual(model.data(model.index(1, 2), Qt.DisplayRole), ',')
self.assertEqual(model.data(model.index(1, 3), Qt.DisplayRole), 'source_field2')
self.assertEqual(model.data(model.index(1, 4), Qt.DisplayRole), QVariant.Int)
self.assertEqual(model.data(model.index(1, 5), Qt.DisplayRole), 10)
self.assertEqual(model.data(model.index(1, 6), Qt.DisplayRole), 8)
# Test expression scope
ctx = model.contextGenerator().createExpressionContext()
self.assertTrue('source_field1' in ctx.fields().names())
# Test add fields
model.appendField(QgsField('field3', QVariant.String), 'upper("field3")', 'first_value')
self.assertEqual(model.rowCount(QModelIndex()), 3)
self.assertEqual(model.data(model.index(2, 0), Qt.DisplayRole), 'upper("field3")')
self.assertEqual(model.data(model.index(2, 1), Qt.DisplayRole), 'first_value')
self.assertEqual(model.data(model.index(2, 2), Qt.DisplayRole), ',')
self.assertEqual(model.data(model.index(2, 3), Qt.DisplayRole), 'field3')
self.assertEqual(model.data(model.index(2, 4), Qt.DisplayRole), QVariant.String)
self.assertEqual(model.data(model.index(2, 5), Qt.DisplayRole), 0)
self.assertEqual(model.data(model.index(2, 6), Qt.DisplayRole), 0)
# Test remove field
model.removeField(model.index(1, 0))
self.assertEqual(model.rowCount(QModelIndex()), 2)
self.assertEqual(model.data(model.index(0, 0), Qt.DisplayRole), '"source_field1"')
self.assertEqual(model.data(model.index(1, 0), Qt.DisplayRole), 'upper("field3")')
# Test edit fields
mapping = model.mapping()
self.assertEqual(mapping[0].field.name(), 'source_field1')
self.assertEqual(mapping[0].aggregate, 'concatenate')
self.assertEqual(mapping[0].delimiter, ',')
self.assertEqual(mapping[0].source, '"source_field1"')
self.assertEqual(mapping[1].field.name(), 'field3')
self.assertEqual(mapping[1].aggregate, 'first_value')
self.assertEqual(mapping[1].delimiter, ',')
self.assertEqual(mapping[1].source, 'upper("field3")')
# Test move up or down
self.assertFalse(model.moveUp(model.index(0, 0)))
self.assertFalse(model.moveUp(model.index(100, 0)))
self.assertFalse(model.moveDown(model.index(1, 0)))
self.assertFalse(model.moveDown(model.index(100, 0)))
self.assertTrue(model.moveDown(model.index(0, 0)))
mapping = model.mapping()
self.assertEqual(mapping[0].field.name(), 'field3')
self.assertEqual(mapping[1].field.name(), 'source_field1')
self.assertTrue(model.moveUp(model.index(1, 0)))
mapping = model.mapping()
self.assertEqual(mapping[0].field.name(), 'source_field1')
self.assertEqual(mapping[1].field.name(), 'field3')
    def testSetSourceFields(self):
        """Test that changing the source fields updates the mapping with the new fields"""
model = QgsAggregateMappingModel(self.source_fields)
self.assertEqual(model.data(model.index(0, 0), Qt.DisplayRole), '"source_field1"')
self.assertEqual(model.data(model.index(0, 3), Qt.DisplayRole), 'source_field1')
self.assertEqual(model.data(model.index(1, 0), Qt.DisplayRole), '"source_field2"')
self.assertEqual(model.data(model.index(1, 3), Qt.DisplayRole), 'source_field2')
f = QgsField('source_field3', QVariant.String)
fields = self.source_fields
fields.append(f)
model.setSourceFields(fields)
self.assertEqual(model.rowCount(), 3)
self.assertEqual(model.data(model.index(0, 0), Qt.DisplayRole), '"source_field1"')
self.assertEqual(model.data(model.index(0, 3), Qt.DisplayRole), 'source_field1')
self.assertEqual(model.data(model.index(1, 0), Qt.DisplayRole), '"source_field2"')
self.assertEqual(model.data(model.index(1, 3), Qt.DisplayRole), 'source_field2')
self.assertEqual(model.data(model.index(2, 0), Qt.DisplayRole), '"source_field3"')
self.assertEqual(model.data(model.index(2, 3), Qt.DisplayRole), 'source_field3')
def testProperties(self):
model = QgsAggregateMappingModel(self.source_fields)
mapping = model.mapping()
self.assertEqual(mapping[0].field.name(), 'source_field1')
self.assertEqual(mapping[0].source, '"source_field1"')
self.assertEqual(mapping[0].aggregate, 'concatenate')
self.assertEqual(mapping[0].delimiter, ',')
self.assertEqual(mapping[1].field.name(), 'source_field2')
self.assertEqual(mapping[1].source, '"source_field2"')
self.assertEqual(mapping[1].aggregate, 'sum')
self.assertEqual(mapping[1].delimiter, ',')
mapping[0].source = 'upper("source_field2")'
mapping[0].aggregate = 'first_value'
mapping[0].delimiter = '|'
new_aggregate = QgsAggregateMappingModel.Aggregate()
new_aggregate.field = QgsField('output_field3', QVariant.Double, len=4, prec=2)
new_aggregate.source = 'randf(1,2)'
new_aggregate.aggregate = 'mean'
new_aggregate.delimiter = '*'
mapping.append(new_aggregate)
model.setMapping(mapping)
self.assertEqual(model.rowCount(), 3)
self.assertEqual(model.data(model.index(0, 0), Qt.DisplayRole), 'upper("source_field2")')
self.assertEqual(model.data(model.index(0, 1), Qt.DisplayRole), 'first_value')
self.assertEqual(model.data(model.index(0, 2), Qt.DisplayRole), '|')
self.assertEqual(model.data(model.index(0, 3), Qt.DisplayRole), 'source_field1')
self.assertEqual(model.data(model.index(0, 4), Qt.DisplayRole), QVariant.String)
self.assertEqual(model.data(model.index(0, 5), Qt.DisplayRole), 0)
self.assertEqual(model.data(model.index(0, 6), Qt.DisplayRole), 0)
self.assertEqual(model.data(model.index(1, 0), Qt.DisplayRole), '"source_field2"')
self.assertEqual(model.data(model.index(1, 1), Qt.DisplayRole), 'sum')
self.assertEqual(model.data(model.index(1, 2), Qt.DisplayRole), ',')
self.assertEqual(model.data(model.index(1, 3), Qt.DisplayRole), 'source_field2')
self.assertEqual(model.data(model.index(1, 4), Qt.DisplayRole), QVariant.Int)
self.assertEqual(model.data(model.index(1, 5), Qt.DisplayRole), 10)
self.assertEqual(model.data(model.index(1, 6), Qt.DisplayRole), 8)
self.assertEqual(model.data(model.index(2, 0), Qt.DisplayRole), 'randf(1,2)')
self.assertEqual(model.data(model.index(2, 1), Qt.DisplayRole), 'mean')
self.assertEqual(model.data(model.index(2, 2), Qt.DisplayRole), '*')
self.assertEqual(model.data(model.index(2, 3), Qt.DisplayRole), 'output_field3')
self.assertEqual(model.data(model.index(2, 4), Qt.DisplayRole), QVariant.Double)
self.assertEqual(model.data(model.index(2, 5), Qt.DisplayRole), 4)
self.assertEqual(model.data(model.index(2, 6), Qt.DisplayRole), 2)
def testWidget(self):
"""Test widget operations"""
widget = QgsAggregateMappingWidget()
for i in range(10):
widget.appendField(QgsField(str(i)), source=str(i))
self.assertTrue(widget.model().rowCount(QModelIndex()), 10)
def _compare(widget, expected):
actual = []
for aggregate in widget.mapping():
actual.append(int(aggregate.source))
self.assertEqual(actual, expected)
_compare(widget, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
selection_model = widget.selectionModel()
selection_model.clear()
for i in range(0, 10, 2):
selection_model.select(widget.model().index(i, 0), QItemSelectionModel.Select)
self.assertTrue(widget.moveSelectedFieldsDown())
_compare(widget, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8])
selection_model.clear()
for i in range(1, 10, 2):
selection_model.select(widget.model().index(i, 0), QItemSelectionModel.Select)
self.assertTrue(widget.moveSelectedFieldsUp())
_compare(widget, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
selection_model.clear()
for i in range(0, 10, 2):
selection_model.select(widget.model().index(i, 0), QItemSelectionModel.Select)
self.assertTrue(widget.removeSelectedFields())
_compare(widget, [1, 3, 5, 7, 9])
widget.setSourceFields(self.source_fields)
mapping = widget.mapping()
self.assertEqual(mapping[0].field.name(), 'source_field1')
self.assertEqual(mapping[0].source, '"source_field1"')
self.assertEqual(mapping[0].aggregate, 'concatenate')
self.assertEqual(mapping[0].delimiter, ',')
self.assertEqual(mapping[1].field.name(), 'source_field2')
self.assertEqual(mapping[1].source, '"source_field2"')
self.assertEqual(mapping[1].aggregate, 'sum')
self.assertEqual(mapping[1].delimiter, ',')
mapping[0].source = 'upper("source_field2")'
mapping[0].aggregate = 'first_value'
mapping[0].delimiter = '|'
new_aggregate = QgsAggregateMappingModel.Aggregate()
new_aggregate.field = QgsField('output_field3', QVariant.Double, len=4, prec=2)
new_aggregate.source = 'randf(1,2)'
new_aggregate.aggregate = 'mean'
new_aggregate.delimiter = '*'
mapping.append(new_aggregate)
widget.setMapping(mapping)
mapping = widget.mapping()
self.assertEqual(mapping[0].field.name(), 'source_field1')
self.assertEqual(mapping[0].source, 'upper("source_field2")')
self.assertEqual(mapping[0].aggregate, 'first_value')
self.assertEqual(mapping[0].delimiter, '|')
self.assertEqual(mapping[1].field.name(), 'source_field2')
self.assertEqual(mapping[1].source, '"source_field2"')
self.assertEqual(mapping[1].aggregate, 'sum')
self.assertEqual(mapping[1].delimiter, ',')
self.assertEqual(mapping[2].field.name(), 'output_field3')
self.assertEqual(mapping[2].source, 'randf(1,2)')
self.assertEqual(mapping[2].aggregate, 'mean')
self.assertEqual(mapping[2].delimiter, '*')
if __name__ == '__main__':
unittest.main()
|
omarkohl/pytest
|
refs/heads/master
|
doc/en/example/assertion/failure_demo.py
|
179
|
from pytest import raises
import _pytest._code
import py
def otherfunc(a,b):
assert a==b
def somefunc(x,y):
otherfunc(x,y)
def otherfunc_multi(a,b):
assert (a ==
b)
def test_generative(param1, param2):
assert param1 * 2 < param2
def pytest_generate_tests(metafunc):
if 'param1' in metafunc.fixturenames:
metafunc.addcall(funcargs=dict(param1=3, param2=6))
class TestFailing(object):
def test_simple(self):
def f():
return 42
def g():
return 43
assert f() == g()
def test_simple_multiline(self):
otherfunc_multi(
42,
6*9)
def test_not(self):
def f():
return 42
assert not f()
class TestSpecialisedExplanations(object):
def test_eq_text(self):
assert 'spam' == 'eggs'
def test_eq_similar_text(self):
assert 'foo 1 bar' == 'foo 2 bar'
def test_eq_multiline_text(self):
assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
def test_eq_long_text(self):
a = '1'*100 + 'a' + '2'*100
b = '1'*100 + 'b' + '2'*100
assert a == b
def test_eq_long_text_multiline(self):
a = '1\n'*100 + 'a' + '2\n'*100
b = '1\n'*100 + 'b' + '2\n'*100
assert a == b
def test_eq_list(self):
assert [0, 1, 2] == [0, 1, 3]
def test_eq_list_long(self):
a = [0]*100 + [1] + [3]*100
b = [0]*100 + [2] + [3]*100
assert a == b
def test_eq_dict(self):
assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
def test_eq_set(self):
assert set([0, 10, 11, 12]) == set([0, 20, 21])
def test_eq_longer_list(self):
assert [1,2] == [1,2,3]
def test_in_list(self):
assert 1 in [0, 2, 3, 4, 5]
def test_not_in_text_multiline(self):
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
assert 'foo' not in text
def test_not_in_text_single(self):
text = 'single foo line'
assert 'foo' not in text
def test_not_in_text_single_long(self):
text = 'head ' * 50 + 'foo ' + 'tail ' * 20
assert 'foo' not in text
def test_not_in_text_single_long_term(self):
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
assert 'f'*70 not in text
def test_attribute():
class Foo(object):
b = 1
i = Foo()
assert i.b == 2
def test_attribute_instance():
class Foo(object):
b = 1
assert Foo().b == 2
def test_attribute_failure():
class Foo(object):
def _get_b(self):
raise Exception('Failed to get attrib')
b = property(_get_b)
i = Foo()
assert i.b == 2
def test_attribute_multiple():
class Foo(object):
b = 1
class Bar(object):
b = 2
assert Foo().b == Bar().b
def globf(x):
return x+1
class TestRaises:
def test_raises(self):
s = 'qwe'
raises(TypeError, "int(s)")
def test_raises_doesnt(self):
raises(IOError, "int('3')")
def test_raise(self):
raise ValueError("demo error")
def test_tupleerror(self):
a,b = [1]
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
l = [1,2,3]
print ("l is %r" % l)
a,b = l.pop()
def test_some_error(self):
if namenotexi:
pass
def func1(self):
assert 41 == 42
# thanks to Matthew Scott for this test
def test_dynamic_compile_shows_nicely():
src = 'def foo():\n assert 1 == 0\n'
name = 'abc-123'
module = py.std.imp.new_module(name)
code = _pytest._code.compile(src, name, 'exec')
py.builtin.exec_(code, module.__dict__)
py.std.sys.modules[name] = module
module.foo()
class TestMoreErrors:
def test_complex_error(self):
def f():
return 44
def g():
return 43
somefunc(f(), g())
def test_z1_unpack_error(self):
l = []
a,b = l
def test_z2_type_error(self):
l = 3
a,b = l
def test_startswith(self):
s = "123"
g = "456"
assert s.startswith(g)
def test_startswith_nested(self):
def f():
return "123"
def g():
return "456"
assert f().startswith(g())
def test_global_func(self):
assert isinstance(globf(42), float)
def test_instance(self):
self.x = 6*7
assert self.x != 42
def test_compare(self):
assert globf(10) < 5
def test_try_finally(self):
x = 1
try:
assert x == 0
finally:
x = 0
class TestCustomAssertMsg:
def test_single_line(self):
class A:
a = 1
b = 2
assert A.a == b, "A.a appears not to be b"
def test_multiline(self):
class A:
a = 1
b = 2
assert A.a == b, "A.a appears not to be b\n" \
"or does not appear to be b\none of those"
def test_custom_repr(self):
class JSON:
a = 1
def __repr__(self):
return "This is JSON\n{\n 'foo': 'bar'\n}"
a = JSON()
b = 2
assert a.a == b, a
|
jmesteve/openerp
|
refs/heads/master
|
openerp/addons/account/account_invoice.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
import openerp.addons.decimal_precision as dp
import openerp.exceptions
from openerp import netsvc
from openerp import pooler
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class account_invoice(osv.osv):
def _amount_all(self, cr, uid, ids, name, args, context=None):
res = {}
for invoice in self.browse(cr, uid, ids, context=context):
res[invoice.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0
}
for line in invoice.invoice_line:
res[invoice.id]['amount_untaxed'] += line.price_subtotal
for line in invoice.tax_line:
res[invoice.id]['amount_tax'] += line.amount
res[invoice.id]['amount_total'] = res[invoice.id]['amount_tax'] + res[invoice.id]['amount_untaxed']
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
type_inv = context.get('type', 'out_invoice')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
type2journal = {'out_invoice': 'sale', 'in_invoice': 'purchase', 'out_refund': 'sale_refund', 'in_refund': 'purchase_refund'}
journal_obj = self.pool.get('account.journal')
domain = [('company_id', '=', company_id)]
if isinstance(type_inv, list):
domain.append(('type', 'in', [type2journal.get(type) for type in type_inv if type2journal.get(type)]))
else:
domain.append(('type', '=', type2journal.get(type_inv, 'sale')))
res = journal_obj.search(cr, uid, domain, limit=1)
return res and res[0] or False
def _get_currency(self, cr, uid, context=None):
res = False
journal_id = self._get_journal(cr, uid, context=context)
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
res = journal.currency and journal.currency.id or journal.company_id.currency_id.id
return res
def _get_journal_analytic(self, cr, uid, type_inv, context=None):
type2journal = {'out_invoice': 'sale', 'in_invoice': 'purchase', 'out_refund': 'sale', 'in_refund': 'purchase'}
tt = type2journal.get(type_inv, 'sale')
result = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=',tt)], context=context)
if not result:
raise osv.except_osv(_('No Analytic Journal!'),_("You must define an analytic journal of type '%s'!") % (tt,))
return result[0]
def _get_type(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('type', 'out_invoice')
def _reconciled(self, cr, uid, ids, name, args, context=None):
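        # Recompute whether each invoice is fully paid; if a 'paid' invoice is no longer
        # fully reconciled, fire the workflow signal to reopen it.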
res = {}
wf_service = netsvc.LocalService("workflow")
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = self.test_paid(cr, uid, [inv.id])
if not res[inv.id] and inv.state == 'paid':
wf_service.trg_validate(uid, 'account.invoice', inv.id, 'open_test', cr)
return res
def _get_reference_type(self, cr, uid, context=None):
return [('none', _('Free Reference'))]
    def _amount_residual(self, cr, uid, ids, name, args, context=None):
        """Function of the field residual. It computes the residual amount (balance) for each invoice."""
if context is None:
context = {}
ctx = context.copy()
result = {}
currency_obj = self.pool.get('res.currency')
for invoice in self.browse(cr, uid, ids, context=context):
nb_inv_in_partial_rec = max_invoice_id = 0
result[invoice.id] = 0.0
if invoice.move_id:
for aml in invoice.move_id.line_id:
if aml.account_id.type in ('receivable','payable'):
if aml.currency_id and aml.currency_id.id == invoice.currency_id.id:
result[invoice.id] += aml.amount_residual_currency
else:
ctx['date'] = aml.date
result[invoice.id] += currency_obj.compute(cr, uid, aml.company_id.currency_id.id, invoice.currency_id.id, aml.amount_residual, context=ctx)
if aml.reconcile_partial_id.line_partial_ids:
#we check if the invoice is partially reconciled and if there are other invoices
#involved in this partial reconciliation (and we sum these invoices)
for line in aml.reconcile_partial_id.line_partial_ids:
if line.invoice:
nb_inv_in_partial_rec += 1
#store the max invoice id as for this invoice we will make a balance instead of a simple division
max_invoice_id = max(max_invoice_id, line.invoice.id)
if nb_inv_in_partial_rec:
#if there are several invoices in a partial reconciliation, we split the residual by the number
#of invoice to have a sum of residual amounts that matches the partner balance
new_value = currency_obj.round(cr, uid, invoice.currency_id, result[invoice.id] / nb_inv_in_partial_rec)
if invoice.id == max_invoice_id:
#if it's the last the invoice of the bunch of invoices partially reconciled together, we make a
#balance to avoid rounding errors
result[invoice.id] = result[invoice.id] - ((nb_inv_in_partial_rec - 1) * new_value)
else:
result[invoice.id] = new_value
#prevent the residual amount on the invoice to be less than 0
result[invoice.id] = max(result[invoice.id], 0.0)
return result
# Give Journal Items related to the payment reconciled to this invoice
# Return ids of partial and total payments related to the selected invoices
def _get_lines(self, cr, uid, ids, name, arg, context=None):
res = {}
for invoice in self.browse(cr, uid, ids, context=context):
id = invoice.id
res[id] = []
if not invoice.move_id:
continue
data_lines = [x for x in invoice.move_id.line_id if x.account_id.id == invoice.account_id.id]
partial_ids = []
for line in data_lines:
ids_line = []
if line.reconcile_id:
ids_line = line.reconcile_id.line_id
elif line.reconcile_partial_id:
ids_line = line.reconcile_partial_id.line_partial_ids
l = map(lambda x: x.id, ids_line)
partial_ids.append(line.id)
res[id] =[x for x in l if x <> line.id and x not in partial_ids]
return res
def _get_invoice_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.invoice.line').browse(cr, uid, ids, context=context):
result[line.invoice_id.id] = True
return result.keys()
def _get_invoice_tax(self, cr, uid, ids, context=None):
result = {}
for tax in self.pool.get('account.invoice.tax').browse(cr, uid, ids, context=context):
result[tax.invoice_id.id] = True
return result.keys()
def _compute_lines(self, cr, uid, ids, name, args, context=None):
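        # Collect, per invoice, the ids of journal items reconciled (fully or partially)
        # with the invoice's own move lines, excluding those move lines themselves.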
result = {}
for invoice in self.browse(cr, uid, ids, context=context):
src = []
lines = []
if invoice.move_id:
for m in invoice.move_id.line_id:
temp_lines = []
if m.reconcile_id:
temp_lines = map(lambda x: x.id, m.reconcile_id.line_id)
elif m.reconcile_partial_id:
temp_lines = map(lambda x: x.id, m.reconcile_partial_id.line_partial_ids)
lines += [x for x in temp_lines if x not in lines]
src.append(m.id)
lines = filter(lambda x: x not in src, lines)
result[invoice.id] = lines
return result
def _get_invoice_from_line(self, cr, uid, ids, context=None):
move = {}
for line in self.pool.get('account.move.line').browse(cr, uid, ids, context=context):
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
move[line2.move_id.id] = True
if line.reconcile_id:
for line2 in line.reconcile_id.line_id:
move[line2.move_id.id] = True
invoice_ids = []
if move:
invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('move_id','in',move.keys())], context=context)
return invoice_ids
def _get_invoice_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
invoice_ids = []
if move:
invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('move_id','in',move.keys())], context=context)
return invoice_ids
_name = "account.invoice"
_inherit = ['mail.thread']
_description = 'Invoice'
_order = "id desc"
_track = {
'type': {
},
'state': {
'account.mt_invoice_paid': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'paid' and obj['type'] in ('out_invoice', 'out_refund'),
'account.mt_invoice_validated': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'open' and obj['type'] in ('out_invoice', 'out_refund'),
},
}
_columns = {
'name': fields.char('Description', size=64, select=True, readonly=True, states={'draft':[('readonly',False)]}),
'origin': fields.char('Source Document', size=64, help="Reference of the document that produced this invoice.", readonly=True, states={'draft':[('readonly',False)]}),
'supplier_invoice_number': fields.char('Supplier Invoice Number', size=64, help="The reference of this invoice as provided by the supplier.", readonly=True, states={'draft':[('readonly',False)]}),
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True, select=True, change_default=True, track_visibility='always'),
'number': fields.related('move_id','name', type='char', readonly=True, size=64, relation='account.move', store=True, string='Number'),
'internal_number': fields.char('Invoice Number', size=32, readonly=True, help="Unique number of the invoice, computed automatically when the invoice is created."),
'reference': fields.char('Invoice Reference', size=64, help="The partner reference of this invoice."),
'reference_type': fields.selection(_get_reference_type, 'Payment Reference',
required=True, readonly=True, states={'draft':[('readonly',False)]}),
'comment': fields.text('Additional Information'),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Paid'),
('cancel','Cancelled'),
],'Status', select=True, readonly=True, track_visibility='onchange',
            help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed invoice. \
            \n* The \'Pro-forma\' status is used when the invoice has been validated as pro-forma but does not yet have an invoice number. \
            \n* The \'Open\' status is used once the user validates the invoice; an invoice number is generated and the invoice stays open until it is paid. \
            \n* The \'Paid\' status is set automatically when the invoice is paid. Its related journal entries may or may not be reconciled. \
            \n* The \'Cancelled\' status is used when the user cancels the invoice.'),
'sent': fields.boolean('Sent', readonly=True, help="It indicates that the invoice has been sent."),
'date_invoice': fields.date('Invoice Date', readonly=True, states={'draft':[('readonly',False)]}, select=True, help="Keep empty to use the current date"),
'date_due': fields.date('Due Date', readonly=True, states={'draft':[('readonly',False)]}, select=True,
help="If you use payment terms, the due date will be computed automatically at the generation "\
"of accounting entries. The payment term may compute several due dates, for example 50% now and 50% in one month, but if you want to force a due date, make sure that the payment term is not set on the invoice. If you keep the payment term and the due date empty, it means direct payment."),
'partner_id': fields.many2one('res.partner', 'Partner', change_default=True, readonly=True, required=True, states={'draft':[('readonly',False)]}, track_visibility='always'),
'payment_term': fields.many2one('account.payment.term', 'Payment Terms',readonly=True, states={'draft':[('readonly',False)]},
help="If you use payment terms, the due date will be computed automatically at the generation "\
"of accounting entries. If you keep the payment term and the due date empty, it means direct payment. "\
"The payment term may compute several due dates, for example 50% now, 50% in one month."),
'period_id': fields.many2one('account.period', 'Force Period', domain=[('state','<>','done')], help="Keep empty to use the period of the validation(invoice) date.", readonly=True, states={'draft':[('readonly',False)]}),
'account_id': fields.many2one('account.account', 'Account', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="The partner account used for this invoice."),
'invoice_line': fields.one2many('account.invoice.line', 'invoice_id', 'Invoice Lines', readonly=True, states={'draft':[('readonly',False)]}),
'tax_line': fields.one2many('account.invoice.tax', 'invoice_id', 'Tax Lines', readonly=True, states={'draft':[('readonly',False)]}),
'move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, select=1, ondelete='restrict', help="Link to the automatically generated Journal Items."),
'amount_untaxed': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Subtotal', track_visibility='always',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'amount_tax': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Tax',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'amount_total': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Total',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'currency_id': fields.many2one('res.currency', 'Currency', required=True, readonly=True, states={'draft':[('readonly',False)]}, track_visibility='always'),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, change_default=True, readonly=True, states={'draft':[('readonly',False)]}),
'check_total': fields.float('Verification Total', digits_compute=dp.get_precision('Account'), readonly=True, states={'draft':[('readonly',False)]}),
'reconciled': fields.function(_reconciled, string='Paid/Reconciled', type='boolean',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, None, 50), # Check if we can remove ?
'account.move.line': (_get_invoice_from_line, None, 50),
'account.move.reconcile': (_get_invoice_from_reconcile, None, 50),
}, help="It indicates that the invoice has been paid and the journal entry of the invoice has been reconciled with one or several journal entries of payment."),
'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',
help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Supplier Refund, otherwise a Partner bank account number.', readonly=True, states={'draft':[('readonly',False)]}),
'move_lines':fields.function(_get_lines, type='many2many', relation='account.move.line', string='Entry Lines'),
'residual': fields.function(_amount_residual, digits_compute=dp.get_precision('Account'), string='Balance',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line','move_id'], 50),
'account.invoice.tax': (_get_invoice_tax, None, 50),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 50),
'account.move.line': (_get_invoice_from_line, None, 50),
'account.move.reconcile': (_get_invoice_from_reconcile, None, 50),
},
help="Remaining amount due."),
'payment_ids': fields.function(_compute_lines, relation='account.move.line', type="many2many", string='Payments'),
'move_name': fields.char('Journal Entry', size=64, readonly=True, states={'draft':[('readonly',False)]}),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True, track_visibility='onchange', states={'draft':[('readonly',False)]}),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True, states={'draft':[('readonly',False)]})
}
_defaults = {
'type': _get_type,
'state': 'draft',
'journal_id': _get_journal,
'currency_id': _get_currency,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.invoice', context=c),
'reference_type': 'none',
'check_total': 0.0,
'internal_number': False,
'user_id': lambda s, cr, u, c: u,
'sent': False,
}
_sql_constraints = [
('number_uniq', 'unique(number, company_id, journal_id, type)', 'Invoice Number must be unique per Company!'),
]
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
journal_obj = self.pool.get('account.journal')
if context is None:
context = {}
if context.get('active_model', '') in ['res.partner'] and context.get('active_ids', False) and context['active_ids']:
partner = self.pool.get(context['active_model']).read(cr, uid, context['active_ids'], ['supplier','customer'])[0]
if not view_type:
view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')])
view_type = 'tree'
if view_type == 'form':
if partner['supplier'] and not partner['customer']:
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.supplier.form')])
elif partner['customer'] and not partner['supplier']:
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.form')])
if view_id and isinstance(view_id, (list, tuple)):
view_id = view_id[0]
res = super(account_invoice,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
type = context.get('journal_type', False)
for field in res['fields']:
if field == 'journal_id' and type:
journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', type)], context=context, limit=None, name_get_uid=1)
res['fields'][field]['selection'] = journal_select
doc = etree.XML(res['arch'])
if context.get('type', False):
for node in doc.xpath("//field[@name='partner_bank_id']"):
if context['type'] == 'in_refund':
node.set('domain', "[('partner_id.ref_companies', 'in', [company_id])]")
elif context['type'] == 'out_refund':
node.set('domain', "[('partner_id', '=', partner_id)]")
res['arch'] = etree.tostring(doc)
if view_type == 'search':
if context.get('type', 'in_invoice') in ('out_invoice', 'out_refund'):
for node in doc.xpath("//group[@name='extended filter']"):
doc.remove(node)
res['arch'] = etree.tostring(doc)
if view_type == 'tree':
partner_string = _('Customer')
if context.get('type', 'out_invoice') in ('in_invoice', 'in_refund'):
partner_string = _('Supplier')
for node in doc.xpath("//field[@name='reference']"):
node.set('invisible', '0')
for node in doc.xpath("//field[@name='partner_id']"):
node.set('string', partner_string)
res['arch'] = etree.tostring(doc)
return res
def get_log_context(self, cr, uid, context=None):
if context is None:
context = {}
res = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'invoice_form')
view_id = res and res[1] or False
context['view_id'] = view_id
return context
def invoice_print(self, cr, uid, ids, context=None):
'''
        This function prints the invoice and marks it as sent, so that the next step of the workflow can be seen more easily
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
self.write(cr, uid, ids, {'sent': True}, context=context)
datas = {
'ids': ids,
'model': 'account.invoice',
'form': self.read(cr, uid, ids[0], context=context)
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'account.invoice',
'datas': datas,
'nodestroy' : True
}
def action_invoice_sent(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi invoice template message loaded by default
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'account', 'email_template_edi_invoice')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'account.invoice',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_invoice_as_sent': True,
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def confirm_paid(self, cr, uid, ids, context=None):
if context is None:
context = {}
self.write(cr, uid, ids, {'state':'paid'}, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
invoices = self.read(cr, uid, ids, ['state','internal_number'], context=context)
unlink_ids = []
for t in invoices:
if t['state'] not in ('draft', 'cancel'):
raise openerp.exceptions.Warning(_('You cannot delete an invoice which is not draft or cancelled. You should refund it instead.'))
            # change to allow deleting validated invoices -> the elif below was disabled
#elif t['internal_number']:
# raise openerp.exceptions.Warning(_('You cannot delete an invoice after it has been validated (and received a number). You can set it back to "Draft" state and modify its content, then re-confirm it.'))
else:
unlink_ids.append(t['id'])
osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
return True
def onchange_partner_id(self, cr, uid, ids, type, partner_id,\
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False):
partner_payment_term = False
acc_id = False
bank_id = False
fiscal_position = False
opt = [('uid', str(uid))]
if partner_id:
opt.insert(0, ('id', partner_id))
p = self.pool.get('res.partner').browse(cr, uid, partner_id)
if company_id:
if (p.property_account_receivable.company_id and (p.property_account_receivable.company_id.id != company_id)) and (p.property_account_payable.company_id and (p.property_account_payable.company_id.id != company_id)):
property_obj = self.pool.get('ir.property')
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
if not rec_pro_id:
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('company_id','=',company_id)])
if not pay_pro_id:
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('company_id','=',company_id)])
rec_line_data = property_obj.read(cr,uid,rec_pro_id,['name','value_reference','res_id'])
pay_line_data = property_obj.read(cr,uid,pay_pro_id,['name','value_reference','res_id'])
rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False
pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False
if not rec_res_id and not pay_res_id:
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find a chart of accounts for this company, you should create one.'))
account_obj = self.pool.get('account.account')
rec_obj_acc = account_obj.browse(cr, uid, [rec_res_id])
pay_obj_acc = account_obj.browse(cr, uid, [pay_res_id])
p.property_account_receivable = rec_obj_acc[0]
p.property_account_payable = pay_obj_acc[0]
if type in ('out_invoice', 'out_refund'):
acc_id = p.property_account_receivable.id
partner_payment_term = p.property_payment_term and p.property_payment_term.id or False
else:
acc_id = p.property_account_payable.id
partner_payment_term = p.property_supplier_payment_term and p.property_supplier_payment_term.id or False
fiscal_position = p.property_account_position and p.property_account_position.id or False
if p.bank_ids:
bank_id = p.bank_ids[0].id
result = {'value': {
'account_id': acc_id,
'payment_term': partner_payment_term,
'fiscal_position': fiscal_position
}
}
if type in ('in_invoice', 'in_refund'):
result['value']['partner_bank_id'] = bank_id
if payment_term != partner_payment_term:
if partner_payment_term:
to_update = self.onchange_payment_term_date_invoice(
cr, uid, ids, partner_payment_term, date_invoice)
result['value'].update(to_update['value'])
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(cr, uid, ids, bank_id)
result['value'].update(to_update['value'])
return result
def onchange_journal_id(self, cr, uid, ids, journal_id=False, context=None):
result = {}
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
currency_id = journal.currency and journal.currency.id or journal.company_id.currency_id.id
company_id = journal.company_id.id
result = {'value': {
'currency_id': currency_id,
'company_id': company_id,
}
}
return result
def onchange_payment_term_date_invoice(self, cr, uid, ids, payment_term_id, date_invoice):
res = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not date_invoice:
date_invoice = time.strftime('%Y-%m-%d')
if not payment_term_id:
inv = self.browse(cr, uid, ids[0])
            #make sure the due date keeps the value entered by the user when no payment term is defined
return {'value':{'date_due': inv.date_due and inv.date_due or date_invoice}}
pterm_list = self.pool.get('account.payment.term').compute(cr, uid, payment_term_id, value=1, date_ref=date_invoice)
if pterm_list:
pterm_list = [line[0] for line in pterm_list]
pterm_list.sort()
res = {'value':{'date_due': pterm_list[-1]}}
else:
            raise osv.except_osv(_('Insufficient Data!'), _('The payment term of the supplier does not have a payment term line.'))
return res
def onchange_invoice_line(self, cr, uid, ids, lines):
return {}
def onchange_partner_bank(self, cursor, user, ids, partner_bank_id=False):
return {'value': {}}
def onchange_company_id(self, cr, uid, ids, company_id, part_id, type, invoice_line, currency_id):
#TODO: add the missing context parameter when forward-porting in trunk so we can remove
# this hack!
context = self.pool['res.users'].context_get(cr, uid)
val = {}
dom = {}
obj_journal = self.pool.get('account.journal')
account_obj = self.pool.get('account.account')
inv_line_obj = self.pool.get('account.invoice.line')
if company_id and part_id and type:
acc_id = False
partner_obj = self.pool.get('res.partner').browse(cr,uid,part_id)
if partner_obj.property_account_payable and partner_obj.property_account_receivable:
if partner_obj.property_account_payable.company_id.id != company_id and partner_obj.property_account_receivable.company_id.id != company_id:
property_obj = self.pool.get('ir.property')
rec_pro_id = property_obj.search(cr, uid, [('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(part_id)+''),('company_id','=',company_id)])
pay_pro_id = property_obj.search(cr, uid, [('name','=','property_account_payable'),('res_id','=','res.partner,'+str(part_id)+''),('company_id','=',company_id)])
if not rec_pro_id:
rec_pro_id = property_obj.search(cr, uid, [('name','=','property_account_receivable'),('company_id','=',company_id)])
if not pay_pro_id:
pay_pro_id = property_obj.search(cr, uid, [('name','=','property_account_payable'),('company_id','=',company_id)])
rec_line_data = property_obj.read(cr, uid, rec_pro_id, ['name','value_reference','res_id'])
pay_line_data = property_obj.read(cr, uid, pay_pro_id, ['name','value_reference','res_id'])
rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False
pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False
if not rec_res_id and not pay_res_id:
raise osv.except_osv(_('Configuration Error!'),
                        _('Cannot find a chart of accounts, you should create one from Settings\Configuration\Accounting menu.'))
if type in ('out_invoice', 'out_refund'):
acc_id = rec_res_id
else:
acc_id = pay_res_id
val= {'account_id': acc_id}
if ids:
if company_id:
inv_obj = self.browse(cr,uid,ids)
for line in inv_obj[0].invoice_line:
if line.account_id:
if line.account_id.company_id.id != company_id:
result_id = account_obj.search(cr, uid, [('name','=',line.account_id.name),('company_id','=',company_id)])
if not result_id:
raise osv.except_osv(_('Configuration Error!'),
                            _('Cannot find a chart of accounts, you should create one from Settings\Configuration\Accounting menu.'))
inv_line_obj.write(cr, uid, [line.id], {'account_id': result_id[-1]})
else:
if invoice_line:
for inv_line in invoice_line:
obj_l = account_obj.browse(cr, uid, inv_line[2]['account_id'])
if obj_l.company_id.id != company_id:
raise osv.except_osv(_('Configuration Error!'),
                            _('Invoice line account\'s company and invoice\'s company do not match.'))
else:
continue
if company_id and type:
journal_mapping = {
'out_invoice': 'sale',
'out_refund': 'sale_refund',
'in_refund': 'purchase_refund',
'in_invoice': 'purchase',
}
journal_type = journal_mapping[type]
journal_ids = obj_journal.search(cr, uid, [('company_id','=',company_id), ('type', '=', journal_type)])
if journal_ids:
val['journal_id'] = journal_ids[0]
ir_values_obj = self.pool.get('ir.values')
res_journal_default = ir_values_obj.get(cr, uid, 'default', 'type=%s' % (type), ['account.invoice'])
for r in res_journal_default:
if r[1] == 'journal_id' and r[2] in journal_ids:
val['journal_id'] = r[2]
if not val.get('journal_id', False):
journal_type_map = dict(obj_journal._columns['type'].selection)
journal_type_label = self.pool['ir.translation']._get_source(cr, uid, None, ('code','selection'),
context.get('lang'),
journal_type_map.get(journal_type))
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.') % ('"%s"' % journal_type_label))
dom = {'journal_id': [('id', 'in', journal_ids)]}
else:
journal_ids = obj_journal.search(cr, uid, [])
return {'value': val, 'domain': dom}
# go from canceled state to draft state
def action_cancel_draft(self, cr, uid, ids, *args):
        # change to allow deleting validated invoices -> the self.write call below was changed
#self.write(cr, uid, ids, {'state':'draft'})
self.write(cr, uid, ids, {'state':'draft', 'internal_number':False ,'move_id':False})
wf_service = netsvc.LocalService("workflow")
for inv_id in ids:
wf_service.trg_delete(uid, 'account.invoice', inv_id, cr)
wf_service.trg_create(uid, 'account.invoice', inv_id, cr)
return True
# Workflow stuff
#################
    # return the ids of the move lines which have the same account as the invoice
# whose id is in ids
def move_line_id_payment_get(self, cr, uid, ids, *args):
if not ids: return []
result = self.move_line_id_payment_gets(cr, uid, ids, *args)
return result.get(ids[0], [])
def move_line_id_payment_gets(self, cr, uid, ids, *args):
res = {}
if not ids: return res
cr.execute('SELECT i.id, l.id '\
'FROM account_move_line l '\
'LEFT JOIN account_invoice i ON (i.move_id=l.move_id) '\
'WHERE i.id IN %s '\
'AND l.account_id=i.account_id',
(tuple(ids),))
for r in cr.fetchall():
res.setdefault(r[0], [])
res[r[0]].append( r[1] )
return res
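    # Illustrative sketch of the mapping returned above (hypothetical ids): for
    # each invoice id, the journal item ids of its move that sit on the invoice
    # (partner) account, i.e. the lines a payment gets reconciled against:
    #     {7: [101, 102], 9: [153]}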
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default.update({
'state':'draft',
'number':False,
'move_id':False,
'move_name':False,
'internal_number': False,
'period_id': False,
'sent': False,
})
if 'date_invoice' not in default:
default.update({
'date_invoice':False
})
if 'date_due' not in default:
default.update({
'date_due':False
})
return super(account_invoice, self).copy(cr, uid, id, default, context)
def test_paid(self, cr, uid, ids, *args):
res = self.move_line_id_payment_get(cr, uid, ids)
if not res:
return False
ok = True
for id in res:
cr.execute('select reconcile_id from account_move_line where id=%s', (id,))
ok = ok and bool(cr.fetchone()[0])
return ok
def button_reset_taxes(self, cr, uid, ids, context=None):
if context is None:
context = {}
ctx = context.copy()
ait_obj = self.pool.get('account.invoice.tax')
for id in ids:
cr.execute("DELETE FROM account_invoice_tax WHERE invoice_id=%s AND manual is False", (id,))
partner = self.browse(cr, uid, id, context=ctx).partner_id
if partner.lang:
ctx.update({'lang': partner.lang})
for taxe in ait_obj.compute(cr, uid, id, context=ctx).values():
ait_obj.create(cr, uid, taxe)
# Update the stored value (fields.function), so we write to trigger recompute
self.pool.get('account.invoice').write(cr, uid, ids, {'invoice_line':[]}, context=ctx)
return True
def button_compute(self, cr, uid, ids, context=None, set_total=False):
self.button_reset_taxes(cr, uid, ids, context)
for inv in self.browse(cr, uid, ids, context=context):
if set_total:
self.pool.get('account.invoice').write(cr, uid, [inv.id], {'check_total': inv.amount_total})
return True
def _convert_ref(self, cr, uid, ref):
return (ref or '').replace('/','')
def _get_analytic_lines(self, cr, uid, id, context=None):
if context is None:
context = {}
inv = self.browse(cr, uid, id)
cur_obj = self.pool.get('res.currency')
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
if inv.type in ('out_invoice', 'in_refund'):
sign = 1
else:
sign = -1
iml = self.pool.get('account.invoice.line').move_line_get(cr, uid, inv.id, context=context)
for il in iml:
if il['account_analytic_id']:
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
else:
ref = self._convert_ref(cr, uid, inv.number)
if not inv.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (inv.journal_id.name,))
il['analytic_lines'] = [(0,0, {
'name': il['name'],
'date': inv['date_invoice'],
'account_id': il['account_analytic_id'],
'unit_amount': il['quantity'],
'amount': cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, il['price'], context={'date': inv.date_invoice}) * sign,
'product_id': il['product_id'],
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': inv.journal_id.analytic_journal_id.id,
'ref': ref,
})]
return iml
def action_date_assign(self, cr, uid, ids, *args):
for inv in self.browse(cr, uid, ids):
res = self.onchange_payment_term_date_invoice(cr, uid, inv.id, inv.payment_term.id, inv.date_invoice)
if res and res['value']:
self.write(cr, uid, [inv.id], res['value'])
return True
def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):
"""finalize_invoice_move_lines(cr, uid, invoice, move_lines) -> move_lines
Hook method to be overridden in additional modules to verify and possibly alter the
move lines to be created by an invoice, for special cases.
:param invoice_browse: browsable record of the invoice that is generating the move lines
        :param move_lines: list of (0, 0, vals) creation commands for the account.move.line records (as for create())
:return: the (possibly updated) final move_lines to create for this invoice
"""
return move_lines
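    # Illustrative override sketch for the hook above (hypothetical addon code,
    # not part of this module). As called from action_move_create(), the lines
    # arrive as (0, 0, vals) creation commands:
    #
    #     def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):
    #         move_lines = super(account_invoice, self).finalize_invoice_move_lines(
    #             cr, uid, invoice_browse, move_lines)
    #         for command, res_id, vals in move_lines:
    #             vals['name'] = '%s / %s' % (invoice_browse.partner_id.name, vals['name'])
    #         return move_lines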
def check_tax_lines(self, cr, uid, inv, compute_taxes, ait_obj):
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id
if not inv.tax_line:
for tax in compute_taxes.values():
ait_obj.create(cr, uid, tax)
else:
tax_key = []
for tax in inv.tax_line:
if tax.manual:
continue
key = (tax.tax_code_id.id, tax.base_code_id.id, tax.account_id.id, tax.account_analytic_id.id)
tax_key.append(key)
if not key in compute_taxes:
raise osv.except_osv(_('Warning!'), _('Global taxes defined, but they are not in invoice lines !'))
base = compute_taxes[key]['base']
if abs(base - tax.base) > company_currency.rounding:
raise osv.except_osv(_('Warning!'), _('Tax base different!\nClick on compute to update the tax base.'))
for key in compute_taxes:
if not key in tax_key:
raise osv.except_osv(_('Warning!'), _('Taxes are missing!\nClick on compute button.'))
def compute_invoice_totals(self, cr, uid, inv, company_currency, ref, invoice_move_lines, context=None):
if context is None:
context={}
total = 0
total_currency = 0
cur_obj = self.pool.get('res.currency')
for i in invoice_move_lines:
if inv.currency_id.id != company_currency:
context.update({'date': inv.date_invoice or time.strftime('%Y-%m-%d')})
i['currency_id'] = inv.currency_id.id
i['amount_currency'] = i['price']
i['price'] = cur_obj.compute(cr, uid, inv.currency_id.id,
company_currency, i['price'],
context=context)
else:
i['amount_currency'] = False
i['currency_id'] = False
i['ref'] = ref
if inv.type in ('out_invoice','in_refund'):
total += i['price']
total_currency += i['amount_currency'] or i['price']
i['price'] = - i['price']
else:
total -= i['price']
total_currency -= i['amount_currency'] or i['price']
return total, total_currency, invoice_move_lines
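    # Illustrative sign-handling sketch for the totals computed above
    # (hypothetical amounts, taxes ignored): a customer invoice (out_invoice)
    # with two lines of 100.0 and 50.0 yields total = 150.0 while each line's
    # 'price' is flipped to -100.0 / -50.0; the single 'dest' counterpart line
    # created in action_move_create() then carries +150.0 and the move balances.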
def inv_line_characteristic_hashcode(self, invoice, invoice_line):
"""Overridable hashcode generation for invoice lines. Lines having the same hashcode
will be grouped together if the journal has the 'group line' option. Of course a module
can add fields to invoice lines that would need to be tested too before merging lines
or not."""
return "%s-%s-%s-%s-%s"%(
invoice_line['account_id'],
invoice_line.get('tax_code_id',"False"),
invoice_line.get('product_id',"False"),
invoice_line.get('analytic_account_id',"False"),
invoice_line.get('date_maturity',"False"))
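    # Illustrative sketch of the hashcode above (hypothetical ids): a line dict
    # such as
    #     {'account_id': 42, 'tax_code_id': 7, 'product_id': False,
    #      'analytic_account_id': False, 'date_maturity': False}
    # hashes to "42-7-False-False-False"; move lines producing the same string
    # are merged by group_lines() below when the journal groups invoice lines.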
def group_lines(self, cr, uid, iml, line, inv):
"""Merge account move lines (and hence analytic lines) if invoice line hashcodes are equals"""
if inv.journal_id.group_invoice_lines:
line2 = {}
for x, y, l in line:
tmp = self.inv_line_characteristic_hashcode(inv, l)
if tmp in line2:
am = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])
line2[tmp]['debit'] = (am > 0) and am or 0.0
line2[tmp]['credit'] = (am < 0) and -am or 0.0
line2[tmp]['tax_amount'] += l['tax_amount']
line2[tmp]['analytic_lines'] += l['analytic_lines']
else:
line2[tmp] = l
line = []
for key, val in line2.items():
line.append((0,0,val))
return line
def action_move_create(self, cr, uid, ids, context=None):
"""Creates invoice related analytics and financial move lines"""
ait_obj = self.pool.get('account.invoice.tax')
cur_obj = self.pool.get('res.currency')
period_obj = self.pool.get('account.period')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
if context is None:
context = {}
for inv in self.browse(cr, uid, ids, context=context):
if not inv.journal_id.sequence_id:
raise osv.except_osv(_('Error!'), _('Please define sequence on the journal related to this invoice.'))
if not inv.invoice_line:
raise osv.except_osv(_('No Invoice Lines!'), _('Please create some invoice lines.'))
if inv.move_id:
continue
ctx = context.copy()
ctx.update({'lang': inv.partner_id.lang})
if not inv.date_invoice:
self.write(cr, uid, [inv.id], {'date_invoice': fields.date.context_today(self,cr,uid,context=context)}, context=ctx)
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
# create the analytical lines
# one move line per invoice line
iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)
# check if taxes are all computed
compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)
self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)
# I disabled the check_total feature
group_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]
group_check_total = self.pool.get('res.groups').browse(cr, uid, group_check_total_id, context=context)
if group_check_total and uid in [x.id for x in group_check_total.users]:
if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding/2.0)):
raise osv.except_osv(_('Bad Total!'), _('Please verify the price of the invoice!\nThe encoded total does not match the computed total.'))
if inv.payment_term:
total_fixed = total_percent = 0
for line in inv.payment_term.line_ids:
if line.value == 'fixed':
total_fixed += line.value_amount
if line.value == 'procent':
total_percent += line.value_amount
total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)
if (total_fixed + total_percent) > 100:
raise osv.except_osv(_('Error!'), _("Cannot create the invoice.\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'."))
# one move line per tax line
iml += ait_obj.move_line_get(cr, uid, inv.id)
entry_type = ''
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
entry_type = 'journal_pur_voucher'
if inv.type == 'in_refund':
entry_type = 'cont_voucher'
else:
ref = self._convert_ref(cr, uid, inv.number)
entry_type = 'journal_sale_vou'
if inv.type == 'out_refund':
entry_type = 'cont_voucher'
            diff_currency_p = inv.currency_id.id != company_currency
# create one move line for the total and possibly adjust the other lines amount
total = 0
total_currency = 0
total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)
acc_id = inv.account_id.id
name = inv['name'] or inv['supplier_invoice_number'] or '/'
totlines = False
if inv.payment_term:
totlines = payment_term_obj.compute(cr,
uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)
if totlines:
res_amount_currency = total_currency
i = 0
ctx.update({'date': inv.date_invoice})
for t in totlines:
if inv.currency_id.id != company_currency:
amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)
else:
amount_currency = False
                    # last line: add the rounding difference
res_amount_currency -= amount_currency or 0
i += 1
if i == len(totlines):
amount_currency += res_amount_currency
iml.append({
'type': 'dest',
'name': name,
'price': t[1],
'account_id': acc_id,
'date_maturity': t[0],
'amount_currency': diff_currency_p \
and amount_currency or False,
'currency_id': diff_currency_p \
and inv.currency_id.id or False,
'ref': ref,
})
else:
iml.append({
'type': 'dest',
'name': name,
'price': total,
'account_id': acc_id,
'date_maturity': inv.date_due or False,
'amount_currency': diff_currency_p \
and total_currency or False,
'currency_id': diff_currency_p \
and inv.currency_id.id or False,
'ref': ref
})
date = inv.date_invoice or time.strftime('%Y-%m-%d')
part = self.pool.get("res.partner")._find_accounting_partner(inv.partner_id)
line = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part.id, date, context=ctx)),iml)
line = self.group_lines(cr, uid, iml, line, inv)
journal_id = inv.journal_id.id
journal = journal_obj.browse(cr, uid, journal_id, context=ctx)
if journal.centralisation:
raise osv.except_osv(_('User Error!'),
_('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))
line = self.finalize_invoice_move_lines(cr, uid, inv, line)
move = {
'ref': inv.reference and inv.reference or inv.name,
'line_id': line,
'journal_id': journal_id,
'date': date,
'narration': inv.comment,
'company_id': inv.company_id.id,
}
period_id = inv.period_id and inv.period_id.id or False
ctx.update(company_id=inv.company_id.id,
account_period_prefer_normal=True)
if not period_id:
period_ids = period_obj.find(cr, uid, inv.date_invoice, context=ctx)
period_id = period_ids and period_ids[0] or False
if period_id:
move['period_id'] = period_id
for i in line:
i[2]['period_id'] = period_id
ctx.update(invoice=inv)
move_id = move_obj.create(cr, uid, move, context=ctx)
new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name
# make the invoice point to that move
self.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx)
# Pass invoice in context in method post: used if you want to get the same
# account move reference when creating the same invoice after a cancelled one:
move_obj.post(cr, uid, [move_id], context=ctx)
self._log_event(cr, uid, ids)
return True
def invoice_validate(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'open'}, context=context)
return True
def line_get_convert(self, cr, uid, x, part, date, context=None):
return {
'date_maturity': x.get('date_maturity', False),
'partner_id': part,
'name': x['name'][:64],
'date': date,
'debit': x['price']>0 and x['price'],
'credit': x['price']<0 and -x['price'],
'account_id': x['account_id'],
'analytic_lines': x.get('analytic_lines', []),
'amount_currency': x['price']>0 and abs(x.get('amount_currency', False)) or -abs(x.get('amount_currency', False)),
'currency_id': x.get('currency_id', False),
'tax_code_id': x.get('tax_code_id', False),
'tax_amount': x.get('tax_amount', False),
'ref': x.get('ref', False),
'quantity': x.get('quantity',1.00),
'product_id': x.get('product_id', False),
'product_uom_id': x.get('uos_id', False),
'analytic_account_id': x.get('account_analytic_id', False),
}
def action_number(self, cr, uid, ids, context=None):
if context is None:
context = {}
        #TODO: not a correct fix, but fresh values are required before reading them.
self.write(cr, uid, ids, {})
for obj_inv in self.browse(cr, uid, ids, context=context):
invtype = obj_inv.type
number = obj_inv.number
move_id = obj_inv.move_id and obj_inv.move_id.id or False
reference = obj_inv.reference or ''
self.write(cr, uid, ids, {'internal_number': number})
if invtype in ('in_invoice', 'in_refund'):
if not reference:
ref = self._convert_ref(cr, uid, number)
else:
ref = reference
else:
ref = self._convert_ref(cr, uid, number)
cr.execute('UPDATE account_move SET ref=%s ' \
'WHERE id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_move_line SET ref=%s ' \
'WHERE move_id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_analytic_line SET ref=%s ' \
'FROM account_move_line ' \
'WHERE account_move_line.move_id = %s ' \
'AND account_analytic_line.move_id = account_move_line.id',
(ref, move_id))
return True
def action_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
account_move_obj = self.pool.get('account.move')
invoices = self.read(cr, uid, ids, ['move_id', 'payment_ids'])
move_ids = [] # ones that we will need to remove
for i in invoices:
if i['move_id']:
move_ids.append(i['move_id'][0])
if i['payment_ids']:
account_move_line_obj = self.pool.get('account.move.line')
pay_ids = account_move_line_obj.browse(cr, uid, i['payment_ids'])
for move_line in pay_ids:
if move_line.reconcile_partial_id and move_line.reconcile_partial_id.line_partial_ids:
raise osv.except_osv(_('Error!'), _('You cannot cancel an invoice which is partially paid. You need to unreconcile related payment entries first.'))
# First, set the invoices as cancelled and detach the move ids
self.write(cr, uid, ids, {'state':'cancel', 'move_id':False})
if move_ids:
# second, invalidate the move(s)
account_move_obj.button_cancel(cr, uid, move_ids, context=context)
# delete the move this invoice was pointing to
# Note that the corresponding move_lines and move_reconciles
# will be automatically deleted too
account_move_obj.unlink(cr, uid, move_ids, context=context)
self._log_event(cr, uid, ids, -1.0, 'Cancel Invoice')
return True
###################
def list_distinct_taxes(self, cr, uid, ids):
invoices = self.browse(cr, uid, ids)
taxes = {}
for inv in invoices:
for tax in inv.tax_line:
if not tax['name'] in taxes:
taxes[tax['name']] = {'name': tax['name']}
return taxes.values()
def _log_event(self, cr, uid, ids, factor=1.0, name='Open Invoice'):
#TODO: implement messages system
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
types = {
'out_invoice': _('Invoice'),
'in_invoice': _('Supplier Invoice'),
'out_refund': _('Refund'),
'in_refund': _('Supplier Refund'),
}
return [(r['id'], '%s %s' % (r['number'] or types[r['type']], r['name'] or '')) for r in self.read(cr, uid, ids, ['type', 'number', 'name'], context, load='_classic_write')]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if context is None:
context = {}
ids = []
if name:
ids = self.search(cr, user, [('number','=',name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('name',operator,name)] + args, limit=limit, context=context)
return self.name_get(cr, user, ids, context)
def _refund_cleanup_lines(self, cr, uid, lines, context=None):
"""Convert records to dict of values suitable for one2many line creation
:param list(browse_record) lines: records to convert
        :return: list of command tuple for one2many line creation [(0, 0, dict of values), ...]
"""
clean_lines = []
for line in lines:
clean_line = {}
for field in line._all_columns.keys():
if line._all_columns[field].column._type == 'many2one':
clean_line[field] = line[field].id
elif line._all_columns[field].column._type not in ['many2many','one2many']:
clean_line[field] = line[field]
elif field == 'invoice_line_tax_id':
tax_list = []
for tax in line[field]:
tax_list.append(tax.id)
clean_line[field] = [(6,0, tax_list)]
clean_lines.append(clean_line)
return map(lambda x: (0,0,x), clean_lines)
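    # Illustrative sketch of the structure returned above (hypothetical ids): a
    # line with product_id=12, account_id=40 and taxes [3, 5] becomes a one2many
    # creation command such as
    #     (0, 0, {'product_id': 12, 'account_id': 40,
    #             'invoice_line_tax_id': [(6, 0, [3, 5])], ...})
    # i.e. many2one fields are flattened to ids and the tax many2many is
    # rewritten as a (6, 0, ids) replace command.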
def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None):
"""Prepare the dict of values to create the new refund from the invoice.
This method may be overridden to implement custom
refund generation (making sure to call super() to establish
a clean extension chain).
        :param browse_record invoice: browse record of the invoice to refund
:param string date: refund creation date from the wizard
:param integer period_id: force account.period from the wizard
:param string description: description of the refund from the wizard
:param integer journal_id: account.journal from the wizard
:return: dict of value to create() the refund
"""
obj_journal = self.pool.get('account.journal')
type_dict = {
'out_invoice': 'out_refund', # Customer Invoice
'in_invoice': 'in_refund', # Supplier Invoice
'out_refund': 'out_invoice', # Customer Refund
'in_refund': 'in_invoice', # Supplier Refund
}
invoice_data = {}
for field in ['name', 'reference', 'comment', 'date_due', 'partner_id', 'company_id',
'account_id', 'currency_id', 'payment_term', 'user_id', 'fiscal_position']:
if invoice._all_columns[field].column._type == 'many2one':
invoice_data[field] = invoice[field].id
else:
invoice_data[field] = invoice[field] if invoice[field] else False
invoice_lines = self._refund_cleanup_lines(cr, uid, invoice.invoice_line, context=context)
tax_lines = filter(lambda l: l['manual'], invoice.tax_line)
tax_lines = self._refund_cleanup_lines(cr, uid, tax_lines, context=context)
if journal_id:
refund_journal_ids = [journal_id]
elif invoice['type'] == 'in_invoice':
refund_journal_ids = obj_journal.search(cr, uid, [('type','=','purchase_refund')], context=context)
else:
refund_journal_ids = obj_journal.search(cr, uid, [('type','=','sale_refund')], context=context)
if not date:
date = time.strftime('%Y-%m-%d')
invoice_data.update({
'type': type_dict[invoice['type']],
'date_invoice': date,
'state': 'draft',
'number': False,
'invoice_line': invoice_lines,
'tax_line': tax_lines,
'journal_id': refund_journal_ids and refund_journal_ids[0] or False,
})
if period_id:
invoice_data['period_id'] = period_id
if description:
invoice_data['name'] = description
return invoice_data
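    # Minimal usage sketch for the refund preparation above (hypothetical date
    # and description; invoice_id is assumed to exist):
    #
    #     inv = self.browse(cr, uid, invoice_id, context=context)
    #     values = self._prepare_refund(cr, uid, inv, date='2013-06-30',
    #                                   description='Damaged goods', context=context)
    #     refund_id = self.create(cr, uid, values, context=context)
    #
    # which is essentially what refund() below does for each invoice in ids.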
def refund(self, cr, uid, ids, date=None, period_id=None, description=None, journal_id=None, context=None):
new_ids = []
for invoice in self.browse(cr, uid, ids, context=context):
invoice = self._prepare_refund(cr, uid, invoice,
date=date,
period_id=period_id,
description=description,
journal_id=journal_id,
context=context)
# create the new invoice
new_ids.append(self.create(cr, uid, invoice, context=context))
return new_ids
def pay_and_reconcile(self, cr, uid, ids, pay_amount, pay_account_id, period_id, pay_journal_id, writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context=None, name=''):
if context is None:
context = {}
#TODO check if we can use different period for payment and the writeoff line
assert len(ids)==1, "Can only pay one invoice at a time."
invoice = self.browse(cr, uid, ids[0], context=context)
src_account_id = invoice.account_id.id
# Take the seq as name for move
types = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1}
direction = types[invoice.type]
        #take the chosen date
if 'date_p' in context and context['date_p']:
date=context['date_p']
else:
date=time.strftime('%Y-%m-%d')
# Take the amount in currency and the currency of the payment
if 'amount_currency' in context and context['amount_currency'] and 'currency_id' in context and context['currency_id']:
amount_currency = context['amount_currency']
currency_id = context['currency_id']
else:
amount_currency = False
currency_id = False
pay_journal = self.pool.get('account.journal').read(cr, uid, pay_journal_id, ['type'], context=context)
if invoice.type in ('in_invoice', 'out_invoice'):
if pay_journal['type'] == 'bank':
entry_type = 'bank_pay_voucher' # Bank payment
else:
entry_type = 'pay_voucher' # Cash payment
else:
entry_type = 'cont_voucher'
if invoice.type in ('in_invoice', 'in_refund'):
ref = invoice.reference
else:
ref = self._convert_ref(cr, uid, invoice.number)
partner = self.pool['res.partner']._find_accounting_partner(invoice.partner_id)
# Pay attention to the sign for both debit/credit AND amount_currency
l1 = {
'debit': direction * pay_amount>0 and direction * pay_amount,
'credit': direction * pay_amount<0 and - direction * pay_amount,
'account_id': src_account_id,
'partner_id': partner.id,
'ref':ref,
'date': date,
'currency_id':currency_id,
'amount_currency':amount_currency and direction * amount_currency or 0.0,
'company_id': invoice.company_id.id,
}
l2 = {
'debit': direction * pay_amount<0 and - direction * pay_amount,
'credit': direction * pay_amount>0 and direction * pay_amount,
'account_id': pay_account_id,
'partner_id': partner.id,
'ref':ref,
'date': date,
'currency_id':currency_id,
'amount_currency':amount_currency and - direction * amount_currency or 0.0,
'company_id': invoice.company_id.id,
}
if not name:
name = invoice.invoice_line and invoice.invoice_line[0].name or invoice.number
l1['name'] = name
l2['name'] = name
lines = [(0, 0, l1), (0, 0, l2)]
move = {'ref': ref, 'line_id': lines, 'journal_id': pay_journal_id, 'period_id': period_id, 'date': date}
move_id = self.pool.get('account.move').create(cr, uid, move, context=context)
line_ids = []
total = 0.0
line = self.pool.get('account.move.line')
move_ids = [move_id,]
if invoice.move_id:
move_ids.append(invoice.move_id.id)
cr.execute('SELECT id FROM account_move_line '\
'WHERE move_id IN %s',
((move_id, invoice.move_id.id),))
lines = line.browse(cr, uid, map(lambda x: x[0], cr.fetchall()) )
for l in lines+invoice.payment_ids:
if l.account_id.id == src_account_id:
line_ids.append(l.id)
total += (l.debit or 0.0) - (l.credit or 0.0)
inv_id, name = self.name_get(cr, uid, [invoice.id], context=context)[0]
if (not round(total,self.pool.get('decimal.precision').precision_get(cr, uid, 'Account'))) or writeoff_acc_id:
self.pool.get('account.move.line').reconcile(cr, uid, line_ids, 'manual', writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context)
else:
code = invoice.currency_id.symbol
# TODO: use currency's formatting function
msg = _("Invoice partially paid: %s%s of %s%s (%s%s remaining).") % \
(pay_amount, code, invoice.amount_total, code, total, code)
self.message_post(cr, uid, [inv_id], body=msg, context=context)
self.pool.get('account.move.line').reconcile_partial(cr, uid, line_ids, 'manual', context)
# Update the stored value (fields.function), so we write to trigger recompute
self.pool.get('account.invoice').write(cr, uid, ids, {}, context=context)
return True
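    # Minimal usage sketch for pay_and_reconcile above (hypothetical ids and
    # amount; the optional 'date_p' context key forces the payment date):
    #
    #     self.pool.get('account.invoice').pay_and_reconcile(cr, uid, [invoice_id],
    #         pay_amount=150.0, pay_account_id=bank_account_id, period_id=period_id,
    #         pay_journal_id=bank_journal_id, writeoff_acc_id=False,
    #         writeoff_period_id=False, writeoff_journal_id=False,
    #         context={'date_p': '2013-06-30'}, name='Customer payment')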
class account_invoice_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict):
res = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids):
price = line.price_unit * (1-(line.discount or 0.0)/100.0)
taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, price, line.quantity, product=line.product_id, partner=line.invoice_id.partner_id)
res[line.id] = taxes['total']
if line.invoice_id:
cur = line.invoice_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])
return res
def _price_unit_default(self, cr, uid, context=None):
if context is None:
context = {}
if context.get('check_total', False):
t = context['check_total']
for l in context.get('invoice_line', {}):
if isinstance(l, (list, tuple)) and len(l) >= 3 and l[2]:
tax_obj = self.pool.get('account.tax')
p = l[2].get('price_unit', 0) * (1-l[2].get('discount', 0)/100.0)
t = t - (p * l[2].get('quantity'))
taxes = l[2].get('invoice_line_tax_id')
if len(taxes[0]) >= 3 and taxes[0][2]:
taxes = tax_obj.browse(cr, uid, list(taxes[0][2]))
for tax in tax_obj.compute_all(cr, uid, taxes, p,l[2].get('quantity'), l[2].get('product_id', False), context.get('partner_id', False))['taxes']:
t = t - tax['amount']
return t
return 0
_name = "account.invoice.line"
_description = "Invoice Line"
_order = "invoice_id,sequence,id"
_columns = {
'name': fields.text('Description', required=True),
'origin': fields.char('Source Document', size=256, help="Reference of the document that produced this invoice."),
'sequence': fields.integer('Sequence', help="Gives the sequence of this line when displaying the invoice."),
'invoice_id': fields.many2one('account.invoice', 'Invoice Reference', ondelete='cascade', select=True),
'uos_id': fields.many2one('product.uom', 'Unit of Measure', ondelete='set null', select=True),
'product_id': fields.many2one('product.product', 'Product', ondelete='set null', select=True),
'account_id': fields.many2one('account.account', 'Account', required=True, domain=[('type','<>','view'), ('type', '<>', 'closed')], help="The income or expense account related to the selected product."),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Amount', type="float",
digits_compute= dp.get_precision('Account'), store=True),
'quantity': fields.float('Quantity', digits_compute= dp.get_precision('Product Unit of Measure'), required=True),
'discount': fields.float('Discount (%)', digits_compute= dp.get_precision('Discount')),
'invoice_line_tax_id': fields.many2many('account.tax', 'account_invoice_line_tax', 'invoice_line_id', 'tax_id', 'Taxes', domain=[('parent_id','=',False)]),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('invoice_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'partner_id': fields.related('invoice_id','partner_id',type='many2one',relation='res.partner',string='Partner',store=True)
}
def _default_account_id(self, cr, uid, context=None):
# XXX this gets the default account for the user's company,
# it should get the default account for the invoice's company
# however, the invoice's company does not reach this point
if context is None:
context = {}
if context.get('type') in ('out_invoice','out_refund'):
prop = self.pool.get('ir.property').get(cr, uid, 'property_account_income_categ', 'product.category', context=context)
else:
prop = self.pool.get('ir.property').get(cr, uid, 'property_account_expense_categ', 'product.category', context=context)
return prop and prop.id or False
_defaults = {
'quantity': 1,
'discount': 0.0,
'price_unit': _price_unit_default,
'account_id': _default_account_id,
'sequence': 10,
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:
context = {}
res = super(account_invoice_line,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
if context.get('type', False):
doc = etree.XML(res['arch'])
for node in doc.xpath("//field[@name='product_id']"):
if context['type'] in ('in_invoice', 'in_refund'):
node.set('domain', "[('purchase_ok', '=', True)]")
else:
node.set('domain', "[('sale_ok', '=', True)]")
res['arch'] = etree.tostring(doc)
return res
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
if context is None:
context = {}
company_id = company_id if company_id != None else context.get('company_id',False)
context = dict(context)
context.update({'company_id': company_id, 'force_company': company_id})
if not partner_id:
raise osv.except_osv(_('No Partner Defined!'),_("You must first select a partner!") )
if not product:
if type in ('in_invoice', 'in_refund'):
return {'value': {}, 'domain':{'product_uom':[]}}
else:
return {'value': {'price_unit': 0.0}, 'domain':{'product_uom':[]}}
part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
fpos_obj = self.pool.get('account.fiscal.position')
fpos = fposition_id and fpos_obj.browse(cr, uid, fposition_id, context=context) or False
if part.lang:
context.update({'lang': part.lang})
result = {}
res = self.pool.get('product.product').browse(cr, uid, product, context=context)
if type in ('out_invoice','out_refund'):
a = res.property_account_income.id
if not a:
a = res.categ_id.property_account_income_categ.id
else:
a = res.property_account_expense.id
if not a:
a = res.categ_id.property_account_expense_categ.id
a = fpos_obj.map_account(cr, uid, fpos, a)
if a:
result['account_id'] = a
if type in ('out_invoice', 'out_refund'):
taxes = res.taxes_id and res.taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
else:
taxes = res.supplier_taxes_id and res.supplier_taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
tax_id = fpos_obj.map_tax(cr, uid, fpos, taxes)
if type in ('in_invoice', 'in_refund'):
result.update( {'price_unit': price_unit or res.standard_price,'invoice_line_tax_id': tax_id} )
else:
result.update({'price_unit': res.list_price, 'invoice_line_tax_id': tax_id})
result['name'] = res.partner_ref
result['uos_id'] = uom_id or res.uom_id.id
if res.description:
result['name'] += '\n'+res.description
domain = {'uos_id':[('category_id','=',res.uom_id.category_id.id)]}
res_final = {'value':result, 'domain':domain}
if not company_id or not currency_id:
return res_final
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
currency = self.pool.get('res.currency').browse(cr, uid, currency_id, context=context)
if company.currency_id.id != currency.id:
if type in ('in_invoice', 'in_refund'):
res_final['value']['price_unit'] = res.standard_price
new_price = res_final['value']['price_unit'] * currency.rate
res_final['value']['price_unit'] = new_price
if result['uos_id'] and result['uos_id'] != res.uom_id.id:
selected_uom = self.pool.get('product.uom').browse(cr, uid, result['uos_id'], context=context)
new_price = self.pool.get('product.uom')._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uos_id'])
res_final['value']['price_unit'] = new_price
return res_final
def uos_id_change(self, cr, uid, ids, product, uom, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
if context is None:
context = {}
company_id = company_id if company_id != None else context.get('company_id',False)
context = dict(context)
context.update({'company_id': company_id})
warning = {}
res = self.product_id_change(cr, uid, ids, product, uom, qty, name, type, partner_id, fposition_id, price_unit, currency_id, context=context)
if not uom:
res['value']['price_unit'] = 0.0
if product and uom:
prod = self.pool.get('product.product').browse(cr, uid, product, context=context)
prod_uom = self.pool.get('product.uom').browse(cr, uid, uom, context=context)
if prod.uom_id.category_id.id != prod_uom.category_id.id:
warning = {
'title': _('Warning!'),
'message': _('The selected unit of measure is not compatible with the unit of measure of the product.')
}
res['value'].update({'uos_id': prod.uom_id.id})
return {'value': res['value'], 'warning': warning}
return res
def move_line_get(self, cr, uid, invoice_id, context=None):
res = []
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
if context is None:
context = {}
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
for line in inv.invoice_line:
mres = self.move_line_get_item(cr, uid, line, context)
if not mres:
continue
res.append(mres)
            tax_code_found = False
for tax in tax_obj.compute_all(cr, uid, line.invoice_line_tax_id,
(line.price_unit * (1.0 - (line['discount'] or 0.0) / 100.0)),
line.quantity, line.product_id,
inv.partner_id)['taxes']:
if inv.type in ('out_invoice', 'in_invoice'):
tax_code_id = tax['base_code_id']
tax_amount = line.price_subtotal * tax['base_sign']
else:
tax_code_id = tax['ref_base_code_id']
tax_amount = line.price_subtotal * tax['ref_base_sign']
if tax_code_found:
if not tax_code_id:
continue
res.append(self.move_line_get_item(cr, uid, line, context))
res[-1]['price'] = 0.0
res[-1]['account_analytic_id'] = False
elif not tax_code_id:
continue
tax_code_found = True
res[-1]['tax_code_id'] = tax_code_id
res[-1]['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, tax_amount, context={'date': inv.date_invoice})
return res
def move_line_get_item(self, cr, uid, line, context=None):
return {
'type':'src',
'name': line.name.split('\n')[0][:64],
'price_unit':line.price_unit,
'quantity':line.quantity,
'price':line.price_subtotal,
'account_id':line.account_id.id,
'product_id':line.product_id.id,
'uos_id':line.uos_id.id,
'account_analytic_id':line.account_analytic_id.id,
'taxes':line.invoice_line_tax_id,
}
#
# Set the tax field according to the account and the fiscal position
#
def onchange_account_id(self, cr, uid, ids, product_id, partner_id, inv_type, fposition_id, account_id):
if not account_id:
return {}
unique_tax_ids = []
fpos = fposition_id and self.pool.get('account.fiscal.position').browse(cr, uid, fposition_id) or False
account = self.pool.get('account.account').browse(cr, uid, account_id)
if not product_id:
taxes = account.tax_ids
unique_tax_ids = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, taxes)
else:
product_change_result = self.product_id_change(cr, uid, ids, product_id, False, type=inv_type,
partner_id=partner_id, fposition_id=fposition_id,
company_id=account.company_id.id)
if product_change_result and 'value' in product_change_result and 'invoice_line_tax_id' in product_change_result['value']:
unique_tax_ids = product_change_result['value']['invoice_line_tax_id']
return {'value':{'invoice_line_tax_id': unique_tax_ids}}
account_invoice_line()
class account_invoice_tax(osv.osv):
_name = "account.invoice.tax"
_description = "Invoice Tax"
def _count_factor(self, cr, uid, ids, name, args, context=None):
res = {}
for invoice_tax in self.browse(cr, uid, ids, context=context):
res[invoice_tax.id] = {
'factor_base': 1.0,
'factor_tax': 1.0,
}
            if invoice_tax.amount != 0.0:
                factor_tax = invoice_tax.tax_amount / invoice_tax.amount
                res[invoice_tax.id]['factor_tax'] = factor_tax
            if invoice_tax.base != 0.0:
factor_base = invoice_tax.base_amount / invoice_tax.base
res[invoice_tax.id]['factor_base'] = factor_base
return res
_columns = {
'invoice_id': fields.many2one('account.invoice', 'Invoice Line', ondelete='cascade', select=True),
'name': fields.char('Tax Description', size=64, required=True),
'account_id': fields.many2one('account.account', 'Tax Account', required=True, domain=[('type','<>','view'),('type','<>','income'), ('type', '<>', 'closed')]),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account'),
'base': fields.float('Base', digits_compute=dp.get_precision('Account')),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
'manual': fields.boolean('Manual'),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of invoice tax."),
'base_code_id': fields.many2one('account.tax.code', 'Base Code', help="The account basis of the tax declaration."),
'base_amount': fields.float('Base Code Amount', digits_compute=dp.get_precision('Account')),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Code', help="The tax basis of the tax declaration."),
'tax_amount': fields.float('Tax Code Amount', digits_compute=dp.get_precision('Account')),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
        'factor_base': fields.function(_count_factor, string='Multiplication factor for Base code', type='float', multi="all"),
        'factor_tax': fields.function(_count_factor, string='Multiplication factor for Tax code', type='float', multi="all")
}
def base_change(self, cr, uid, ids, base, currency_id=False, company_id=False, date_invoice=False):
cur_obj = self.pool.get('res.currency')
company_obj = self.pool.get('res.company')
company_currency = False
factor = 1
if ids:
factor = self.read(cr, uid, ids[0], ['factor_base'])['factor_base']
if company_id:
company_currency = company_obj.read(cr, uid, [company_id], ['currency_id'])[0]['currency_id'][0]
if currency_id and company_currency:
base = cur_obj.compute(cr, uid, currency_id, company_currency, base*factor, context={'date': date_invoice or time.strftime('%Y-%m-%d')}, round=False)
return {'value': {'base_amount':base}}
def amount_change(self, cr, uid, ids, amount, currency_id=False, company_id=False, date_invoice=False):
cur_obj = self.pool.get('res.currency')
company_obj = self.pool.get('res.company')
company_currency = False
factor = 1
if ids:
factor = self.read(cr, uid, ids[0], ['factor_tax'])['factor_tax']
if company_id:
company_currency = company_obj.read(cr, uid, [company_id], ['currency_id'])[0]['currency_id'][0]
if currency_id and company_currency:
amount = cur_obj.compute(cr, uid, currency_id, company_currency, amount*factor, context={'date': date_invoice or time.strftime('%Y-%m-%d')}, round=False)
return {'value': {'tax_amount': amount}}
_order = 'sequence'
_defaults = {
'manual': 1,
'base_amount': 0.0,
'tax_amount': 0.0,
}
def compute(self, cr, uid, invoice_id, context=None):
tax_grouped = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
cur = inv.currency_id
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
for line in inv.invoice_line:
for tax in tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, (line.price_unit* (1-(line.discount or 0.0)/100.0)), line.quantity, line.product_id, inv.partner_id)['taxes']:
val={}
val['invoice_id'] = inv.id
val['name'] = tax['name']
val['amount'] = tax['amount']
val['manual'] = False
val['sequence'] = tax['sequence']
val['base'] = cur_obj.round(cr, uid, cur, tax['price_unit'] * line['quantity'])
if inv.type in ('out_invoice','in_invoice'):
val['base_code_id'] = tax['base_code_id']
val['tax_code_id'] = tax['tax_code_id']
val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['account_id'] = tax['account_collected_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_collected_id']
else:
val['base_code_id'] = tax['ref_base_code_id']
val['tax_code_id'] = tax['ref_tax_code_id']
val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['ref_base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['ref_tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['account_id'] = tax['account_paid_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_paid_id']
key = (val['tax_code_id'], val['base_code_id'], val['account_id'], val['account_analytic_id'])
                if key not in tax_grouped:
tax_grouped[key] = val
else:
tax_grouped[key]['amount'] += val['amount']
tax_grouped[key]['base'] += val['base']
tax_grouped[key]['base_amount'] += val['base_amount']
tax_grouped[key]['tax_amount'] += val['tax_amount']
for t in tax_grouped.values():
t['base'] = cur_obj.round(cr, uid, cur, t['base'])
t['amount'] = cur_obj.round(cr, uid, cur, t['amount'])
t['base_amount'] = cur_obj.round(cr, uid, cur, t['base_amount'])
t['tax_amount'] = cur_obj.round(cr, uid, cur, t['tax_amount'])
return tax_grouped
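    # Editor's note (illustrative sketch, not part of the original method): the
    # loop above groups tax values that share the same (tax code, base code,
    # account, analytic account) key and sums their amounts; a plain-Python
    # rendering with invented dictionaries:
    #
    #   def group_taxes(vals):
    #       grouped = {}
    #       for val in vals:
    #           key = (val['tax_code_id'], val['base_code_id'],
    #                  val['account_id'], val['account_analytic_id'])
    #           if key not in grouped:
    #               grouped[key] = dict(val)
    #           else:
    #               for f in ('amount', 'base', 'base_amount', 'tax_amount'):
    #                   grouped[key][f] += val[f]
    #       return grouped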
def move_line_get(self, cr, uid, invoice_id):
res = []
cr.execute('SELECT * FROM account_invoice_tax WHERE invoice_id=%s', (invoice_id,))
for t in cr.dictfetchall():
if not t['amount'] \
and not t['tax_code_id'] \
and not t['tax_amount']:
continue
res.append({
'type':'tax',
'name':t['name'],
'price_unit': t['amount'],
'quantity': 1,
'price': t['amount'] or 0.0,
'account_id': t['account_id'],
'tax_code_id': t['tax_code_id'],
'tax_amount': t['tax_amount'],
'account_analytic_id': t['account_analytic_id'],
})
return res
class res_partner(osv.osv):
""" Inherits partner and adds invoice information in the partner form """
_inherit = 'res.partner'
_columns = {
'invoice_ids': fields.one2many('account.invoice.line', 'partner_id', 'Invoices', readonly=True),
}
def _find_accounting_partner(self, partner):
'''
Find the partner for which the accounting entries will be created
'''
        # FIXME: after 7.0, to be replaced by the function field partner.commercial_partner_id
        # If the chosen partner is not a company and has a parent company, use the parent for the journal entries
        # because you want to invoice 'Agrolait, accounting department' but the journal items are for 'Agrolait'
while not partner.is_company and partner.parent_id:
partner = partner.parent_id
return partner
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default.update({'invoice_ids' : []})
return super(res_partner, self).copy(cr, uid, id, default, context)
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'account.invoice' and context.get('default_res_id') and context.get('mark_invoice_as_sent'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('account.invoice').write(cr, uid, [context['default_res_id']], {'sent': True}, context=context)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
vdmann/cse-360-image-hosting-website
|
refs/heads/master
|
lib/python2.7/site-packages/django/utils/timezone.py
|
16
|
"""
Timezone-related classes and functions.
This module uses pytz when it's available and falls back to local implementations when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import sys
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.utils import six
__all__ = [
'utc', 'get_default_timezone', 'get_current_timezone',
'activate', 'deactivate', 'override',
'is_naive', 'is_aware', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class ReferenceLocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
Kept identical to the reference version. Subclasses contain improvements.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
is_dst = False if dt is None else self._isdst(dt)
return _time.tzname[is_dst]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
class LocalTimezone(ReferenceLocalTimezone):
"""
Slightly improved local time implementation focusing on correctness.
It still crashes on dates before 1970 or after 2038, but at least the
error message is helpful.
"""
def _isdst(self, dt):
try:
return super(LocalTimezone, self)._isdst(dt)
except (OverflowError, ValueError) as exc:
exc_type = type(exc)
exc_value = exc_type(
"Unsupported value: %r. You should install pytz." % dt)
exc_value.__cause__ = exc
six.reraise(exc_type, exc_value, sys.exc_info()[2])
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, six.string_types) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
# This relies on os.environ['TZ'] being set to settings.TIME_ZONE.
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
return timezone.tzname(None)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, six.string_types) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is None:
deactivate()
else:
_active.value = self.old_timezone
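# Editor's note (illustrative sketch, not part of the original module): the
# docstring above describes ``override`` in terms of ``activate()`` and
# ``deactivate()``; a minimal usage sketch, assuming pytz is installed so that
# time zone names can be used. The helper is defined but never called here.
def _demo_override_usage():
    activate("Europe/Paris")                     # thread-local time zone
    with override("UTC"):
        assert get_current_timezone_name() == "UTC"
    name = get_current_timezone_name()           # "Europe/Paris" again
    deactivate()                                 # back to settings.TIME_ZONE
    return name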
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
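# Editor's note (illustrative sketch, not part of the original module): a
# minimal round trip through the helpers above, using only names already
# defined in this file; the datetime value is invented for the example and
# the helper is defined but never called here.
def _demo_aware_naive_roundtrip():
    naive = datetime(2013, 1, 1, 12, 30)
    aware = make_aware(naive, utc)       # attach the UTC tzinfo
    assert is_aware(aware) and is_naive(naive)
    restored = make_naive(aware, utc)    # convert back and strip tzinfo
    return restored == naive             # True: same wall-clock time in UTC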
|
hannespetur/SeqAnHTS
|
refs/heads/master
|
util/py_lib/seqan/__init__.py
|
12133432
| |
boblefrag/python-rest-api-framework
|
refs/heads/master
|
tests/controllers_test.py
|
1
|
from unittest import TestCase
import json
import time
from werkzeug.wrappers import BaseResponse
from werkzeug.test import Client
from app import ApiApp
from rest_api_framework.authentication import ApiKeyAuthentication
from rest_api_framework.datastore import PythonListDataStore
from rest_api_framework import models
from rest_api_framework.controllers import WSGIDispatcher
from rest_api_framework.ratelimit import RateLimit
class ApiModel(models.Model):
fields = [
models.StringPkField(name="id", required=True)
]
class SQLModel(models.Model):
fields = [models.IntegerField(name="age", required=True),
models.StringField(name="name", required=True),
models.PkField(name="id")
]
ressources = [
{"name": "bob",
"age": a,
"id": a
} for a in range(100)
]
ratelimit_ressources = [{"id": "azerty"}]
class RateLimitModel(models.Model):
fields = [models.StringPkField(name="id"),
models.IntegerField(name="quota"),
models.TimestampField(name="last_request")]
authentication = ApiKeyAuthentication(
PythonListDataStore(ratelimit_ressources,
ApiModel)
)
class RateLimitApiApp(ApiApp):
controller = {
"list_verbs": ["GET", "POST"],
"unique_verbs": ["GET", "PUT", "DELETE"],
"options": {"authentication": authentication,
"ratelimit":
RateLimit(
PythonListDataStore(ratelimit_ressources,
RateLimitModel),
interval=1, quota=2)
}
}
def controller_formater(view, data):
data["age"] = int(data["age"])
return data
class FormatedApp(ApiApp):
controller = {
"list_verbs": ["GET", "POST"],
"unique_verbs": ["GET", "PUT", "DELETE"],
"options": {"formaters": [controller_formater,
]
}
}
class TestRateLimit(TestCase):
def test_ratelimit(self):
client = Client(
WSGIDispatcher([RateLimitApiApp]),
response_wrapper=BaseResponse)
resp = client.get("/address/")
self.assertEqual(resp.status_code, 401)
resp = client.get("/address/?apikey=azerty")
self.assertEqual(resp.status_code, 200)
resp = client.get("/address/?apikey=azerty")
self.assertEqual(resp.status_code, 429)
time.sleep(1)
resp = client.get("/address/?apikey=azerty")
self.assertEqual(resp.status_code, 200)
def test_unconfigured_rate_limit(self):
class BadConfRateLimitApiApp(ApiApp):
controller = {
"list_verbs": ["GET", "POST"],
"unique_verbs": ["GET", "PUT", "DELETE"],
"options": {"ratelimit": RateLimit(
PythonListDataStore(ratelimit_ressources, RateLimitModel),
interval=1, quota=2)}
}
self.assertRaises(ValueError,
WSGIDispatcher,
[BadConfRateLimitApiApp],
)
class TestControllerFormaters(TestCase):
def test_create(self):
client = Client(WSGIDispatcher([FormatedApp]),
response_wrapper=BaseResponse)
resp = client.post("/address/",
data=json.dumps({'name': 'bob', 'age': "34"}))
self.assertEqual(resp.status_code, 201)
class TestSayHello(TestCase):
def test_say_hello(self):
client = Client(WSGIDispatcher([FormatedApp]),
response_wrapper=BaseResponse)
resp = client.get('/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(
{"version": "devel", "name": "PRAF"},
json.loads(resp.data)
)
|
isaacl/meliae
|
refs/heads/master
|
remove_expensive_references.py
|
3
|
#!/usr/bin/env python
# Copyright (C) 2009 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Remove expensive references.
This script takes one or two filenames and filters the first into either
stdout or the second filename.
"""
import os
import re
import sys
import time
from meliae import files, loader
def main(args):
import optparse
p = optparse.OptionParser(
'%prog INFILE [OUTFILE]')
opts, args = p.parse_args(args)
if len(args) > 2:
sys.stderr.write('We only support 2 filenames, not %d\n' % (len(args),))
return -1
if len(args) < 1:
sys.stderr.write("Must supply INFILE\n")
return -1
def source():
infile, cleanup = files.open_file(args[0])
for obj in loader.iter_objs(infile):
yield obj
cleanup()
if len(args) == 1:
outfile = sys.stdout
else:
outfile = open(args[1], 'wb')
for _, obj in loader.remove_expensive_references(source, show_progress=True):
outfile.write(obj.to_json() + '\n')
outfile.flush()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
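# Editor's note (illustrative, not part of the original script): a typical
# invocation, with file names invented for the example --
#   python remove_expensive_references.py dump.json filtered.json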
|
aflaxman/mpld3
|
refs/heads/master
|
mpld3/test_plots/test_patches.py
|
19
|
"""Plot to test patches"""
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
import mpld3
def create_plot():
fig, ax = plt.subplots()
ax.grid(color='lightgray')
rcolor = lambda: np.random.random(3)
p = [patches.Arrow(0.75, 0.75, 0.5, 0.5),
patches.Circle((1, 2), 0.4),
patches.RegularPolygon((1, 3), 5, 0.4),
patches.Rectangle((1.6, 0.75), 0.8, 0.5),
patches.CirclePolygon((2, 2), 0.4),
patches.Polygon([[1.75, 3], [2, 3.25], [2.25, 3],
[2, 2.75], [1.75, 3]]),
patches.Wedge((3, 1), 0.4, 0, 270),
patches.Ellipse((3, 2), 0.6, 0.4),
patches.Arc((3, 3), 0.5, 0.5, 270, 90)]
for patch in p:
patch.set_facecolor(rcolor())
patch.set_edgecolor(rcolor())
patch.set_alpha(0.5)
patch.set_linewidth(2)
ax.add_patch(patch)
# add a static patch
ax.add_patch(patches.Rectangle((0.3, 0.4), 0.4, 0.4,
fc='yellow', ec='black', alpha=0.3,
transform=ax.transAxes))
# add a patch with facecolor=None
ax.add_patch(patches.Circle((4.0, 2.5), 0.4,
facecolor='none', edgecolor='k'))
# add a patch with edgecolor=None
ax.add_patch(patches.Circle((4.0, 1.5), 0.4, facecolor='#9999FF',
edgecolor='none', linewidth=2))
# make sure axes ratio is equal
ax.set_xlim(0.5, 0.5 + 3. * 4. / 3.)
ax.set_ylim(0.5, 3.5)
ax.set_title("Various Patches", size=16)
return fig
def test_patches():
fig = create_plot()
html = mpld3.fig_to_html(fig)
plt.close(fig)
if __name__ == "__main__":
mpld3.show(create_plot())
|
adykstra/mne-python
|
refs/heads/master
|
mne/viz/backends/_pysurfer_mayavi.py
|
1
|
"""
Core visualization operations based on Mayavi.
Actual implementation of _Renderer and _Projection classes.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Mark Wronkiewicz <wronk.mark@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
import warnings
import numpy as np
from .base_renderer import _BaseRenderer
from ...surface import _normalize_vectors
from ...utils import (_import_mlab, _validate_type, SilenceStdout,
copy_base_doc_to_subclass_doc)
class _Projection(object):
"""Class storing projection information.
Attributes
----------
xy : array
Result of 2d projection of 3d data.
pts : Source
Mayavi source handle.
"""
def __init__(self, xy=None, pts=None):
"""Store input projection information into attributes."""
self.xy = xy
self.pts = pts
def visible(self, state):
"""Modify visibility attribute of the source."""
if self.pts is not None:
self.pts.visible = state
@copy_base_doc_to_subclass_doc
class _Renderer(_BaseRenderer):
"""Class managing rendering scene.
Attributes
----------
mlab: mayavi.mlab
Main Mayavi access point.
fig: mlab.Figure
Mayavi scene handle.
"""
def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.),
name=None, show=False):
self.mlab = _import_mlab()
if fig is None:
self.fig = _mlab_figure(figure=name, bgcolor=bgcolor, size=size)
else:
self.fig = fig
if show is False:
_toggle_mlab_render(self.fig, False)
def scene(self):
return self.fig
def set_interactive(self):
from tvtk.api import tvtk
if self.fig.scene is not None:
self.fig.scene.interactor.interactor_style = \
tvtk.InteractorStyleTerrain()
def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
backface_culling=False, **kwargs):
with warnings.catch_warnings(record=True): # traits
surface = self.mlab.triangular_mesh(x, y, z, triangles,
color=color,
opacity=opacity,
figure=self.fig,
**kwargs)
surface.actor.property.shading = shading
surface.actor.property.backface_culling = backface_culling
return surface
def contour(self, surface, scalars, contours, line_width=1.0, opacity=1.0,
vmin=None, vmax=None, colormap=None):
mesh = _create_mesh_surf(surface, self.fig, scalars=scalars)
with warnings.catch_warnings(record=True): # traits
cont = self.mlab.pipeline.contour_surface(
mesh, contours=contours, line_width=1.0, vmin=vmin, vmax=vmax,
opacity=opacity, figure=self.fig)
cont.module_manager.scalar_lut_manager.lut.table = colormap
def surface(self, surface, color=None, opacity=1.0,
vmin=None, vmax=None, colormap=None, scalars=None,
backface_culling=False):
# Make a solid surface
mesh = _create_mesh_surf(surface, self.fig, scalars=scalars)
with warnings.catch_warnings(record=True): # traits
surface = self.mlab.pipeline.surface(
mesh, color=color, opacity=opacity, vmin=vmin, vmax=vmax,
figure=self.fig)
if colormap is not None:
surface.module_manager.scalar_lut_manager.lut.table = colormap
surface.actor.property.backface_culling = backface_culling
def sphere(self, center, color, scale, opacity=1.0,
resolution=8, backface_culling=False):
if center.ndim == 1:
x = center[0]
y = center[1]
z = center[2]
elif center.ndim == 2:
x = center[:, 0]
y = center[:, 1]
z = center[:, 2]
surface = self.mlab.points3d(x, y, z, color=color,
resolution=resolution,
scale_factor=scale, opacity=opacity,
figure=self.fig)
surface.actor.property.backface_culling = backface_culling
def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
glyph_height=None, glyph_center=None, glyph_resolution=None,
opacity=1.0, scale_mode='none', scalars=None,
backface_culling=False):
with warnings.catch_warnings(record=True): # traits
if mode == 'arrow':
self.mlab.quiver3d(x, y, z, u, v, w, mode=mode,
color=color, scale_factor=scale,
scale_mode=scale_mode,
resolution=resolution, scalars=scalars,
opacity=opacity, figure=self.fig)
elif mode == 'cone':
self.mlab.quiver3d(x, y, z, u, v, w, color=color,
mode=mode, scale_factor=scale,
opacity=opacity, figure=self.fig)
elif mode == 'cylinder':
quiv = self.mlab.quiver3d(x, y, z, u, v, w, mode=mode,
color=color, scale_factor=scale,
opacity=opacity, figure=self.fig)
quiv.glyph.glyph_source.glyph_source.height = glyph_height
quiv.glyph.glyph_source.glyph_source.center = glyph_center
quiv.glyph.glyph_source.glyph_source.resolution = \
glyph_resolution
quiv.actor.property.backface_culling = backface_culling
def text(self, x, y, text, width, color=(1.0, 1.0, 1.0)):
with warnings.catch_warnings(record=True): # traits
self.mlab.text(x, y, text, width=width, color=color,
figure=self.fig)
def show(self):
if self.fig is not None:
_toggle_mlab_render(self.fig, True)
def close(self):
self.mlab.close(self.fig)
def set_camera(self, azimuth=None, elevation=None, distance=None,
focalpoint=None):
with warnings.catch_warnings(record=True): # traits
with SilenceStdout(): # setting roll
self.mlab.view(azimuth, elevation, distance,
focalpoint=focalpoint, figure=self.fig)
def screenshot(self):
with warnings.catch_warnings(record=True): # traits
return self.mlab.screenshot(self.fig)
def project(self, xyz, ch_names):
xy = _3d_to_2d(self.fig, xyz)
xy = dict(zip(ch_names, xy))
pts = self.fig.children[-1]
return _Projection(xy=xy, pts=pts)
def _mlab_figure(**kwargs):
"""Create a Mayavi figure using our defaults."""
from mayavi import mlab
fig = mlab.figure(**kwargs)
# If using modern VTK/Mayavi, improve rendering with FXAA
if hasattr(getattr(fig.scene, 'renderer', None), 'use_fxaa'):
fig.scene.renderer.use_fxaa = True
return fig
def _toggle_mlab_render(fig, render):
mlab = _import_mlab()
if mlab.options.backend != 'test':
fig.scene.disable_render = not render
def _create_mesh_surf(surf, fig=None, scalars=None, vtk_normals=True):
"""Create Mayavi mesh from MNE surf."""
mlab = _import_mlab()
x, y, z = surf['rr'].T
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(
x, y, z, surf['tris'], scalars=scalars, figure=fig)
if vtk_normals:
mesh = mlab.pipeline.poly_data_normals(mesh)
mesh.filter.compute_cell_normals = False
mesh.filter.consistency = False
mesh.filter.non_manifold_traversal = False
mesh.filter.splitting = False
else:
# make absolutely sure these are normalized for Mayavi
nn = surf['nn'].copy()
_normalize_vectors(nn)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
return mesh
def _3d_to_2d(fig, xyz):
"""Convert 3d points to a 2d perspective using a Mayavi Scene."""
from mayavi.core.scene import Scene
_validate_type(fig, Scene, "fig", "Scene")
xyz = np.column_stack([xyz, np.ones(xyz.shape[0])])
# Transform points into 'unnormalized' view coordinates
comb_trans_mat = _get_world_to_view_matrix(fig.scene)
view_coords = np.dot(comb_trans_mat, xyz.T).T
# Divide through by the fourth element for normalized view coords
norm_view_coords = view_coords / (view_coords[:, 3].reshape(-1, 1))
# Transform from normalized view coordinates to display coordinates.
view_to_disp_mat = _get_view_to_display_matrix(fig.scene)
xy = np.dot(view_to_disp_mat, norm_view_coords.T).T
# Pull the first two columns since they're meaningful for 2d plotting
xy = xy[:, :2]
return xy
def _get_world_to_view_matrix(scene):
"""Return the 4x4 matrix to transform xyz space to the current view.
This is a concatenation of the model view and perspective transforms.
"""
from mayavi.core.ui.mayavi_scene import MayaviScene
from tvtk.pyface.tvtk_scene import TVTKScene
_validate_type(scene, (MayaviScene, TVTKScene), "scene",
"TVTKScene/MayaviScene")
cam = scene.camera
# The VTK method needs the aspect ratio and near and far
# clipping planes in order to return the proper transform.
scene_size = tuple(scene.get_size())
clip_range = cam.clipping_range
aspect_ratio = float(scene_size[0]) / scene_size[1]
# Get the vtk matrix object using the aspect ratio we defined
vtk_comb_trans_mat = cam.get_composite_projection_transform_matrix(
aspect_ratio, clip_range[0], clip_range[1])
vtk_comb_trans_mat = vtk_comb_trans_mat.to_array()
return vtk_comb_trans_mat
def _get_view_to_display_matrix(scene):
"""Return the 4x4 matrix to convert view coordinates to display coordinates.
It's assumed that the view should take up the entire window and that the
origin of the window is in the upper left corner.
""" # noqa: E501
from mayavi.core.ui.mayavi_scene import MayaviScene
from tvtk.pyface.tvtk_scene import TVTKScene
_validate_type(scene, (MayaviScene, TVTKScene), "scene",
"TVTKScene/MayaviScene")
# normalized view coordinates have the origin in the middle of the space
# so we need to scale by width and height of the display window and shift
# by half width and half height. The matrix accomplishes that.
x, y = tuple(scene.get_size())
view_to_disp_mat = np.array([[x / 2.0, 0., 0., x / 2.0],
[0., -y / 2.0, 0., y / 2.0],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
return view_to_disp_mat
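# Editor's note (illustrative sketch, not part of the original module): a small
# standalone check of the matrix documented above -- the normalized view origin
# should land in the centre of a hypothetical 800x600 window. The helper is
# defined but never called here.
def _demo_view_to_display_matrix():
    x, y = 800, 600
    mat = np.array([[x / 2.0, 0., 0., x / 2.0],
                    [0., -y / 2.0, 0., y / 2.0],
                    [0., 0., 1., 0.],
                    [0., 0., 0., 1.]])
    centre = np.dot(mat, np.array([0., 0., 0., 1.]))
    return centre[:2]  # array([400., 300.])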
|
naomidb/polyjuice
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup, find_packages
setup(name='polyjuice',
version='0.1.0',
description='Anonymize dicom files.',
url='https://github.com/ctsit/polyjuice',
author='Naomi Braun and Ajantha Ramineni',
author_email='naomi.d.braun@gmail.com, ajantha.5779@gmail.com',
license='Apache 2.0',
packages=find_packages(),
install_requires=['docopt', 'pydicom==1.0.2', 'PyYaml'])
|
andredias/nikola
|
refs/heads/master
|
nikola/image_processing.py
|
4
|
# -*- coding: utf-8 -*-
# Copyright © 2014 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Process images."""
from __future__ import unicode_literals
import datetime
import os
import lxml
import re
import gzip
import piexif
from nikola import utils
Image = None
try:
from PIL import ExifTags, Image # NOQA
except ImportError:
try:
import ExifTags
import Image as _Image
Image = _Image
except ImportError:
pass
EXIF_TAG_NAMES = {}
class ImageProcessor(object):
"""Apply image operations."""
image_ext_list_builtin = ['.jpg', '.png', '.jpeg', '.gif', '.svg', '.svgz', '.bmp', '.tiff']
def _fill_exif_tag_names(self):
"""Connect EXIF tag names to numeric values."""
if not EXIF_TAG_NAMES:
for ifd in piexif.TAGS:
for tag, data in piexif.TAGS[ifd].items():
EXIF_TAG_NAMES[tag] = data['name']
def filter_exif(self, exif, whitelist):
"""Filter EXIF data as described in the documentation."""
# Scenario 1: keep everything
if whitelist == {'*': '*'}:
return exif
# Scenario 2: keep nothing
if whitelist == {}:
return None
# Scenario 3: keep some
self._fill_exif_tag_names()
exif = exif.copy() # Don't modify in-place, it's rude
for k in list(exif.keys()):
if type(exif[k]) != dict:
pass # At least thumbnails have no fields
elif k not in whitelist:
exif.pop(k) # Not whitelisted, remove
elif k in whitelist and whitelist[k] == '*':
# Fully whitelisted, keep all
pass
else:
# Partially whitelisted
for tag in list(exif[k].keys()):
if EXIF_TAG_NAMES[tag] not in whitelist[k]:
exif[k].pop(tag)
return exif or None
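    # Editor's note (illustrative, not part of the original class): the three
    # whitelist scenarios above behave roughly as follows; the tag id 271
    # ('Make') and the sample dict are assumptions used only for this sketch.
    #
    #   proc = ImageProcessor()
    #   exif = {'0th': {271: 'ACME'}, 'Exif': {}, 'thumbnail': None}
    #   proc.filter_exif(exif, {'*': '*'})         # scenario 1: keep everything
    #   proc.filter_exif(exif, {})                 # scenario 2: returns None
    #   proc.filter_exif(exif, {'0th': ['Make']})  # scenario 3: keep only 0th/Make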
def resize_image(self, src, dst, max_size, bigger_panoramas=True, preserve_exif_data=False, exif_whitelist={}):
"""Make a copy of the image in the requested size."""
if not Image or os.path.splitext(src)[1] in ['.svg', '.svgz']:
self.resize_svg(src, dst, max_size, bigger_panoramas)
return
im = Image.open(src)
size = w, h = im.size
if w > max_size or h > max_size:
size = max_size, max_size
# Panoramas get larger thumbnails because they look *awful*
if bigger_panoramas and w > 2 * h:
size = min(w, max_size * 4), min(w, max_size * 4)
try:
exif = piexif.load(im.info["exif"])
except KeyError:
exif = None
# Inside this if, we can manipulate exif as much as
# we want/need and it will be preserved if required
if exif is not None:
# Rotate according to EXIF
value = exif['0th'].get(piexif.ImageIFD.Orientation, 1)
if value in (3, 4):
im = im.transpose(Image.ROTATE_180)
elif value in (5, 6):
im = im.transpose(Image.ROTATE_270)
elif value in (7, 8):
im = im.transpose(Image.ROTATE_90)
if value in (2, 4, 5, 7):
im = im.transpose(Image.FLIP_LEFT_RIGHT)
exif['0th'][piexif.ImageIFD.Orientation] = 1
try:
im.thumbnail(size, Image.ANTIALIAS)
if exif is not None and preserve_exif_data:
# Put right size in EXIF data
w, h = im.size
if '0th' in exif:
exif["0th"][piexif.ImageIFD.ImageWidth] = w
exif["0th"][piexif.ImageIFD.ImageLength] = h
if 'Exif' in exif:
exif["Exif"][piexif.ExifIFD.PixelXDimension] = w
exif["Exif"][piexif.ExifIFD.PixelYDimension] = h
# Filter EXIF data as required
exif = self.filter_exif(exif, exif_whitelist)
im.save(dst, exif=piexif.dump(exif))
else:
im.save(dst)
except Exception as e:
self.logger.warn("Can't process {0}, using original "
"image! ({1})".format(src, e))
utils.copy_file(src, dst)
def resize_svg(self, src, dst, max_size, bigger_panoramas):
"""Make a copy of an svg at the requested size."""
try:
# Resize svg based on viewport hacking.
# note that this can also lead to enlarged svgs
if src.endswith('.svgz'):
with gzip.GzipFile(src, 'rb') as op:
xml = op.read()
else:
with open(src, 'rb') as op:
xml = op.read()
tree = lxml.etree.XML(xml)
width = tree.attrib['width']
height = tree.attrib['height']
w = int(re.search("[0-9]+", width).group(0))
h = int(re.search("[0-9]+", height).group(0))
# calculate new size preserving aspect ratio.
ratio = float(w) / h
# Panoramas get larger thumbnails because they look *awful*
if bigger_panoramas and w > 2 * h:
max_size = max_size * 4
if w > h:
w = max_size
h = max_size / ratio
else:
w = max_size * ratio
h = max_size
w = int(w)
h = int(h)
tree.attrib.pop("width")
tree.attrib.pop("height")
tree.attrib['viewport'] = "0 0 %ipx %ipx" % (w, h)
if dst.endswith('.svgz'):
op = gzip.GzipFile(dst, 'wb')
else:
op = open(dst, 'wb')
op.write(lxml.etree.tostring(tree))
op.close()
except (KeyError, AttributeError) as e:
self.logger.warn("No width/height in %s. Original exception: %s" % (src, e))
utils.copy_file(src, dst)
def image_date(self, src):
"""Try to figure out the date of the image."""
if src not in self.dates:
try:
im = Image.open(src)
exif = im._getexif()
except Exception:
exif = None
if exif is not None:
for tag, value in list(exif.items()):
decoded = ExifTags.TAGS.get(tag, tag)
if decoded in ('DateTimeOriginal', 'DateTimeDigitized'):
try:
if isinstance(value, tuple):
value = value[0]
self.dates[src] = datetime.datetime.strptime(
value, '%Y:%m:%d %H:%M:%S')
break
except ValueError: # Invalid EXIF date.
pass
if src not in self.dates:
self.dates[src] = datetime.datetime.fromtimestamp(
os.stat(src).st_mtime)
return self.dates[src]
|
dhruvagarwal/django
|
refs/heads/master
|
django/db/migrations/operations/__init__.py
|
394
|
from .fields import AddField, AlterField, RemoveField, RenameField
from .models import (
AlterIndexTogether, AlterModelManagers, AlterModelOptions, AlterModelTable,
AlterOrderWithRespectTo, AlterUniqueTogether, CreateModel, DeleteModel,
RenameModel,
)
from .special import RunPython, RunSQL, SeparateDatabaseAndState
__all__ = [
'CreateModel', 'DeleteModel', 'AlterModelTable', 'AlterUniqueTogether',
'RenameModel', 'AlterIndexTogether', 'AlterModelOptions',
'AddField', 'RemoveField', 'AlterField', 'RenameField',
'SeparateDatabaseAndState', 'RunSQL', 'RunPython',
'AlterOrderWithRespectTo', 'AlterModelManagers',
]
|
adviti/melange
|
refs/heads/master
|
app/atom/core.py
|
80
|
#!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import inspect
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from xml.dom.minidom import parseString as xmlString
except ImportError:
xmlString = None
STRING_ENCODING = 'utf-8'
class XmlElement(object):
"""Represents an element node in an XML document.
The text member is a UTF-8 encoded str or unicode.
"""
_qname = None
_other_elements = None
_other_attributes = None
# The rule set contains mappings for XML qnames to child members and the
# appropriate member classes.
_rule_set = None
_members = None
text = None
def __init__(self, text=None, *args, **kwargs):
if ('_members' not in self.__class__.__dict__
or self.__class__._members is None):
self.__class__._members = tuple(self.__class__._list_xml_members())
for member_name, member_type in self.__class__._members:
if member_name in kwargs:
setattr(self, member_name, kwargs[member_name])
else:
if isinstance(member_type, list):
setattr(self, member_name, [])
else:
setattr(self, member_name, None)
self._other_elements = []
self._other_attributes = {}
if text is not None:
self.text = text
def _list_xml_members(cls):
"""Generator listing all members which are XML elements or attributes.
The following members would be considered XML members:
foo = 'abc' - indicates an XML attribute with the qname abc
foo = SomeElement - indicates an XML child element
foo = [AnElement] - indicates a repeating XML child element, each instance
will be stored in a list in this member
foo = ('att1', '{http://example.com/namespace}att2') - indicates an XML
attribute which has different parsing rules in different versions of
the protocol. Version 1 of the XML parsing rules will look for an
      attribute with the qname 'att1' but version 2 of the parsing rules will
look for a namespaced attribute with the local name of 'att2' and an
XML namespace of 'http://example.com/namespace'.
"""
members = []
for pair in inspect.getmembers(cls):
if not pair[0].startswith('_') and pair[0] != 'text':
member_type = pair[1]
if (isinstance(member_type, tuple) or isinstance(member_type, list)
or isinstance(member_type, (str, unicode))
or (inspect.isclass(member_type)
and issubclass(member_type, XmlElement))):
members.append(pair)
return members
_list_xml_members = classmethod(_list_xml_members)
def _get_rules(cls, version):
"""Initializes the _rule_set for the class which is used when parsing XML.
This method is used internally for parsing and generating XML for an
XmlElement. It is not recommended that you call this method directly.
Returns:
A tuple containing the XML parsing rules for the appropriate version.
The tuple looks like:
(qname, {sub_element_qname: (member_name, member_class, repeating), ..},
{attribute_qname: member_name})
To give a couple of concrete example, the atom.data.Control _get_rules
with version of 2 will return:
('{http://www.w3.org/2007/app}control',
{'{http://www.w3.org/2007/app}draft': ('draft',
<class 'atom.data.Draft'>,
False)},
{})
Calling _get_rules with version 1 on gdata.data.FeedLink will produce:
('{http://schemas.google.com/g/2005}feedLink',
{'{http://www.w3.org/2005/Atom}feed': ('feed',
<class 'gdata.data.GDFeed'>,
False)},
{'href': 'href', 'readOnly': 'read_only', 'countHint': 'count_hint',
'rel': 'rel'})
"""
# Initialize the _rule_set to make sure there is a slot available to store
# the parsing rules for this version of the XML schema.
# Look for rule set in the class __dict__ proxy so that only the
# _rule_set for this class will be found. By using the dict proxy
# we avoid finding rule_sets defined in superclasses.
# The four lines below provide support for any number of versions, but it
    # runs a bit slower than hard coding slots for two versions, so I'm using
# the below two lines.
#if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
# cls._rule_set = []
#while len(cls.__dict__['_rule_set']) < version:
# cls._rule_set.append(None)
# If there is no rule set cache in the class, provide slots for two XML
# versions. If and when there is a version 3, this list will need to be
# expanded.
if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
cls._rule_set = [None, None]
# If a version higher than 2 is requested, fall back to version 2 because
# 2 is currently the highest supported version.
if version > 2:
return cls._get_rules(2)
# Check the dict proxy for the rule set to avoid finding any rule sets
# which belong to the superclass. We only want rule sets for this class.
if cls._rule_set[version-1] is None:
# The rule set for each version consists of the qname for this element
# ('{namespace}tag'), a dictionary (elements) for looking up the
# corresponding class member when given a child element's qname, and a
# dictionary (attributes) for looking up the corresponding class member
# when given an XML attribute's qname.
elements = {}
attributes = {}
if ('_members' not in cls.__dict__ or cls._members is None):
cls._members = tuple(cls._list_xml_members())
for member_name, target in cls._members:
if isinstance(target, list):
# This member points to a repeating element.
elements[_get_qname(target[0], version)] = (member_name, target[0],
True)
elif isinstance(target, tuple):
# This member points to a versioned XML attribute.
if version <= len(target):
attributes[target[version-1]] = member_name
else:
attributes[target[-1]] = member_name
elif isinstance(target, (str, unicode)):
# This member points to an XML attribute.
attributes[target] = member_name
elif issubclass(target, XmlElement):
        # This member points to a single occurrence element.
elements[_get_qname(target, version)] = (member_name, target, False)
version_rules = (_get_qname(cls, version), elements, attributes)
cls._rule_set[version-1] = version_rules
return version_rules
else:
return cls._rule_set[version-1]
_get_rules = classmethod(_get_rules)
def get_elements(self, tag=None, namespace=None, version=1):
"""Find all sub elements which match the tag and namespace.
To find all elements in this object, call get_elements with the tag and
namespace both set to None (the default). This method searches through
the object's members and the elements stored in _other_elements which
did not match any of the XML parsing rules for this class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching elements.
Returns:
A list of the matching XmlElements.
"""
matches = []
ignored1, elements, ignored2 = self.__class__._get_rules(version)
if elements:
for qname, element_def in elements.iteritems():
member = getattr(self, element_def[0])
if member:
if _qname_matches(tag, namespace, qname):
if element_def[2]:
# If this is a repeating element, copy all instances into the
# result list.
matches.extend(member)
else:
matches.append(member)
for element in self._other_elements:
if _qname_matches(tag, namespace, element._qname):
matches.append(element)
return matches
GetElements = get_elements
# FindExtensions and FindChildren are provided for backwards compatibility
# to the atom.AtomBase class.
# However, FindExtensions may return more results than the v1 atom.AtomBase
# method does, because get_elements searches both the expected children
# and the unexpected "other elements". The old AtomBase.FindExtensions
# method searched only "other elements" AKA extension_elements.
FindExtensions = get_elements
FindChildren = get_elements
def get_attributes(self, tag=None, namespace=None, version=1):
"""Find all attributes which match the tag and namespace.
To find all attributes in this object, call get_attributes with the tag
and namespace both set to None (the default). This method searches
through the object's members and the attributes stored in
_other_attributes which did not fit any of the XML parsing rules for this
class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching attributes.
Returns:
A list of XmlAttribute objects for the matching attributes.
"""
matches = []
ignored1, ignored2, attributes = self.__class__._get_rules(version)
if attributes:
for qname, attribute_def in attributes.iteritems():
if isinstance(attribute_def, (list, tuple)):
attribute_def = attribute_def[0]
member = getattr(self, attribute_def)
# TODO: ensure this hasn't broken existing behavior.
#member = getattr(self, attribute_def[0])
if member:
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, member))
for qname, value in self._other_attributes.iteritems():
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, value))
return matches
GetAttributes = get_attributes
def _harvest_tree(self, tree, version=1):
"""Populates object members from the data in the tree Element."""
qname, elements, attributes = self.__class__._get_rules(version)
for element in tree:
if elements and element.tag in elements:
definition = elements[element.tag]
# If this is a repeating element, make sure the member is set to a
# list.
if definition[2]:
if getattr(self, definition[0]) is None:
setattr(self, definition[0], [])
getattr(self, definition[0]).append(_xml_element_from_tree(element,
definition[1], version))
else:
setattr(self, definition[0], _xml_element_from_tree(element,
definition[1], version))
else:
self._other_elements.append(_xml_element_from_tree(element, XmlElement,
version))
for attrib, value in tree.attrib.iteritems():
if attributes and attrib in attributes:
setattr(self, attributes[attrib], value)
else:
self._other_attributes[attrib] = value
if tree.text:
self.text = tree.text
def _to_tree(self, version=1, encoding=None):
new_tree = ElementTree.Element(_get_qname(self, version))
self._attach_members(new_tree, version, encoding)
return new_tree
def _attach_members(self, tree, version=1, encoding=None):
"""Convert members to XML elements/attributes and add them to the tree.
Args:
tree: An ElementTree.Element which will be modified. The members of
this object will be added as child elements or attributes
according to the rules described in _expected_elements and
_expected_attributes. The elements and attributes stored in
            other_attributes and other_elements are also added as children
of this tree.
      version: int Ignored in this method but used by VersionedElement.
encoding: str (optional)
"""
qname, elements, attributes = self.__class__._get_rules(version)
encoding = encoding or STRING_ENCODING
# Add the expected elements and attributes to the tree.
if elements:
for tag, element_def in elements.iteritems():
member = getattr(self, element_def[0])
# If this is a repeating element and there are members in the list.
if member and element_def[2]:
for instance in member:
instance._become_child(tree, version)
elif member:
member._become_child(tree, version)
if attributes:
for attribute_tag, member_name in attributes.iteritems():
value = getattr(self, member_name)
if value:
tree.attrib[attribute_tag] = value
# Add the unexpected (other) elements and attributes to the tree.
for element in self._other_elements:
element._become_child(tree, version)
for key, value in self._other_attributes.iteritems():
# I'm not sure if unicode can be used in the attribute name, so for now
# we assume the encoding is correct for the attribute name.
if not isinstance(value, unicode):
value = value.decode(encoding)
tree.attrib[key] = value
if self.text:
if isinstance(self.text, unicode):
tree.text = self.text
else:
tree.text = self.text.decode(encoding)
def to_string(self, version=1, encoding=None, pretty_print=None):
"""Converts this object to XML."""
tree_string = ElementTree.tostring(self._to_tree(version, encoding))
if pretty_print and xmlString is not None:
return xmlString(tree_string).toprettyxml()
return tree_string
ToString = to_string
def __str__(self):
return self.to_string()
def _become_child(self, tree, version=1):
"""Adds a child element to tree with the XML data in self."""
new_child = ElementTree.Element('')
tree.append(new_child)
new_child.tag = _get_qname(self, version)
self._attach_members(new_child, version)
def __get_extension_elements(self):
return self._other_elements
def __set_extension_elements(self, elements):
self._other_elements = elements
extension_elements = property(__get_extension_elements,
__set_extension_elements,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def __get_extension_attributes(self):
return self._other_attributes
def __set_extension_attributes(self, attributes):
self._other_attributes = attributes
extension_attributes = property(__get_extension_attributes,
__set_extension_attributes,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def _get_tag(self, version=1):
qname = _get_qname(self, version)
if qname:
return qname[qname.find('}')+1:]
return None
def _get_namespace(self, version=1):
qname = _get_qname(self, version)
if qname.startswith('{'):
return qname[1:qname.find('}')]
else:
return None
def _set_tag(self, tag):
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if self._qname[0].startswith('{'):
self._qname[0] = '{%s}%s' % (self._get_namespace(1), tag)
else:
self._qname[0] = tag
else:
if self._qname is not None and self._qname.startswith('{'):
self._qname = '{%s}%s' % (self._get_namespace(), tag)
else:
self._qname = tag
def _set_namespace(self, namespace):
tag = self._get_tag(1)
if tag is None:
tag = ''
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if namespace:
self._qname[0] = '{%s}%s' % (namespace, tag)
else:
self._qname[0] = tag
else:
if namespace:
self._qname = '{%s}%s' % (namespace, tag)
else:
self._qname = tag
tag = property(_get_tag, _set_tag,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
namespace = property(_get_namespace, _set_namespace,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
# Provided for backwards compatibility to atom.ExtensionElement
children = extension_elements
attributes = extension_attributes
def _get_qname(element, version):
if isinstance(element._qname, tuple):
if version <= len(element._qname):
return element._qname[version-1]
else:
return element._qname[-1]
else:
return element._qname
def _qname_matches(tag, namespace, qname):
"""Logic determines if a QName matches the desired local tag and namespace.
This is used in XmlElement.get_elements and XmlElement.get_attributes to
find matches in the element's members (among all expected-and-unexpected
elements-and-attributes).
Args:
expected_tag: string
expected_namespace: string
qname: string in the form '{xml_namespace}localtag' or 'tag' if there is
no namespace.
Returns:
boolean True if the member's tag and namespace fit the expected tag and
namespace.
"""
# If there is no expected namespace or tag, then everything will match.
if qname is None:
member_tag = None
member_namespace = None
else:
if qname.startswith('{'):
member_namespace = qname[1:qname.index('}')]
member_tag = qname[qname.index('}') + 1:]
else:
member_namespace = None
member_tag = qname
return ((tag is None and namespace is None)
# If there is a tag, but no namespace, see if the local tag matches.
or (namespace is None and member_tag == tag)
# There was no tag, but there was a namespace so see if the namespaces
# match.
or (tag is None and member_namespace == namespace)
# There was no tag, and the desired elements have no namespace, so check
# to see that the member's namespace is None.
or (tag is None and namespace == ''
and member_namespace is None)
# The tag and the namespace both match.
or (tag == member_tag
and namespace == member_namespace)
# The tag matches, and the expected namespace is the empty namespace,
# check to make sure the member's namespace is None.
or (tag == member_tag and namespace == ''
and member_namespace is None))
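# Illustrative outcomes of the matching rules above (informal examples added
# for clarity; not part of the original module):
#   _qname_matches('entry', 'http://www.w3.org/2005/Atom',
#                  '{http://www.w3.org/2005/Atom}entry')                -> True
#   _qname_matches('entry', None, '{http://www.w3.org/2005/Atom}entry') -> True
#   _qname_matches('entry', '', '{http://www.w3.org/2005/Atom}entry')   -> False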
def parse(xml_string, target_class=None, version=1, encoding=None):
"""Parses the XML string according to the rules for the target_class.
Args:
xml_string: str or unicode
target_class: XmlElement or a subclass. If None is specified, the
XmlElement class is used.
version: int (optional) The version of the schema which should be used when
converting the XML into an object. The default is 1.
encoding: str (optional) The character encoding of the bytes in the
xml_string. Default is 'UTF-8'.
"""
if target_class is None:
target_class = XmlElement
if isinstance(xml_string, unicode):
if encoding is None:
xml_string = xml_string.encode(STRING_ENCODING)
else:
xml_string = xml_string.encode(encoding)
tree = ElementTree.fromstring(xml_string)
return _xml_element_from_tree(tree, target_class, version)
Parse = parse
xml_element_from_string = parse
XmlElementFromString = xml_element_from_string
def _xml_element_from_tree(tree, target_class, version=1):
if target_class._qname is None:
instance = target_class()
instance._qname = tree.tag
instance._harvest_tree(tree, version)
return instance
# TODO handle the namespace-only case
# Namespace only will be used with Google Spreadsheets rows and
# Google Base item attributes.
elif tree.tag == _get_qname(target_class, version):
instance = target_class()
instance._harvest_tree(tree, version)
return instance
return None
class XmlAttribute(object):
def __init__(self, qname, value):
self._qname = qname
self.value = value
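# A minimal round-trip sketch (illustrative only): it assumes XmlElement,
# defined earlier in this module, is the default target class for parse(),
# and the XML snippet below is purely hypothetical.
if __name__ == '__main__':
  _example = parse('<root attr="1"><child>text</child></root>')
  print _example.tag          # 'root'
  print _example.attributes   # the unexpected attributes, e.g. {'attr': '1'}
  print _example.to_string()  # re-serializes the parsed tree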
|
citrix-openstack-build/neutron
|
refs/heads/master
|
neutron/tests/unit/test_attributes.py
|
2
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from neutron.api.v2 import attributes
from neutron.common import exceptions as q_exc
from neutron.tests import base
class TestAttributes(base.BaseTestCase):
def _construct_dict_and_constraints(self):
"""Constructs a test dictionary and a definition of constraints.
:return: A (dictionary, constraint) tuple
"""
constraints = {'key1': {'type:values': ['val1', 'val2'],
'required': True},
'key2': {'type:string': None,
'required': False},
'key3': {'type:dict': {'k4': {'type:string': None,
'required': True}},
'required': True}}
dictionary = {'key1': 'val1',
'key2': 'a string value',
'key3': {'k4': 'a string value'}}
return dictionary, constraints
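    # With these constraints, dropping the required 'key1' (or the nested
    # required 'k4') makes _validate_dict() report missing "Expected keys:",
    # while dropping the optional 'key2' still validates -- see the dict
    # validation tests below.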
def test_is_attr_set(self):
data = attributes.ATTR_NOT_SPECIFIED
self.assertIs(attributes.is_attr_set(data), False)
data = None
self.assertIs(attributes.is_attr_set(data), False)
data = "I'm set"
self.assertIs(attributes.is_attr_set(data), True)
def test_validate_values(self):
msg = attributes._validate_values(4, [4, 6])
self.assertIsNone(msg)
msg = attributes._validate_values(4, (4, 6))
self.assertIsNone(msg)
msg = attributes._validate_values(7, [4, 6])
self.assertEqual(msg, "'7' is not in [4, 6]")
msg = attributes._validate_values(7, (4, 6))
self.assertEqual(msg, "'7' is not in (4, 6)")
def test_validate_string(self):
msg = attributes._validate_string(None, None)
self.assertEqual(msg, "'None' is not a valid string")
# 0 == len(data) == max_len
msg = attributes._validate_string("", 0)
self.assertIsNone(msg)
# 0 == len(data) < max_len
msg = attributes._validate_string("", 9)
self.assertIsNone(msg)
# 0 < len(data) < max_len
msg = attributes._validate_string("123456789", 10)
self.assertIsNone(msg)
# 0 < len(data) == max_len
msg = attributes._validate_string("123456789", 9)
self.assertIsNone(msg)
# 0 < max_len < len(data)
msg = attributes._validate_string("1234567890", 9)
self.assertEqual(msg, "'1234567890' exceeds maximum length of 9")
msg = attributes._validate_string("123456789", None)
self.assertIsNone(msg)
def test_validate_no_whitespace(self):
data = 'no_white_space'
result = attributes._validate_no_whitespace(data)
self.assertEqual(result, data)
self.assertRaises(q_exc.InvalidInput,
attributes._validate_no_whitespace,
'i have whitespace')
self.assertRaises(q_exc.InvalidInput,
attributes._validate_no_whitespace,
'i\thave\twhitespace')
def test_validate_range(self):
msg = attributes._validate_range(1, [1, 9])
self.assertIsNone(msg)
msg = attributes._validate_range(5, [1, 9])
self.assertIsNone(msg)
msg = attributes._validate_range(9, [1, 9])
self.assertIsNone(msg)
msg = attributes._validate_range(1, (1, 9))
self.assertIsNone(msg)
msg = attributes._validate_range(5, (1, 9))
self.assertIsNone(msg)
msg = attributes._validate_range(9, (1, 9))
self.assertIsNone(msg)
msg = attributes._validate_range(0, [1, 9])
self.assertEqual(msg, "'0' is too small - must be at least '1'")
msg = attributes._validate_range(10, (1, 9))
self.assertEqual(msg,
"'10' is too large - must be no larger than '9'")
msg = attributes._validate_range("bogus", (1, 9))
self.assertEqual(msg, "'bogus' is not an integer")
msg = attributes._validate_range(10, (attributes.UNLIMITED,
attributes.UNLIMITED))
self.assertIsNone(msg)
msg = attributes._validate_range(10, (1, attributes.UNLIMITED))
self.assertIsNone(msg)
msg = attributes._validate_range(1, (attributes.UNLIMITED, 9))
self.assertIsNone(msg)
msg = attributes._validate_range(-1, (0, attributes.UNLIMITED))
self.assertEqual(msg, "'-1' is too small - must be at least '0'")
msg = attributes._validate_range(10, (attributes.UNLIMITED, 9))
self.assertEqual(msg,
"'10' is too large - must be no larger than '9'")
def test_validate_mac_address(self):
mac_addr = "ff:16:3e:4f:00:00"
msg = attributes._validate_mac_address(mac_addr)
self.assertIsNone(msg)
mac_addr = "ffa:16:3e:4f:00:00"
msg = attributes._validate_mac_address(mac_addr)
self.assertEqual(msg, "'%s' is not a valid MAC address" % mac_addr)
def test_validate_ip_address(self):
ip_addr = '1.1.1.1'
msg = attributes._validate_ip_address(ip_addr)
self.assertIsNone(msg)
ip_addr = '1111.1.1.1'
msg = attributes._validate_ip_address(ip_addr)
self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr)
ip_addr = '1.1.1.1 has whitespace'
msg = attributes._validate_ip_address(ip_addr)
self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr)
ip_addr = '111.1.1.1\twhitespace'
msg = attributes._validate_ip_address(ip_addr)
self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr)
ip_addr = '111.1.1.1\nwhitespace'
msg = attributes._validate_ip_address(ip_addr)
self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr)
def test_validate_ip_pools(self):
pools = [[{'end': '10.0.0.254'}],
[{'start': '10.0.0.254'}],
[{'start': '1000.0.0.254',
'end': '1.1.1.1'}],
[{'start': '10.0.0.2', 'end': '10.0.0.254',
'forza': 'juve'}],
[{'start': '10.0.0.2', 'end': '10.0.0.254'},
{'end': '10.0.0.254'}],
[None],
None]
for pool in pools:
msg = attributes._validate_ip_pools(pool)
self.assertIsNotNone(msg)
pools = [[{'end': '10.0.0.254', 'start': '10.0.0.2'},
{'start': '11.0.0.2', 'end': '11.1.1.1'}],
[{'start': '11.0.0.2', 'end': '11.0.0.100'}]]
for pool in pools:
msg = attributes._validate_ip_pools(pool)
self.assertIsNone(msg)
def test_validate_fixed_ips(self):
fixed_ips = [
{'data': [{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
'ip_address': '1111.1.1.1'}],
'error_msg': "'1111.1.1.1' is not a valid IP address"},
{'data': [{'subnet_id': 'invalid',
'ip_address': '1.1.1.1'}],
'error_msg': "'invalid' is not a valid UUID"},
{'data': None,
'error_msg': "Invalid data format for fixed IP: 'None'"},
{'data': "1.1.1.1",
'error_msg': "Invalid data format for fixed IP: '1.1.1.1'"},
{'data': ['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1'],
'error_msg': "Invalid data format for fixed IP: "
"'00000000-ffff-ffff-ffff-000000000000'"},
{'data': [['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1']],
'error_msg': "Invalid data format for fixed IP: "
"'['00000000-ffff-ffff-ffff-000000000000', "
"'1.1.1.1']'"},
{'data': [{'subnet_id': '00000000-0fff-ffff-ffff-000000000000',
'ip_address': '1.1.1.1'},
{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
'ip_address': '1.1.1.1'}],
'error_msg': "Duplicate IP address '1.1.1.1'"}]
for fixed in fixed_ips:
msg = attributes._validate_fixed_ips(fixed['data'])
self.assertEqual(msg, fixed['error_msg'])
fixed_ips = [[{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
'ip_address': '1.1.1.1'}],
[{'subnet_id': '00000000-0fff-ffff-ffff-000000000000',
'ip_address': '1.1.1.1'},
{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
'ip_address': '1.1.1.2'}]]
for fixed in fixed_ips:
msg = attributes._validate_fixed_ips(fixed)
self.assertIsNone(msg)
def test_validate_nameservers(self):
ns_pools = [['1.1.1.2', '1.1.1.2'],
['www.hostname.com', 'www.hostname.com'],
['77.hostname.com'],
['1000.0.0.1'],
None]
for ns in ns_pools:
msg = attributes._validate_nameservers(ns, None)
self.assertIsNotNone(msg)
ns_pools = [['100.0.0.2'],
['www.hostname.com'],
['www.great.marathons.to.travel'],
['valid'],
['www.internal.hostname.com']]
for ns in ns_pools:
msg = attributes._validate_nameservers(ns, None)
self.assertIsNone(msg)
def test_validate_hostroutes(self):
hostroute_pools = [[{'destination': '100.0.0.0/24'}],
[{'nexthop': '10.0.2.20'}],
[{'nexthop': '10.0.2.20',
'forza': 'juve',
'destination': '100.0.0.0/8'}],
[{'nexthop': '1110.0.2.20',
'destination': '100.0.0.0/8'}],
[{'nexthop': '10.0.2.20',
'destination': '100.0.0.0'}],
[{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'},
{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'}],
[None],
None]
for host_routes in hostroute_pools:
msg = attributes._validate_hostroutes(host_routes, None)
self.assertIsNotNone(msg)
hostroute_pools = [[{'destination': '100.0.0.0/24',
'nexthop': '10.0.2.20'}],
[{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'},
{'nexthop': '10.0.2.20',
'destination': '101.0.0.0/8'}]]
for host_routes in hostroute_pools:
msg = attributes._validate_hostroutes(host_routes, None)
self.assertIsNone(msg)
def test_validate_ip_address_or_none(self):
ip_addr = None
msg = attributes._validate_ip_address_or_none(ip_addr)
self.assertIsNone(msg)
ip_addr = '1.1.1.1'
msg = attributes._validate_ip_address_or_none(ip_addr)
self.assertIsNone(msg)
ip_addr = '1111.1.1.1'
msg = attributes._validate_ip_address_or_none(ip_addr)
self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr)
def test_hostname_pattern(self):
data = '@openstack'
msg = attributes._validate_regex(data, attributes.HOSTNAME_PATTERN)
self.assertIsNotNone(msg)
data = 'www.openstack.org'
msg = attributes._validate_regex(data, attributes.HOSTNAME_PATTERN)
self.assertIsNone(msg)
def test_uuid_pattern(self):
data = 'garbage'
msg = attributes._validate_regex(data, attributes.UUID_PATTERN)
self.assertIsNotNone(msg)
data = '00000000-ffff-ffff-ffff-000000000000'
msg = attributes._validate_regex(data, attributes.UUID_PATTERN)
self.assertIsNone(msg)
def test_mac_pattern(self):
# Valid - 3 octets
base_mac = "fa:16:3e:00:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNone(msg)
# Valid - 4 octets
base_mac = "fa:16:3e:4f:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNone(msg)
# Invalid - not unicast
base_mac = "01:16:3e:4f:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "a:16:3e:4f:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "ffa:16:3e:4f:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "01163e4f0000"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "01-16-3e-4f-00-00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "00:16:3:f:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "12:3:4:5:67:89ab"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
def test_validate_subnet(self):
# Valid - IPv4
cidr = "10.0.2.0/24"
msg = attributes._validate_subnet(cidr,
None)
self.assertIsNone(msg)
# Valid - IPv6 without final octets
cidr = "fe80::/24"
msg = attributes._validate_subnet(cidr,
None)
self.assertIsNone(msg)
# Valid - IPv6 with final octets
cidr = "fe80::/24"
msg = attributes._validate_subnet(cidr,
None)
self.assertIsNone(msg)
# Invalid - IPv4 missing mask
cidr = "10.0.2.0"
msg = attributes._validate_subnet(cidr,
None)
error = _("'%(data)s' isn't a recognized IP subnet cidr,"
" '%(cidr)s' is recommended") % {"data": cidr,
"cidr": "10.0.2.0/32"}
self.assertEqual(msg, error)
# Invalid - IPv4 with final octets
cidr = "192.168.1.1/24"
msg = attributes._validate_subnet(cidr,
None)
error = _("'%(data)s' isn't a recognized IP subnet cidr,"
" '%(cidr)s' is recommended") % {"data": cidr,
"cidr": "192.168.1.0/24"}
self.assertEqual(msg, error)
# Invalid - IPv6 without final octets, missing mask
cidr = "fe80::"
msg = attributes._validate_subnet(cidr,
None)
error = _("'%(data)s' isn't a recognized IP subnet cidr,"
" '%(cidr)s' is recommended") % {"data": cidr,
"cidr": "fe80::/128"}
self.assertEqual(msg, error)
# Invalid - IPv6 with final octets, missing mask
cidr = "fe80::0"
msg = attributes._validate_subnet(cidr,
None)
error = _("'%(data)s' isn't a recognized IP subnet cidr,"
" '%(cidr)s' is recommended") % {"data": cidr,
"cidr": "fe80::/128"}
self.assertEqual(msg, error)
# Invalid - Address format error
cidr = 'invalid'
msg = attributes._validate_subnet(cidr,
None)
error = "'%s' is not a valid IP subnet" % cidr
self.assertEqual(msg, error)
def test_validate_regex(self):
pattern = '[hc]at'
data = None
msg = attributes._validate_regex(data, pattern)
self.assertEqual(msg, "'%s' is not a valid input" % data)
data = 'bat'
msg = attributes._validate_regex(data, pattern)
self.assertEqual(msg, "'%s' is not a valid input" % data)
data = 'hat'
msg = attributes._validate_regex(data, pattern)
self.assertIsNone(msg)
data = 'cat'
msg = attributes._validate_regex(data, pattern)
self.assertIsNone(msg)
def test_validate_uuid(self):
msg = attributes._validate_uuid('garbage')
self.assertEqual(msg, "'garbage' is not a valid UUID")
msg = attributes._validate_uuid('00000000-ffff-ffff-ffff-000000000000')
self.assertIsNone(msg)
def test_validate_uuid_list(self):
# check not a list
uuids = [None,
123,
'e5069610-744b-42a7-8bd8-ceac1a229cd4',
'12345678123456781234567812345678',
{'uuid': 'e5069610-744b-42a7-8bd8-ceac1a229cd4'}]
for uuid in uuids:
msg = attributes._validate_uuid_list(uuid)
error = "'%s' is not a list" % uuid
self.assertEqual(msg, error)
# check invalid uuid in a list
invalid_uuid_lists = [[None],
[123],
[123, 'e5069610-744b-42a7-8bd8-ceac1a229cd4'],
['123', '12345678123456781234567812345678'],
['t5069610-744b-42a7-8bd8-ceac1a229cd4'],
['e5069610-744b-42a7-8bd8-ceac1a229cd44'],
['e50696100-744b-42a7-8bd8-ceac1a229cd4'],
['e5069610-744bb-42a7-8bd8-ceac1a229cd4']]
for uuid_list in invalid_uuid_lists:
msg = attributes._validate_uuid_list(uuid_list)
error = "'%s' is not a valid UUID" % uuid_list[0]
self.assertEqual(msg, error)
# check duplicate items in a list
duplicate_uuids = ['e5069610-744b-42a7-8bd8-ceac1a229cd4',
'f3eeab00-8367-4524-b662-55e64d4cacb5',
'e5069610-744b-42a7-8bd8-ceac1a229cd4']
msg = attributes._validate_uuid_list(duplicate_uuids)
error = ("Duplicate items in the list: "
"'%s'" % ', '.join(duplicate_uuids))
self.assertEqual(msg, error)
# check valid uuid lists
valid_uuid_lists = [['e5069610-744b-42a7-8bd8-ceac1a229cd4'],
['f3eeab00-8367-4524-b662-55e64d4cacb5'],
['e5069610-744b-42a7-8bd8-ceac1a229cd4',
'f3eeab00-8367-4524-b662-55e64d4cacb5']]
for uuid_list in valid_uuid_lists:
msg = attributes._validate_uuid_list(uuid_list)
self.assertEqual(msg, None)
def test_validate_dict_type(self):
for value in (None, True, '1', []):
self.assertEqual(attributes._validate_dict(value),
"'%s' is not a dictionary" % value)
def test_validate_dict_without_constraints(self):
msg = attributes._validate_dict({})
self.assertIsNone(msg)
# Validate a dictionary without constraints.
msg = attributes._validate_dict({'key': 'value'})
self.assertIsNone(msg)
def test_validate_a_valid_dict_with_constraints(self):
dictionary, constraints = self._construct_dict_and_constraints()
msg = attributes._validate_dict(dictionary, constraints)
self.assertIsNone(msg, 'Validation of a valid dictionary failed.')
def test_validate_dict_with_invalid_validator(self):
dictionary, constraints = self._construct_dict_and_constraints()
constraints['key1'] = {'type:unsupported': None, 'required': True}
msg = attributes._validate_dict(dictionary, constraints)
self.assertEqual(msg, "Validator 'type:unsupported' does not exist.")
def test_validate_dict_not_required_keys(self):
dictionary, constraints = self._construct_dict_and_constraints()
del dictionary['key2']
msg = attributes._validate_dict(dictionary, constraints)
        self.assertIsNone(msg, 'Field that was not required by the specs was '
                               'required by the validator.')
def test_validate_dict_required_keys(self):
dictionary, constraints = self._construct_dict_and_constraints()
del dictionary['key1']
msg = attributes._validate_dict(dictionary, constraints)
self.assertIn('Expected keys:', msg)
def test_validate_dict_wrong_values(self):
dictionary, constraints = self._construct_dict_and_constraints()
dictionary['key1'] = 'UNSUPPORTED'
msg = attributes._validate_dict(dictionary, constraints)
self.assertIsNotNone(msg)
def test_validate_dict_convert_boolean(self):
dictionary, constraints = self._construct_dict_and_constraints()
constraints['key_bool'] = {
'type:boolean': None,
'required': False,
'convert_to': attributes.convert_to_boolean}
dictionary['key_bool'] = 'true'
msg = attributes._validate_dict(dictionary, constraints)
self.assertIsNone(msg)
# Explicitly comparing with literal 'True' as assertTrue
# succeeds also for 'true'
self.assertIs(True, dictionary['key_bool'])
def test_subdictionary(self):
dictionary, constraints = self._construct_dict_and_constraints()
del dictionary['key3']['k4']
dictionary['key3']['k5'] = 'a string value'
msg = attributes._validate_dict(dictionary, constraints)
self.assertIn('Expected keys:', msg)
def test_validate_dict_or_none(self):
dictionary, constraints = self._construct_dict_and_constraints()
# Check whether None is a valid value.
msg = attributes._validate_dict_or_none(None, constraints)
self.assertIsNone(msg, 'Validation of a None dictionary failed.')
# Check validation of a regular dictionary.
msg = attributes._validate_dict_or_none(dictionary, constraints)
self.assertIsNone(msg, 'Validation of a valid dictionary failed.')
def test_validate_dict_or_empty(self):
dictionary, constraints = self._construct_dict_and_constraints()
# Check whether an empty dictionary is valid.
msg = attributes._validate_dict_or_empty({}, constraints)
self.assertIsNone(msg, 'Validation of a None dictionary failed.')
# Check validation of a regular dictionary.
msg = attributes._validate_dict_or_none(dictionary, constraints)
self.assertIsNone(msg, 'Validation of a valid dictionary failed.')
def test_validate_non_negative(self):
for value in (-1, '-2'):
self.assertEqual(attributes._validate_non_negative(value),
"'%s' should be non-negative" % value)
for value in (0, 1, '2', True, False):
msg = attributes._validate_non_negative(value)
self.assertIsNone(msg)
class TestConvertToBoolean(base.BaseTestCase):
def test_convert_to_boolean_bool(self):
self.assertIs(attributes.convert_to_boolean(True), True)
self.assertIs(attributes.convert_to_boolean(False), False)
def test_convert_to_boolean_int(self):
self.assertIs(attributes.convert_to_boolean(0), False)
self.assertIs(attributes.convert_to_boolean(1), True)
self.assertRaises(q_exc.InvalidInput,
attributes.convert_to_boolean,
7)
def test_convert_to_boolean_str(self):
self.assertIs(attributes.convert_to_boolean('True'), True)
self.assertIs(attributes.convert_to_boolean('true'), True)
self.assertIs(attributes.convert_to_boolean('False'), False)
self.assertIs(attributes.convert_to_boolean('false'), False)
self.assertIs(attributes.convert_to_boolean('0'), False)
self.assertIs(attributes.convert_to_boolean('1'), True)
self.assertRaises(q_exc.InvalidInput,
attributes.convert_to_boolean,
'7')
class TestConvertToInt(base.BaseTestCase):
def test_convert_to_int_int(self):
self.assertEqual(attributes.convert_to_int(-1), -1)
self.assertEqual(attributes.convert_to_int(0), 0)
self.assertEqual(attributes.convert_to_int(1), 1)
def test_convert_to_int_str(self):
self.assertEqual(attributes.convert_to_int('4'), 4)
self.assertEqual(attributes.convert_to_int('6'), 6)
self.assertRaises(q_exc.InvalidInput,
attributes.convert_to_int,
'garbage')
def test_convert_to_int_none(self):
self.assertRaises(q_exc.InvalidInput,
attributes.convert_to_int,
None)
def test_convert_none_to_empty_list_none(self):
self.assertEqual(
[], attributes.convert_none_to_empty_list(None))
def test_convert_none_to_empty_dict(self):
self.assertEqual(
{}, attributes.convert_none_to_empty_dict(None))
def test_convert_none_to_empty_list_value(self):
values = ['1', 3, [], [1], {}, {'a': 3}]
for value in values:
self.assertEqual(
value, attributes.convert_none_to_empty_list(value))
class TestConvertKvp(base.BaseTestCase):
def test_convert_kvp_list_to_dict_succeeds_for_missing_values(self):
result = attributes.convert_kvp_list_to_dict(['True'])
self.assertEqual({}, result)
def test_convert_kvp_list_to_dict_succeeds_for_multiple_values(self):
result = attributes.convert_kvp_list_to_dict(
['a=b', 'a=c', 'a=c', 'b=a'])
self.assertEqual({'a': ['c', 'b'], 'b': ['a']}, result)
def test_convert_kvp_list_to_dict_succeeds_for_values(self):
result = attributes.convert_kvp_list_to_dict(['a=b', 'c=d'])
self.assertEqual({'a': ['b'], 'c': ['d']}, result)
def test_convert_kvp_str_to_list_fails_for_missing_key(self):
with testtools.ExpectedException(q_exc.InvalidInput):
attributes.convert_kvp_str_to_list('=a')
def test_convert_kvp_str_to_list_fails_for_missing_equals(self):
with testtools.ExpectedException(q_exc.InvalidInput):
attributes.convert_kvp_str_to_list('a')
def test_convert_kvp_str_to_list_succeeds_for_one_equals(self):
result = attributes.convert_kvp_str_to_list('a=')
self.assertEqual(['a', ''], result)
def test_convert_kvp_str_to_list_succeeds_for_two_equals(self):
result = attributes.convert_kvp_str_to_list('a=a=a')
self.assertEqual(['a', 'a=a'], result)
class TestConvertToList(base.BaseTestCase):
def test_convert_to_empty_list(self):
for item in (None, [], (), {}):
self.assertEqual(attributes.convert_to_list(item), [])
def test_convert_to_list_string(self):
for item in ('', 'foo'):
self.assertEqual(attributes.convert_to_list(item), [item])
def test_convert_to_list_iterable(self):
for item in ([None], [1, 2, 3], (1, 2, 3), set([1, 2, 3]), ['foo']):
self.assertEqual(attributes.convert_to_list(item), list(item))
def test_convert_to_list_non_iterable(self):
for item in (True, False, 1, 1.2, object()):
self.assertEqual(attributes.convert_to_list(item), [item])
|
Zlash65/erpnext
|
refs/heads/develop
|
erpnext/manufacturing/notification/material_request_receipt_notification/__init__.py
|
12133432
| |
devs1991/test_edx_docmode
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/conf/app_template/__init__.py
|
12133432
| |
Shrhawk/edx-platform
|
refs/heads/master
|
lms/lib/xblock/test/__init__.py
|
12133432
| |
zhanghenry/stocks
|
refs/heads/master
|
django/conf/locale/et/__init__.py
|
12133432
| |
40223220/2015_cdb_g7_40223220
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/fnmatch.py
|
894
|
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case into account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
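# Informal examples of the case-normalizing behaviour described above: on a
# case-insensitive platform such as Windows both calls return True, while on
# POSIX only the exact-case form matches.
#   fnmatch('foo.TXT', '*.txt')      -> depends on os.path.normcase
#   fnmatchcase('foo.TXT', '*.txt')  -> False everywhere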
@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
if isinstance(pat, bytes):
pat_str = str(pat, 'ISO-8859-1')
res_str = translate(pat_str)
res = bytes(res_str, 'ISO-8859-1')
else:
res = translate(pat)
return re.compile(res).match
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat)
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
match = _compile_pattern(pat)
return match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\','\\\\')
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
    return res + r'\Z(?ms)'
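# For reference, a rough sketch of what translate() produces (informal
# examples; the exact regex text may differ between Python versions):
#   translate('*.py')   -> '.*\\.py\\Z(?ms)'
#   translate('[!a]?b') -> '[^a].b\\Z(?ms)'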
|
saisankargochhayat/algo_quest
|
refs/heads/master
|
leetcode/kth-largest-element-in-an-array/kth_largest.py
|
1
|
# Problem link - https://leetcode.com/problems/kth-largest-element-in-an-array/
from typing import List
class Heap:
def __init__(self,A):
self.A = A
self.heapSize = len(self.A)
# This method maintains the max heap property.
def max_heapify(self, i):
largest = i
left = 2*i + 1
right = 2*i + 2
        # largest already equals i here, so no else branch is needed.
        if left < self.heapSize and self.A[left] >= self.A[largest]:
            largest = left
if right < self.heapSize and self.A[right] >= self.A[largest]:
largest = right
if largest != i:
self.A[i], self.A[largest] = self.A[largest], self.A[i]
self.max_heapify(largest)
    # We can use the procedure MAX-HEAPIFY in a bottom-up manner to convert
    # an array A[1..n], where n = A.length, into a max-heap.
    # The procedure BUILD-MAX-HEAP goes through the remaining nodes of the
    # tree and runs MAX-HEAPIFY on each one.
def build_max_heap(self):
for i in range(int((self.heapSize/2)),-1,-1):
self.max_heapify(i)
def extract_max(self):
if self.heapSize < 1:
return None
maximum = self.A[0]
self.A[0] = self.A[self.heapSize-1]
self.heapSize -= 1
self.max_heapify(0)
return maximum
def heap_sort(self):
self.build_max_heap()
for i in range(self.heapSize-1,0,-1):
self.A[0], self.A[i] = self.A[i], self.A[0]
self.heapSize -= 1
self.max_heapify(0)
return self.A
# Testing heap sort!
myHeap = Heap([1,2,3,-17,4,7,9,12,8])
print(myHeap.heap_sort())
class Solution:
def findKthLargest(self, nums: List[int], k: int) -> int:
myHeap = Heap(nums)
myHeap.build_max_heap()
maximum = None
while k != 0:
maximum = myHeap.extract_max()
k -= 1
return maximum
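# Rough usage sketch (hypothetical values): for nums = [3, 2, 1, 5, 6, 4] and
# k = 2, extract_max() returns 6 and then 5, so
# Solution().findKthLargest([3, 2, 1, 5, 6, 4], 2) evaluates to 5.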
|
RydrDojo/Ridr_app
|
refs/heads/master
|
pylotVenv/lib/python2.7/site-packages/setuptools/depends.py
|
114
|
import sys
import imp
import marshal
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from distutils.version import StrictVersion
from setuptools.extern import six
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name,self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f,p,i = find_module(self.module,paths)
if f: f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
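    # A typical requirement, e.g. Require('Django', '1.4', 'django'), checks
    # django.__version__ against StrictVersion('1.4') via version_ok(); this
    # example is illustrative and not part of the original module.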
def _iter_code(code):
"""Yield '(op,arg)' pair for each operation in code object 'code'"""
from array import array
from dis import HAVE_ARGUMENT, EXTENDED_ARG
bytes = array('b',code.co_code)
eof = len(code.co_code)
ptr = 0
extended_arg = 0
while ptr<eof:
op = bytes[ptr]
if op>=HAVE_ARGUMENT:
arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg
ptr += 3
if op==EXTENDED_ARG:
long_type = six.integer_types[-1]
extended_arg = arg * long_type(65536)
continue
else:
arg = None
ptr += 1
yield op,arg
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix,mode,kind) = info = imp.find_module(part, paths)
if kind==PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts,module))
return info
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
try:
if kind==PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind==PY_FROZEN:
code = imp.get_frozen_object(module)
elif kind==PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
if module not in sys.modules:
imp.load_module(module, f, path, (suffix, mode, kind))
return getattr(sys.modules[module], symbol, None)
finally:
if f:
f.close()
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
        # name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for op, arg in _iter_code(code):
if op==LOAD_CONST:
const = code.co_consts[arg]
elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL):
return const
else:
const = default
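# Informal example of the behaviour described in the docstring above:
#   code = compile("__version__ = '1.0'", '<test>', 'exec')
#   extract_constant(code, '__version__')   -> '1.0'
#   extract_constant(code, 'missing_name')  -> None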
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
|
huaweiswitch/neutron
|
refs/heads/master
|
neutron/plugins/nec/common/ofc_client.py
|
7
|
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import requests
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.nec.common import config
from neutron.plugins.nec.common import exceptions as nexc
LOG = logging.getLogger(__name__)
class OFCClient(object):
"""A HTTP/HTTPS client for OFC Drivers."""
def __init__(self, host="127.0.0.1", port=8888, use_ssl=False,
key_file=None, cert_file=None, insecure_ssl=False):
"""Creates a new client to some OFC.
:param host: The host where service resides
:param port: The port where service resides
:param use_ssl: True to use SSL, False to use HTTP
:param key_file: The SSL key file to use if use_ssl is true
:param cert_file: The SSL cert file to use if use_ssl is true
:param insecure_ssl: Don't verify SSL certificate
"""
self.host = host
self.port = port
self.use_ssl = use_ssl
self.key_file = key_file
self.cert_file = cert_file
self.insecure_ssl = insecure_ssl
self.connection = None
def _format_error_message(self, status, detail):
detail = ' ' + detail if detail else ''
return (_("Operation on OFC failed: %(status)s%(msg)s") %
{'status': status, 'msg': detail})
def _get_response(self, method, action, body=None):
headers = {"Content-Type": "application/json"}
protocol = "http"
certs = {'key_file': self.key_file, 'cert_file': self.cert_file}
certs = dict((x, certs[x]) for x in certs if certs[x] is not None)
verify = True
if self.use_ssl:
protocol = "https"
if self.insecure_ssl:
verify = False
url = "%s://%s:%d%s" % (protocol, self.host, int(self.port),
action)
res = requests.request(method, url, data=body, headers=headers,
cert=certs, verify=verify)
return res
def do_single_request(self, method, action, body=None):
action = config.OFC.path_prefix + action
LOG.debug(_("Client request: %(host)s:%(port)s "
"%(method)s %(action)s [%(body)s]"),
{'host': self.host, 'port': self.port,
'method': method, 'action': action, 'body': body})
if type(body) is dict:
body = jsonutils.dumps(body)
try:
res = self._get_response(method, action, body)
data = res.text
LOG.debug(_("OFC returns [%(status)s:%(data)s]"),
{'status': res.status_code,
'data': data})
# Try to decode JSON data if possible.
try:
data = jsonutils.loads(data)
except (ValueError, TypeError):
pass
if res.status_code in (requests.codes.OK,
requests.codes.CREATED,
requests.codes.ACCEPTED,
requests.codes.NO_CONTENT):
return data
elif res.status_code == requests.codes.SERVICE_UNAVAILABLE:
retry_after = res.headers.get('retry-after')
LOG.warning(_("OFC returns ServiceUnavailable "
"(retry-after=%s)"), retry_after)
raise nexc.OFCServiceUnavailable(retry_after=retry_after)
elif res.status_code == requests.codes.NOT_FOUND:
LOG.info(_("Specified resource %s does not exist on OFC "),
action)
raise nexc.OFCResourceNotFound(resource=action)
else:
LOG.warning(_("Operation on OFC failed: "
"status=%(status)s, detail=%(detail)s"),
{'status': res.status_code, 'detail': data})
params = {'reason': _("Operation on OFC failed"),
'status': res.status_code}
if isinstance(data, dict):
params['err_code'] = data.get('err_code')
params['err_msg'] = data.get('err_msg')
else:
params['err_msg'] = data
raise nexc.OFCException(**params)
except requests.exceptions.RequestException as e:
reason = _("Failed to connect OFC : %s") % e
LOG.error(reason)
raise nexc.OFCException(reason=reason)
def do_request(self, method, action, body=None):
max_attempts = config.OFC.api_max_attempts
for i in range(max_attempts, 0, -1):
try:
return self.do_single_request(method, action, body)
except nexc.OFCServiceUnavailable as e:
with excutils.save_and_reraise_exception() as ctxt:
try:
wait_time = int(e.retry_after)
except (ValueError, TypeError):
wait_time = None
if i > 1 and wait_time:
LOG.info(_("Waiting for %s seconds due to "
"OFC Service_Unavailable."), wait_time)
time.sleep(wait_time)
ctxt.reraise = False
continue
def get(self, action):
return self.do_request("GET", action)
def post(self, action, body=None):
return self.do_request("POST", action, body=body)
def put(self, action, body=None):
return self.do_request("PUT", action, body=body)
def delete(self, action):
return self.do_request("DELETE", action)
|
gregpechiro/dndtools
|
refs/heads/master
|
dndtools/dnd/migrations/0065_auto__add_field_characterclassvariant_required_bab.py
|
1
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CharacterClassVariant.required_bab'
db.add_column('dnd_characterclassvariant', 'required_bab', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'CharacterClassVariant.required_bab'
db.delete_column('dnd_characterclassvariant', 'required_bab')
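    # For illustration only: with South this schema change would typically be
    # applied with "./manage.py migrate dnd 0065" and reversed by migrating
    # back to the previous migration in the dnd app.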
models = {
'dnd.characterclass': {
'Meta': {'ordering': "['name']", 'object_name': 'CharacterClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'prestige': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.characterclassvariant': {
'Meta': {'unique_together': "(('character_class', 'rulebook'),)", 'object_name': 'CharacterClassVariant'},
'advancement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'advancement_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'alignment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'class_features': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'class_features_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'class_skills': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Skill']", 'symmetrical': 'False'}),
'hit_die': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'required_bab': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'requirements_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'skill_points': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'dnd.characterclassvariantrequiresfeat': {
'Meta': {'object_name': 'CharacterClassVariantRequiresFeat'},
'character_class_variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_feats'", 'to': "orm['dnd.CharacterClassVariant']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'dnd.characterclassvariantrequiresrace': {
'Meta': {'object_name': 'CharacterClassVariantRequiresRace'},
'character_class_variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_races'", 'to': "orm['dnd.CharacterClassVariant']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Race']"})
},
'dnd.characterclassvariantrequiresskill': {
'Meta': {'object_name': 'CharacterClassVariantRequiresSkill'},
'character_class_variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_skills'", 'to': "orm['dnd.CharacterClassVariant']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ranks': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"})
},
'dnd.dndedition': {
'Meta': {'ordering': "['name']", 'object_name': 'DndEdition'},
'core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'system': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'dnd.domain': {
'Meta': {'ordering': "['name']", 'object_name': 'Domain'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.feat': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Feat'},
'benefit': ('django.db.models.fields.TextField', [], {}),
'benefit_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'feat_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.FeatCategory']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'normal': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'normal_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'special_feat_prerequisites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpecialFeatPrerequisite']", 'through': "orm['dnd.FeatSpecialFeatPrerequisite']", 'symmetrical': 'False'}),
'special_html': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'dnd.featcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'FeatCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.featrequiresfeat': {
'Meta': {'object_name': 'FeatRequiresFeat'},
'additional_text': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'required_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_by_feats'", 'to': "orm['dnd.Feat']"}),
'source_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_feats'", 'to': "orm['dnd.Feat']"})
},
'dnd.featrequiresskill': {
'Meta': {'object_name': 'FeatRequiresSkill'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_skills'", 'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"})
},
'dnd.featspecialfeatprerequisite': {
'Meta': {'object_name': 'FeatSpecialFeatPrerequisite'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'special_feat_prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpecialFeatPrerequisite']"}),
'value_1': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'value_2': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
'dnd.newsentry': {
'Meta': {'ordering': "['-published']", 'object_name': 'NewsEntry'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_html': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.DateField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'dnd.race': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Race'},
'cha': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'combat': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'combat_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'con': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dex': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'int': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'level_adjustment': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'racial_traits': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'racial_traits_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reach': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.RaceSize']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
'space': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
'str': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'wis': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'dnd.racefavoredcharacterclass': {
'Meta': {'object_name': 'RaceFavoredCharacterClass'},
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favored_classes'", 'to': "orm['dnd.Race']"})
},
'dnd.racesize': {
'Meta': {'ordering': "['order']", 'object_name': 'RaceSize'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'dnd.racespeed': {
'Meta': {'object_name': 'RaceSpeed'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Race']"}),
'speed': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['dnd.RaceSpeedType']"})
},
'dnd.racespeedtype': {
'Meta': {'ordering': "['name', 'extra']", 'object_name': 'RaceSpeedType'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'})
},
'dnd.rulebook': {
'Meta': {'ordering': "['name']", 'object_name': 'Rulebook'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dnd_edition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.DndEdition']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'official_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'published': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
},
'dnd.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'armor_check_penalty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'base_skill': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'required_by_feats': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Feat']", 'through': "orm['dnd.FeatRequiresSkill']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'trained_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'dnd.skillvariant': {
'Meta': {'unique_together': "(('skill', 'rulebook'),)", 'object_name': 'SkillVariant'},
'action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'action_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'check': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'check_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"}),
'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'special_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'synergy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'synergy_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'try_again': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'try_again_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'untrained': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'untrained_html': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'dnd.specialfeatprerequisite': {
'Meta': {'ordering': "['name']", 'object_name': 'SpecialFeatPrerequisite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'print_format': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'dnd.spell': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Spell'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'arcane_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'class_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.CharacterClass']", 'through': "orm['dnd.SpellClassLevel']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'descriptors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellDescriptor']", 'symmetrical': 'False', 'blank': 'True'}),
'divine_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Domain']", 'through': "orm['dnd.SpellDomainLevel']", 'symmetrical': 'False'}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'extra_components': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'meta_breath_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSchool']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'somatic_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'sub_school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSubSchool']", 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'true_name_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verbal_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'xp_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'dnd.spellclasslevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('character_class', 'spell'),)", 'object_name': 'SpellClassLevel'},
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spelldescriptor': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellDescriptor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.spelldomainlevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('domain', 'spell'),)", 'object_name': 'SpellDomainLevel'},
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Domain']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spellschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.spellsubschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSubSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.staticpage': {
'Meta': {'object_name': 'StaticPage'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'dnd.textfeatprerequisite': {
'Meta': {'ordering': "['text']", 'object_name': 'TextFeatPrerequisite'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['dnd']
|
evanma92/routeh
|
refs/heads/master
|
flask/lib/python2.7/site-packages/flask_social/views.py
|
2
|
# -*- coding: utf-8 -*-
"""
flask.ext.social.views
~~~~~~~~~~~~~~~~~~~~~~
This module contains the Flask-Social views
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from importlib import import_module
from flask import Blueprint, current_app, redirect, request, session, \
after_this_request, abort, url_for
from flask.ext.security import current_user, login_required
from flask.ext.security.utils import get_post_login_redirect, login_user, \
get_url, do_flash
from flask.ext.security.decorators import anonymous_user_required
from werkzeug.local import LocalProxy
from .signals import connection_removed, connection_created, \
connection_failed, login_completed, login_failed
from .utils import config_value, get_provider_or_404, get_authorize_callback, \
get_connection_values_from_oauth_response
# Convenient references
_security = LocalProxy(lambda: current_app.extensions['security'])
_social = LocalProxy(lambda: current_app.extensions['social'])
_datastore = LocalProxy(lambda: _social.datastore)
_logger = LocalProxy(lambda: current_app.logger)
def _commit(response=None):
_datastore.commit()
return response
@anonymous_user_required
def login(provider_id):
"""Starts the provider login OAuth flow"""
provider = get_provider_or_404(provider_id)
callback_url = get_authorize_callback('login', provider_id)
post_login = request.form.get('next', get_post_login_redirect())
session['post_oauth_login_url'] = post_login
return provider.authorize(callback_url)
@login_required
def connect(provider_id):
"""Starts the provider connection OAuth flow"""
provider = get_provider_or_404(provider_id)
callback_url = get_authorize_callback('connect', provider_id)
allow_view = get_url(config_value('CONNECT_ALLOW_VIEW'))
pc = request.form.get('next', allow_view)
session[config_value('POST_OAUTH_CONNECT_SESSION_KEY')] = pc
return provider.authorize(callback_url)
@login_required
def remove_all_connections(provider_id):
"""Remove all connections for the authenticated user to the
specified provider
"""
provider = get_provider_or_404(provider_id)
ctx = dict(provider=provider.name, user=current_user)
deleted = _datastore.delete_connections(user_id=current_user.get_id(),
provider_id=provider_id)
if deleted:
after_this_request(_commit)
msg = ('All connections to %s removed' % provider.name, 'info')
connection_removed.send(current_app._get_current_object(),
user=current_user._get_current_object(),
provider_id=provider_id)
else:
msg = ('Unable to remove connection to %(provider)s' % ctx, 'error')
do_flash(*msg)
return redirect(request.referrer)
@login_required
def remove_connection(provider_id, provider_user_id):
"""Remove a specific connection for the authenticated user to the
specified provider
"""
provider = get_provider_or_404(provider_id)
ctx = dict(provider=provider.name, user=current_user,
provider_user_id=provider_user_id)
deleted = _datastore.delete_connection(user_id=current_user.get_id(),
provider_id=provider_id,
provider_user_id=provider_user_id)
if deleted:
after_this_request(_commit)
msg = ('Connection to %(provider)s removed' % ctx, 'info')
connection_removed.send(current_app._get_current_object(),
user=current_user._get_current_object(),
provider_id=provider_id)
else:
        msg = ('Unable to remove connection to %(provider)s' % ctx, 'error')
do_flash(*msg)
return redirect(request.referrer or get_post_login_redirect())
def connect_handler(cv, provider):
"""Shared method to handle the connection process
    :param cv: A dictionary containing the connection values
    :param provider: The provider the connection should be made to
"""
cv.setdefault('user_id', current_user.get_id())
connection = _datastore.find_connection(**cv)
if connection is None:
after_this_request(_commit)
connection = _datastore.create_connection(**cv)
msg = ('Connection established to %s' % provider.name, 'success')
connection_created.send(current_app._get_current_object(),
user=current_user._get_current_object(),
connection=connection)
else:
msg = ('A connection is already established with %s '
'to your account' % provider.name, 'notice')
connection_failed.send(current_app._get_current_object(),
user=current_user._get_current_object())
redirect_url = session.pop(config_value('POST_OAUTH_CONNECT_SESSION_KEY'),
get_url(config_value('CONNECT_ALLOW_VIEW')))
do_flash(*msg)
return redirect(redirect_url)
def connect_callback(provider_id):
provider = get_provider_or_404(provider_id)
def connect(response):
cv = get_connection_values_from_oauth_response(provider, response)
return cv
cv = provider.authorized_handler(connect)()
if cv is None:
do_flash('Access was denied by %s' % provider.name, 'error')
return redirect(get_url(config_value('CONNECT_DENY_VIEW')))
return connect_handler(cv, provider)
@anonymous_user_required
def login_handler(response, provider, query):
"""Shared method to handle the signin process"""
connection = _datastore.find_connection(**query)
if connection:
after_this_request(_commit)
user = connection.user
login_user(user)
key = _social.post_oauth_login_session_key
redirect_url = session.pop(key, get_post_login_redirect())
login_completed.send(current_app._get_current_object(),
provider=provider, user=user)
return redirect(redirect_url)
login_failed.send(current_app._get_current_object(),
provider=provider,
oauth_response=response)
next = get_url(_security.login_manager.login_view)
msg = '%s account not associated with an existing user' % provider.name
do_flash(msg, 'error')
return redirect(next)
def login_callback(provider_id):
try:
provider = _social.providers[provider_id]
module = import_module(provider.module)
except KeyError:
abort(404)
def login(response):
_logger.debug('Received login response from '
'%s: %s' % (provider.name, response))
if response is None:
do_flash('Access was denied to your %s '
'account' % provider.name, 'error')
return _security.login_manager.unauthorized(), None
query = dict(provider_user_id=module.get_provider_user_id(response),
provider_id=provider_id)
return response, query
response, query = provider.authorized_handler(login)()
if query is None:
return response
return login_handler(response, provider, query)
def create_blueprint(state, import_name):
bp = Blueprint(state.blueprint_name, import_name,
url_prefix=state.url_prefix,
template_folder='templates')
bp.route('/login/<provider_id>')(login_callback)
bp.route('/login/<provider_id>',
methods=['POST'])(login)
bp.route('/connect/<provider_id>')(connect_callback)
bp.route('/connect/<provider_id>',
methods=['POST'])(connect)
bp.route('/connect/<provider_id>',
methods=['DELETE'])(remove_all_connections)
bp.route('/connect/<provider_id>/<provider_user_id>',
methods=['DELETE'])(remove_connection)
return bp
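# For reference, the URL map produced above (relative to the blueprint's
# url_prefix) is:
#   GET    /login/<provider_id>                       -> login_callback (OAuth callback)
#   POST   /login/<provider_id>                       -> login (start the login flow)
#   GET    /connect/<provider_id>                     -> connect_callback (OAuth callback)
#   POST   /connect/<provider_id>                     -> connect (start the connect flow)
#   DELETE /connect/<provider_id>                     -> remove_all_connections
#   DELETE /connect/<provider_id>/<provider_user_id>  -> remove_connection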
|
ShineFan/odoo
|
refs/heads/8.0
|
openerp/addons/base/res/res_currency.py
|
96
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
import time
import math
from openerp import api, fields as fields2
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools import float_round, float_is_zero, float_compare
from openerp.tools.translate import _
import simplejson as json
CURRENCY_DISPLAY_PATTERN = re.compile(r'(\w+)\s*(?:\((.*)\))?')
class res_currency(osv.osv):
def _current_rate(self, cr, uid, ids, name, arg, context=None):
return self._get_current_rate(cr, uid, ids, context=context)
def _current_rate_silent(self, cr, uid, ids, name, arg, context=None):
return self._get_current_rate(cr, uid, ids, raise_on_no_rate=False, context=context)
def _get_current_rate(self, cr, uid, ids, raise_on_no_rate=True, context=None):
if context is None:
context = {}
res = {}
date = context.get('date') or time.strftime('%Y-%m-%d')
for id in ids:
cr.execute('SELECT rate FROM res_currency_rate '
'WHERE currency_id = %s '
'AND name <= %s '
'ORDER BY name desc LIMIT 1',
(id, date))
if cr.rowcount:
res[id] = cr.fetchone()[0]
elif not raise_on_no_rate:
res[id] = 0
else:
currency = self.browse(cr, uid, id, context=context)
                raise osv.except_osv(_('Error!'), _("No currency rate associated for currency '%s' for the given period") % (currency.name))
return res
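    # Illustrative lookup (hypothetical data): with two stored rates for
    # currency id 1 -- 1.10 dated 2015-01-01 and 1.15 dated 2015-02-01 --
    # a call such as
    #   self._get_current_rate(cr, uid, [1], context={'date': '2015-01-20'})
    # returns {1: 1.10}, i.e. the newest rate whose date is <= the context
    # date (or today when no date is passed).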
_name = "res.currency"
_description = "Currency"
_columns = {
# Note: 'code' column was removed as of v6.0, the 'name' should now hold the ISO code.
'name': fields.char('Currency', size=3, required=True, help="Currency Code (ISO 4217)"),
'symbol': fields.char('Symbol', size=4, help="Currency sign, to be used when printing amounts."),
'rate': fields.function(_current_rate, string='Current Rate', digits=(12,6),
help='The rate of the currency to the currency of rate 1.'),
        # Do not use for computation! Same as the rate field, but fails silently (0.0) when no rate is defined.
'rate_silent': fields.function(_current_rate_silent, string='Current Rate', digits=(12,6),
help='The rate of the currency to the currency of rate 1 (0 if no rate defined).'),
'rate_ids': fields.one2many('res.currency.rate', 'currency_id', 'Rates'),
'accuracy': fields.integer('Computational Accuracy'),
'rounding': fields.float('Rounding Factor', digits=(12,6)),
'active': fields.boolean('Active'),
'company_id':fields.many2one('res.company', 'Company'),
'base': fields.boolean('Base'),
        'position': fields.selection([('after','After Amount'),('before','Before Amount')], 'Symbol Position', help="Determines where the currency symbol should be placed, before or after the amount.")
}
_defaults = {
'active': 1,
'position' : 'after',
'rounding': 0.01,
'accuracy': 4,
'company_id': False,
}
_sql_constraints = [
# this constraint does not cover all cases due to SQL NULL handling for company_id,
# so it is complemented with a unique index (see below). The constraint and index
# share the same prefix so that IntegrityError triggered by the index will be caught
# and reported to the user with the constraint's error message.
('unique_name_company_id', 'unique (name, company_id)', 'The currency code must be unique per company!'),
]
_order = "name"
def init(self, cr):
# CONSTRAINT/UNIQUE INDEX on (name,company_id)
# /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92
        # only supports field names in constraint definitions, and we need a function here:
# we need to special-case company_id to treat all NULL company_id as equal, otherwise
# we would allow duplicate "global" currencies (all having company_id == NULL)
cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'res_currency_unique_name_company_id_idx'""")
if not cr.fetchone():
cr.execute("""CREATE UNIQUE INDEX res_currency_unique_name_company_id_idx
ON res_currency
(name, (COALESCE(company_id,-1)))""")
date = fields2.Date(compute='compute_date')
@api.one
@api.depends('rate_ids.name')
def compute_date(self):
self.date = self.rate_ids[:1].name
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
results = super(res_currency,self)\
.name_search(cr, user, name, args, operator=operator, context=context, limit=limit)
if not results:
name_match = CURRENCY_DISPLAY_PATTERN.match(name)
if name_match:
results = super(res_currency,self)\
.name_search(cr, user, name_match.group(1), args, operator=operator, context=context, limit=limit)
return results
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','symbol'], context=context, load='_classic_write')
return [(x['id'], tools.ustr(x['name'])) for x in reads]
def copy(self, cr, uid, id, default=None, context=None):
if context is None:
context = {}
if not default:
default = {}
default.update(name=_("%s (copy)")
% (self.browse(cr, uid, id, context=context).name))
return super(res_currency, self).copy(
cr, uid, id, default=default, context=context)
@api.v8
def round(self, amount):
""" Return `amount` rounded according to currency `self`. """
return float_round(amount, precision_rounding=self.rounding)
@api.v7
def round(self, cr, uid, currency, amount):
"""Return ``amount`` rounded according to ``currency``'s
rounding rules.
:param Record currency: currency for which we are rounding
:param float amount: the amount to round
:return: rounded float
"""
return float_round(amount, precision_rounding=currency.rounding)
@api.v8
def compare_amounts(self, amount1, amount2):
""" Compare `amount1` and `amount2` after rounding them according to
`self`'s precision. An amount is considered lower/greater than
another amount if their rounded value is different. This is not the
same as having a non-zero difference!
For example 1.432 and 1.431 are equal at 2 digits precision, so this
method would return 0. However 0.006 and 0.002 are considered
different (returns 1) because they respectively round to 0.01 and
0.0, even though 0.006-0.002 = 0.004 which would be considered zero
at 2 digits precision.
"""
return float_compare(amount1, amount2, precision_rounding=self.rounding)
@api.v7
def compare_amounts(self, cr, uid, currency, amount1, amount2):
"""Compare ``amount1`` and ``amount2`` after rounding them according to the
        given currency's precision.
An amount is considered lower/greater than another amount if their rounded
value is different. This is not the same as having a non-zero difference!
For example 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0.
However 0.006 and 0.002 are considered different (returns 1) because
they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
:param Record currency: currency for which we are rounding
:param float amount1: first amount to compare
:param float amount2: second amount to compare
:return: (resp.) -1, 0 or 1, if ``amount1`` is (resp.) lower than,
equal to, or greater than ``amount2``, according to
``currency``'s rounding.
"""
return float_compare(amount1, amount2, precision_rounding=currency.rounding)
@api.v8
def is_zero(self, amount):
""" Return true if `amount` is small enough to be treated as zero
according to currency `self`'s rounding rules.
Warning: ``is_zero(amount1-amount2)`` is not always equivalent to
``compare_amounts(amount1,amount2) == 0``, as the former will round
after computing the difference, while the latter will round before,
giving different results, e.g., 0.006 and 0.002 at 2 digits precision.
"""
return float_is_zero(amount, precision_rounding=self.rounding)
@api.v7
def is_zero(self, cr, uid, currency, amount):
"""Returns true if ``amount`` is small enough to be treated as
zero according to ``currency``'s rounding rules.
Warning: ``is_zero(amount1-amount2)`` is not always equivalent to
``compare_amounts(amount1,amount2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param Record currency: currency for which we are rounding
:param float amount: amount to compare with currency's zero
"""
return float_is_zero(amount, precision_rounding=currency.rounding)
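    # Worked contrast between the two helpers (hypothetical rounding of 0.01,
    # i.e. 2 decimal digits):
    #   currency.compare_amounts(0.006, 0.002)   # -> 1 (0.01 vs 0.0 after rounding)
    #   currency.is_zero(0.006 - 0.002)          # -> True (0.004 rounds to 0.0)
    # so a zero difference under is_zero() does not imply equality under
    # compare_amounts().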
def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None):
if context is None:
context = {}
ctx = context.copy()
from_currency = self.browse(cr, uid, from_currency.id, context=ctx)
to_currency = self.browse(cr, uid, to_currency.id, context=ctx)
if from_currency.rate == 0 or to_currency.rate == 0:
date = context.get('date', time.strftime('%Y-%m-%d'))
if from_currency.rate == 0:
currency_symbol = from_currency.symbol
else:
currency_symbol = to_currency.symbol
raise osv.except_osv(_('Error'), _('No rate found \n' \
'for the currency: %s \n' \
'at the date: %s') % (currency_symbol, date))
return to_currency.rate/from_currency.rate
def _compute(self, cr, uid, from_currency, to_currency, from_amount, round=True, context=None):
if (to_currency.id == from_currency.id):
if round:
return self.round(cr, uid, to_currency, from_amount)
else:
return from_amount
else:
rate = self._get_conversion_rate(cr, uid, from_currency, to_currency, context=context)
if round:
return self.round(cr, uid, to_currency, from_amount * rate)
else:
return from_amount * rate
@api.v7
def compute(self, cr, uid, from_currency_id, to_currency_id, from_amount,
round=True, context=None):
context = context or {}
if not from_currency_id:
from_currency_id = to_currency_id
if not to_currency_id:
to_currency_id = from_currency_id
xc = self.browse(cr, uid, [from_currency_id,to_currency_id], context=context)
from_currency = (xc[0].id == from_currency_id and xc[0]) or xc[1]
to_currency = (xc[0].id == to_currency_id and xc[0]) or xc[1]
return self._compute(cr, uid, from_currency, to_currency, from_amount, round, context)
@api.v8
def compute(self, from_amount, to_currency, round=True):
""" Convert `from_amount` from currency `self` to `to_currency`. """
assert self, "compute from unknown currency"
assert to_currency, "compute to unknown currency"
# apply conversion rate
if self == to_currency:
to_amount = from_amount
else:
to_amount = from_amount * self._get_conversion_rate(self, to_currency)
# apply rounding
return to_currency.round(to_amount) if round else to_amount
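    # Conversion sketch with hypothetical rates (a currency's 'rate' is the
    # number of its units per unit of the rate-1 base currency): if EUR has
    # rate 1.0 and USD has rate 1.25, then
    #   eur.compute(100.0, usd)   # -> 125.0 (100.0 * 1.25/1.0, then USD rounding)
    #   usd.compute(125.0, eur)   # -> 100.0 (125.0 * 1.0/1.25, then EUR rounding)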
def get_format_currencies_js_function(self, cr, uid, context=None):
""" Returns a string that can be used to instanciate a javascript function that formats numbers as currencies.
That function expects the number as first parameter and the currency id as second parameter. In case of failure it returns undefined."""
function = ""
for row in self.search_read(cr, uid, domain=[], fields=['id', 'name', 'symbol', 'rounding', 'position'], context=context):
digits = int(math.ceil(math.log10(1 / row['rounding'])))
symbol = row['symbol'] or row['name']
format_number_str = "openerp.web.format_value(arguments[0], {type: 'float', digits: [69," + str(digits) + "]}, 0.00)"
if row['position'] == 'after':
return_str = "return " + format_number_str + " + '\\xA0' + " + json.dumps(symbol) + ";"
else:
return_str = "return " + json.dumps(symbol) + " + '\\xA0' + " + format_number_str + ";"
function += "if (arguments[1] === " + str(row['id']) + ") { " + return_str + " }"
return function
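    # For a hypothetical row {'id': 1, 'symbol': '$', 'rounding': 0.01,
    # 'position': 'before'}, digits evaluates to 2 and the emitted fragment is
    # roughly:
    #   if (arguments[1] === 1) { return "$" + '\xA0' +
    #       openerp.web.format_value(arguments[0], {type: 'float', digits: [69,2]}, 0.00); }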
class res_currency_rate(osv.osv):
_name = "res.currency.rate"
_description = "Currency Rate"
_columns = {
'name': fields.datetime('Date', required=True, select=True),
'rate': fields.float('Rate', digits=(12, 6), help='The rate of the currency to the currency of rate 1'),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
}
_defaults = {
'name': lambda *a: time.strftime('%Y-%m-%d 00:00:00'),
}
_order = "name desc"
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
if operator in ['=', '!=']:
try:
date_format = '%Y-%m-%d'
                if context and context.get('lang'):
lang_obj = self.pool['res.lang']
lang_ids = lang_obj.search(cr, user, [('code', '=', context['lang'])], context=context)
if lang_ids:
date_format = lang_obj.browse(cr, user, lang_ids[0], context=context).date_format
name = time.strftime('%Y-%m-%d', time.strptime(name, date_format))
except ValueError:
try:
args.append(('rate', operator, float(name)))
except ValueError:
return []
name = ''
operator = 'ilike'
return super(res_currency_rate, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
miyataken999/weblate
|
refs/heads/master
|
weblate/trans/management/commands/lock_translation.py
|
2
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from weblate.trans.management.commands import WeblateCommand
class Command(WeblateCommand):
help = 'locks subproject for editing'
def handle(self, *args, **options):
for subproject in self.get_subprojects(*args, **options):
if not subproject.locked:
subproject.do_lock(None)
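# Hypothetical invocation (the argument parsing lives in WeblateCommand and is
# assumed rather than shown here): something like
#   ./manage.py lock_translation project/component
# would iterate over the matching subprojects and lock each one that is not
# locked already.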
|
blankenberg/bioconda-recipes
|
refs/heads/master
|
recipes/varscan/2.3.7/varscan.py
|
62
|
#!/usr/bin/env python
#
# Wrapper script for invoking the VarScan JAR package.
#
# This script is written for use with the Conda package manager and is ported
# from a bash script that does the same thing, adapting the style in
# the peptide-shaker wrapper
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
import subprocess
import sys
from os import access, getenv, path, X_OK
# Expected name of the VarScan JAR file.
JAR_NAME = 'VarScan.jar'
# Default options passed to the `java` executable.
DEFAULT_JVM_MEM_OPTS = ['-Xms512m', '-Xmx1g']
def real_dirname(in_path):
"""Returns the symlink-resolved, canonicalized directory-portion of
the given path."""
return path.dirname(path.realpath(in_path))
def java_executable():
"""Returns the name of the Java executable."""
java_home = getenv('JAVA_HOME')
java_bin = path.join('bin', 'java')
if java_home and access(path.join(java_home, java_bin), X_OK):
return path.join(java_home, java_bin)
return 'java'
def jvm_opts(argv, default_mem_opts=DEFAULT_JVM_MEM_OPTS):
"""Constructs a list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
    The return value is a 3-tuple of lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts, prop_opts, pass_args = [], [], []
for arg in argv:
if arg.startswith('-D') or arg.startswith('-XX'):
opts_list = prop_opts
elif arg.startswith('-Xm'):
opts_list = mem_opts
else:
opts_list = pass_args
opts_list.append(arg)
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_mem_opts
return (mem_opts, prop_opts, pass_args)
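# Illustrative split (hypothetical arguments):
#   jvm_opts(['-Xmx2g', '-Dfile.encoding=UTF-8', 'mpileup2snp', 'in.pileup'])
# returns
#   (['-Xmx2g'], ['-Dfile.encoding=UTF-8'], ['mpileup2snp', 'in.pileup'])
# and DEFAULT_JVM_MEM_OPTS is only substituted when no -Xm* flag is supplied
# and _JAVA_OPTIONS is unset.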
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('org'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = path.join(jar_dir, JAR_NAME)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == "__main__":
main()
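# End-to-end sketch (hypothetical command line; the VarScan subcommand and
# option are illustrative only): invoking the wrapper as
#   varscan mpileup2snp sample.pileup --min-var-freq 0.01
# resolves to roughly
#   java -Xms512m -Xmx1g -jar <dir-of-this-script>/VarScan.jar \
#       mpileup2snp sample.pileup --min-var-freq 0.01
# using '-jar' because the first pass-through argument does not start with 'org'.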
|