| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
RiceMunk/omnifit
|
omnifit/fitter/fitter.py
|
1
|
23070
|
import numpy as np
from lmfit import minimize, Parameters, Parameter
from astropy import units as u
from .. import spectrum
from .functions import *  # explicit relative import, consistent with the relative import above
class Fitter():
"""
A class for multi-component fitting to spectroscopic data of ices.
This is the heart of Omnifit, which receives spectra from the spectrum
module, and is then capable of fitting an arbitrary number of different
components to the target spectrum thus designated.
Attributes
----------
target_x : `numpy.ndarray`
The x axis of the target spectrum, e.g. the wavelength.
target_y : `numpy.ndarray`
The y axis of the target spectrum, e.g. the optical depth.
target_dy : `float`
A single number expressing the average uncertainty of the y
axis data.
modelname : `string`
A human-readable name for the model being fitted.
psf : `Nonetype`, `numpy.ndarray`, or `astropy.convolution.Kernel`
If set, this attribute can be used to give a kernel which should
be used to convolve all the fitted data with.
fitrange : `Nonetype` or `list`
If set, this specifies the inclusive limits to which
the fitting should be performed in x axis coordinates.
For example a fitrange of [[200,250],[300,350]] sets
two fitting windows of 200 to 250, and 300 to 350.
color : `string`
A string indicating the desired plotting color of the target
data, in a format understandable by matplotlib.
funclist : `list`
A list containing all the fittable functions. Each list entry
is a dictionary containing the following keys and values:
* 'name' : A human-readable name for the function being fitted,
in string format.
* 'color' : A string indicating the desired plotting color of
the data, in a format understandable by matplotlib.
* 'type' : A string indicating what type of data the function
consists of. It can be either 'analytical' or 'empirical',
indicating an analytical function or empirical spectrum,
respectively.
* 'shape' : The shape of the function being fitted. In the case
of an analytical function, this is a string indicating the
callable name of the function. In the case of an empirical
spectrum, this is the y-axis data from the spectrum.
* 'params' : an lmfit `Parameters` instance containing the fitting
parameters appropriate to the data being fitted.
fitpars : `Parameters`
This is where the fitting parameters are stored during and after
minimization.
fitres : `Minimizer`
The fitting results are stored in this class, as documented in
lmfit.
"""
def __init__(self,x,y,dy=1.0,modelname='Unknown model',psf=None,fitrange=None,color='black'):
"""
Fitter(x,y,dy=1.0,modelname='Unknown model',psf=None,fitrange=None,color='black')
Constructor for the Fitter class. Initialisation happens by
designating the target spectrum.
Parameters
----------
x : `numpy.ndarray`
The x axis of the target spectrum, e.g. the wavelength.
y : `numpy.ndarray`
The y axis of the target spectrum, e.g. the optical depth.
dy : `float`, optional
A single number expressing the average uncertainty of the y
axis data.
modelname : `string`, optional
A human-readable name for the model being fitted.
psf : Nonetype or numpy.ndarray or astropy.convolution.Kernel, optional
This attribute can be used to give a kernel which should be
used to convolve all the fitted data with.
fitrange : `Nonetype` or `list`, optional
If set, this specifies the inclusive limits to which
the fitting should be performed in x axis coordinates.
For example a fitrange of [[200,250],[300,350]] sets
two fitting windows of 200 to 250, and 300 to 350.
color : `string`, optional
A string indicating the desired plotting color of the target
data, in a format understandable by matplotlib.
"""
if len(x) != len(y):
raise RuntimeError('Input arrays have different sizes.')
self.target_x=x
self.target_y=y
self.target_dy=dy
self.modelname=modelname
self.psf=psf
self.fitrange=fitrange
self.color=color
self.funclist=[]
@classmethod
def fromspectrum(cls,spectrum,**kwargs):
"""
Fitter.fromspectrum(spectrum,**kwargs)
An alternate way to initialise Fitter, by directly giving it
a spectrum. Extracted data from the spectrum are the x, y,
and (if the spectrum has been baselined) dy parameters.
Parameters
----------
spectrum : `omnifit.spectrum.BaseSpectrum` or its child class
The input spectrum.
**kwargs : Arguments, optional
Additional initialisation arguments can be passed to `Fitter`
using this. Note that x and y (and dy, if applicable) are defined
using the data contained in the input spectrum.
"""
if spectrum.baselined:
return cls(spectrum.x.value,spectrum.y.value,spectrum.dy,**kwargs)
else:
return cls(spectrum.x.value,spectrum.y.value,**kwargs)
def add_empirical(self,spectrum,params,funcname=None,color='red'):
"""
add_empirical(spectrum,params,funcname=None,color='red')
Add empirical data in the form of a spectrum to the fitting list.
The spectrum must be interpolated to match the target x axis.
Parameters
----------
spectrum : `spectrum.BaseSpectrum`
The input spectrum.
params : `Parameters`
The input parameters. Specifically this must contain
the 'mul' parameter, which indicates what value the
spectrum will be multiplied with during fitting.
funcname : `Nonetype` or `string`, optional
A human-readable name for the data being fitted.
If this is left as None, the name of the spectrum will
be used.
color : `string`, optional
A string indicating the desired plotting color of the
data, in a format understandable by matplotlib.
"""
if not(funcname):
funcname=spectrum.name
if not np.all(spectrum.x.value == self.target_x):
raise RuntimeError('Input spectrum x axis does not match the target spectrum x axis.')
self.funclist.append({'type':'empirical','shape':spectrum.y.value,'params':params,'name':funcname,'color':color})
def add_analytical(self,shape,params,funcname='Unknown function',color='red'):
"""
add_analytical(shape,params,funcname='Unknown function',color='red')
Add analytical data in the form of a callable function to the
fitting list.
Parameters
----------
shape : `string`
The callable name of the function to be fitted.
params : `Parameters`
The input parameters. These should be formatted in a way that
the function defined by shape can understand them, and that
function should be created in such a way that it can make use
of lmfit parameters.
funcname : `string`, optional
A human-readable name for the data being fitted.
color : `string`, optional
A string indicating the desired plotting color of the
data, in a format understandable by matplotlib.
"""
self.funclist.append({'type':'analytical','shape':shape,'params':params,'name':funcname,'color':color})
def perform_fit(self,**kwargs):
"""
perform_fit(**kwargs)
Uses `minimize` in lmfit to perform least-squares fitting of all the
functions in the function list to the target data.
Parameters
----------
**kwargs : Arguments, optional
This can be used to give additional arguments for `minimize`.
"""
self.fitpars = self.__extract_pars()
self.fitres=minimize(self.__fit_residual,self.fitpars,**kwargs)
if not(self.fitres.success):
raise RuntimeError('Fitting failed!')
else:
self.fitpars = self.fitres.params
def __fit_residual(self,params,custrange=None):
"""
__fit_residual(params,custrange=None)
This is an internal function used for calculating the total
residual of the data against the fittings function(s), given
a set of lmfit parameters. The residual calculation can also
be limited to a specific x axis range.
Parameters
----------
params : `Parameters`
The parameters used for calculating the residual.
custrange : `Nonetype` or `list`, optional
If set, this specifies the inclusive range within which
the residual is calculated. Otherwise the fitting range
specified during Initialisation is used.
Returns
-------
The residual function within the fitting range with the given
lmfit parameters.
"""
if custrange==None:
fitrange=self.fitrange
else:
fitrange=custrange
residual=1.0*self.target_y
totModel=np.zeros(len(residual))
for indFunc,cFunc in enumerate(self.funclist):
oPar=Parameters()
cParlist = cFunc['params']
for cPar in cParlist.values():
cParams=params[self.__func_ident(indFunc)+cPar.name]
oPar.add(cPar.name,
value=cParams.value,vary=cParams.vary,
min=cParams.min,max=cParams.max,
expr=cParams.expr)
residual-=self.__parse_function(oPar,cFunc)
#Crop out not-numbers and fitting range exterior if necessary
if np.any(fitrange):
fitInd=np.isinf(residual)
for cRange in fitrange:
fitInd=np.logical_or(fitInd,np.logical_and(
np.less_equal(cRange[0],self.target_x),
np.greater_equal(cRange[1],self.target_x)))
else:
fitInd=np.isfinite(residual)
return residual[fitInd]
def chisq(self,checkrange=None):
"""
chisq(checkrange=None)
Return chi squared of fit, either in a custom range
or in the range used by the fit.
Parameters
----------
checkrange : `Nonetype` or `list`, optional
If set, this specifies the inclusive range within which
the chi squared value is calculated. Otherwise the fitting
range specified during Initialisation is used.
Returns
-------
The chi squared within the desired ranged.
"""
residual = self.__fit_residual(self.fitpars,custrange=checkrange)
return np.sum((residual**2.0)/(self.target_dy**2.0))
def plot_fitresults(self,ax,lw=[1,2,3],color_total='blue',legend=True,**kwargs):
"""
plot_fitresults(ax,lw=[1,2,3],color_total='blue',legend=True,**kwargs)
Plot the fitting results to the given matplotlib axis, with a
number of optional parameters specifying how the different plottable
components are presented.
Parameters
----------
ax : `matplotlib.axis`
The axis which the plot will be generated in.
lw : `list`, optional
This list of 3 numbers specifies the line widths of the target
spectrum, the fitted functions, and the total fit, respectively.
color_total : `string`, optional
A string indicating the desired plotting color of the total sum
of the fit results, in a format understandable by matplotlib.
The colors of the target spectrum and the fitted functions are
specified during their initialisation and addition.
legend : `bool`, optional
If set to True, a legend is automatically created using the
target spectrum and fitted function names.
**kwargs : Arguments, optional
This can be used to pass additional arguments
to `matplotlib.pyplot.plot`, which is used by this
method for its plotting.
"""
ax.plot(self.target_x,self.target_y,color=self.color,lw=lw[0],**kwargs)
legList = [self.modelname]
#totres=self.targ_y+self.fitres.residual
totRes=np.zeros(len(self.target_y))
for indFunc,cFunc in enumerate(self.funclist):
oPar=Parameters()
cParList = cFunc['params']
cCol = cFunc['color']
for cPar in cParList.values():
cFitPar=self.fitpars[self.__func_ident(indFunc)+cPar.name]
oPar.add(cPar.name,
value=cFitPar.value,vary=cFitPar.vary,
min=cFitPar.min,max=cFitPar.max,
expr=cFitPar.expr)
funcRes = self.__parse_function(oPar,cFunc)
totRes+=funcRes
ax.plot(self.target_x,funcRes,lw=lw[1],color=cCol,**kwargs)
legList.append(cFunc['name'])
legList.append('Total fit')
ax.plot(self.target_x,totRes,lw=lw[2],color=color_total,**kwargs)
if legend:
ax.legend(legList,shadow=True)
def fitresults_tofile(self,filename,detection_threshold=5.0):
"""
fitresults_tofile(filename,detection_threshold=5.0)
Export fit results to two output files which are intended to be
easily readable and parseable with other software.
The first file is filename.csv, which contains x and y data of
the fitted models, as would be visualized in a plotted fit result.
The first column of the csv is the x value, which is shared by all
models.
The second column is the y value of the data being fitted.
The third column is the total sum of the fitted models.
The fourth to Nth columns are the individual models, in the order
described in the second file, filename.xml.
The second file, filename.xml is an XML file containing additional
information about the fitted data and the fit results which are not
easily representable in a csv-formatted file. This data is
formatted using the following XML elements:
* INFO : Contains all the other elements described below, and has
the attribute "file", which is the name of the csv file pair of
this xml file.
* MODELNAME : Contains the name of the model.
* HAVEPSF : A boolean value indicating whether there is a PSF
associated with the model.
* RMS_DATA : The uncertainty of the data.
* NUMBER_FUNCTIONS : An integer indicating how many functions
have been fitted to the total data.
In addition to the above elements, each fitted function has its own
element, designated FUNCTION, having the attribute "name" which is
the name of the function. FUNCTION contains the following elements:
* TYPE : If the function is an empirical one, this contains the
string "empirical". Otherwise it contains the name of the
called analytical function.
* DETECTION : When generating the contents of this element, the
method is_nondet is called with the detection threshold designated
by the parameter detection_threshold. The result is indicated here
with "True" or "False", depending on whether the function is
considered a detection.
* CSV_COLUMN : Indicates which column in the CSV contains the
fitted data for this function.
* NUMBER_PARAMS : Indicates how many parameters are used by
this function, i.e. the number of PARAMETER elements.
Finally, contained within each FUNCTION element is a number of
PARAMETER elements, which list the best-fit data for each fitted
parameter pertaining to that function. Each PARAMETER element
contains the attribute "name", which tells the name of the
parameter. In addition the following elements are contained by
each PARAMETER element:
* VALUE : The best-fit value for this parameter.
Parameters
----------
filename : `string`
The extensionless version of the desired filename which the
data should be exported to. As a result the files
"filename.csv" and "filename.xml" are created.
detection_threshold : `float`, optional
The threshold of detection to be used in determining whether
the value contained by the DETECTION element is true or not.
"""
filename_csv = filename+'.csv'
filename_xml = filename+'.xml'
file_xml = open(filename_xml,'w')
file_xml.write('<!-- Automatically generated information file for csv file '+filename_csv+'-->\n')
file_xml.write('<INFO file="'+filename_csv+'">\n')
file_xml.write('<MODELNAME>'+self.modelname+'</MODELNAME>\n')
file_xml.write('<HAVEPSF>'+str(self.psf is not None)+'</HAVEPSF>\n')  # identity check: psf may be an ndarray
file_xml.write('<RMS_DATA>'+str(self.target_dy)+'</RMS_DATA>\n')
file_xml.write('<NUMBER_FUNCTIONS>'+str(len(self.funclist))+'</NUMBER_FUNCTIONS>\n')
outdata_csv = np.vstack([self.target_x,self.target_y])
outdata_functions = np.empty([0,len(self.target_x)])
totRes = np.zeros(len(self.target_x))
for indFunc,cFunc in enumerate(self.funclist):
file_xml.write('<FUNCTION name="'+cFunc['name']+'">\n')
file_xml.write('<TYPE>')
if cFunc['type'] == 'analytical':
file_xml.write(cFunc['shape'])
elif cFunc['type'] == 'empirical':
file_xml.write('empirical')
else:
file_xml.write('unknown'+'\n')
file_xml.write('</TYPE>\n')
file_xml.write('<DETECTION>'+str(not self.is_nondet(sigma=detection_threshold)[cFunc['name']])+'</DETECTION>\n')
file_xml.write('<CSV_COLUMN>'+str(indFunc+3)+'</CSV_COLUMN>\n')
cParlist = cFunc['params']
file_xml.write('<NUMBER_PARAMS>'+str(len(cParlist))+'</NUMBER_PARAMS>\n')
oPar=Parameters()
for cPar in cParlist.values():
file_xml.write('<PARAMETER name="'+cPar.name+'">\n')
cFitPar=self.fitpars[self.__func_ident(indFunc)+cPar.name]
oPar.add(cPar.name,
value=cFitPar.value,vary=cFitPar.vary,
min=cFitPar.min,max=cFitPar.max,
expr=cFitPar.expr)
file_xml.write('<VALUE>'+str(cFitPar.value)+'</VALUE>\n')
file_xml.write('</PARAMETER>\n')
funcRes = self.__parse_function(oPar,cFunc)
outdata_functions = np.vstack([outdata_functions,funcRes])
totRes += funcRes
file_xml.write('</FUNCTION>\n')
file_xml.write('</INFO>')
file_xml.close()
outdata_csv = np.vstack([outdata_csv,totRes,outdata_functions])
np.savetxt(filename_csv,outdata_csv.transpose(),delimiter=',',header='For info, see '+filename_xml)
def is_nondet(self,sigma=5.0):
"""
is_nondet(sigma=5.0)
Determines whether the fitted functions in the function list can
be considered detections or non-detections using the given detection
threshold. This is done by comparing the peak of the fitted function
within the fitting range to a multiple (set by the parameter sigma)
of the RMS noise in the target data.
It should be emphasized that unless the dy attribute has been set
during the fitter class initialisation, the results returned by this
method are meaningless.
Parameters
----------
sigma : `float`, optional
The multiplier that should be applied to the noise when comparing
it against the fitted function peaks.
Returns
-------
A dictionary containing boolean values for each function (with
their names as the keys) and the total fit (key 'total'), with
True indicating that the function is considered a non-detection
using the criteria outlined above.
"""
minY = sigma*self.target_dy
out = {}
totRes = np.zeros(len(self.target_x))
for indFunc,cFunc in enumerate(self.funclist):
cParlist = cFunc['params']
oPar=Parameters()
for cPar in cParlist.values():
cFitPar=self.fitpars[self.__func_ident(indFunc)+cPar.name]
oPar.add(cPar.name,
value=cFitPar.value,vary=cFitPar.vary,
min=cFitPar.min,max=cFitPar.max,
expr=cFitPar.expr)
funcRes = self.__parse_function(oPar,cFunc)
if np.max(funcRes) < minY:
out[cFunc['name']] = True
else:
out[cFunc['name']] = False
totRes += funcRes
if np.max(totRes) < minY:
out['total'] = True
else:
out['total'] = False
return out
def fit_results(self):
"""
fit_results()
Return the fitting results as a dictionary.
Parameters
----------
None
Returns
-------
A dictionary containing all the individual functions which were
fitted. The key-value combinations of this dictionary consist of
the function name, and its lmfit Parameters instance, which
contains the best-fit results.
"""
oResults={}
for indFunc,cFunc in enumerate(self.funclist):
oKeyname_base=cFunc['name']
oKeyind=0
oKeyname=oKeyname_base
while oResults.__contains__(oKeyname): #In case of duplicate function names
oKeyind+=1
oKeyname=oKeyname_base+'(duplicate '+str(oKeyind)+')'
oResults[cFunc['name']]=self.__fit_result(indFunc)
return oResults
def __fit_result(self,index):
"""
__fit_result(index)
Return fitting results for a specific function in the internal
function list.
Parameters
----------
index : `int`
Desired index of the function to fetch from the function list.
Returns
-------
A `Parameters` instance containing the fitting
results for the desired function.
"""
oParlist=self.funclist[index]['params']
for cParname in oParlist.keys():
coPar=self.fitpars[self.__func_ident(index)+cParname]
coPar.name=cParname
oParlist[cParname]=coPar
return oParlist
def __parse_function(self,params,function):
"""
__parse_function(params,function)
Parse the input function, insert parameters, return result.
Parameters
----------
params : `Parameters`
The lmfit `Parameters` instance to use as input parameters.
function : `dict`
A dictionary formatted in the style that the entries inside
funclist are formatted
Returns
-------
The result of the given function with given parameters.
"""
if function['type']=='empirical':
funcres=muldata(function['shape'],params['mul'].value)
elif function['type']=='analytical':
funcres=globals()[function['shape']](self.target_x,params,self.psf)
else:
raise RuntimeError('Unknown function type!')
return funcres
def __extract_pars(self):
"""
__extract_pars()
Extracts the parameters from the function list and converts them to
a single lmfit Parameters instance, which can then be manipulated
by the residual minimization routines.
Parameters
----------
None
Returns
-------
An lmfit `Parameters` instance containing the parameters
of *all* the fittable functions in a single place.
"""
oPars=Parameters()
for indFunc,cFunc in enumerate(self.funclist):
cParlist = cFunc['params']
for cPar in cParlist.values():
oPars.add(self.__func_ident(indFunc)+cPar.name,
value=cPar.value,vary=cPar.vary,
min=cPar.min,max=cPar.max,
expr=cPar.expr)
return oPars
def __func_ident(self,index):
"""
__func_ident(index)
Generate a unique prefix string for a function, which can be
used by `__extract_pars` to generate its master Parameters list.
Parameters
----------
index : `int`
The index of the function.
Returns
-------
A unique identifier string pertaining to that function, which
can be used to generate unique parameter names.
"""
return '__Func'+str(index)+'__'
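# --- Illustrative usage sketch (added for documentation; not part of the
# original omnifit source). A minimal, hedged example of driving the Fitter
# class above; the commented calls are left inactive because they depend on
# spectra and shape functions from the rest of the omnifit package.
if __name__ == '__main__':
    demo_x = np.linspace(2000.0, 2200.0, 201)
    demo_y = np.exp(-0.5 * ((demo_x - 2100.0) / 10.0) ** 2)
    demo_fit = Fitter(demo_x, demo_y, dy=0.01, modelname='Demo model')
    demo_pars = Parameters()
    demo_pars.add('mul', value=1.0, min=0.0)  # scaling parameter required by add_empirical
    # demo_fit.add_empirical(some_spectrum, demo_pars, funcname='Lab ice')  # some_spectrum is hypothetical
    # demo_fit.perform_fit()
    # print(demo_fit.chisq())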
|
bsd-3-clause
| 300,213,225,591,060,030
| 37.971284
| 118
| 0.670741
| false
| 3.999653
| false
| false
| false
|
apache/chemistry-cmislib
|
src/cmislib/net.py
|
2
|
5195
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Module that takes care of network communications for cmislib. It does
not know anything about CMIS or do anything special with regard to the
response it receives.
"""
from urllib import urlencode
import logging
import httplib2
class RESTService(object):
"""
Generic service for interacting with an HTTP end point. Sets headers
such as the USER_AGENT and builds the basic auth handler.
"""
def __init__(self):
self.user_agent = 'cmislib/%s +http://chemistry.apache.org/'
self.logger = logging.getLogger('cmislib.net.RESTService')
def get(self,
url,
username=None,
password=None,
**kwargs):
""" Makes a get request to the URL specified."""
headers = {}
if kwargs:
if 'headers' in kwargs:
headers = kwargs['headers']
del kwargs['headers']
self.logger.debug('Headers passed in: %s', headers)
if url.find('?') >= 0:
url = url + '&' + urlencode(kwargs)
else:
url = url + '?' + urlencode(kwargs)
self.logger.debug('About to do a GET on:' + url)
h = httplib2.Http()
h.add_credentials(username, password)
headers['User-Agent'] = self.user_agent
return h.request(url, method='GET', headers=headers)
def delete(self, url, username=None, password=None, **kwargs):
""" Makes a delete request to the URL specified. """
headers = {}
if kwargs:
if 'headers' in kwargs:
headers = kwargs['headers']
del kwargs['headers']
self.logger.debug('Headers passed in: %s', headers)
if url.find('?') >= 0:
url = url + '&' + urlencode(kwargs)
else:
url = url + '?' + urlencode(kwargs)
self.logger.debug('About to do a DELETE on:' + url)
h = httplib2.Http()
h.add_credentials(username, password)
headers['User-Agent'] = self.user_agent
return h.request(url, method='DELETE', headers=headers)
def put(self,
url,
payload,
contentType,
username=None,
password=None,
**kwargs):
"""
Makes a PUT request to the URL specified and includes the payload
that gets passed in. The content type header gets set to the
specified content type.
"""
headers = {}
if kwargs:
if 'headers' in kwargs:
headers = kwargs['headers']
del kwargs['headers']
self.logger.debug('Headers passed in: %s', headers)
if url.find('?') >= 0:
url = url + '&' + urlencode(kwargs)
else:
url = url + '?' + urlencode(kwargs)
self.logger.debug('About to do a PUT on:' + url)
h = httplib2.Http()
h.add_credentials(username, password)
headers['User-Agent'] = self.user_agent
if contentType is not None:
headers['Content-Type'] = contentType
return h.request(url, body=payload, method='PUT', headers=headers)
def post(self,
url,
payload,
contentType,
username=None,
password=None,
**kwargs):
"""
Makes a POST request to the URL specified and posts the payload
that gets passed in. The content type header gets set to the
specified content type.
"""
headers = {}
if kwargs:
if 'headers' in kwargs:
headers = kwargs['headers']
del kwargs['headers']
self.logger.debug('Headers passed in: %s', headers)
if url.find('?') >= 0:
url = url + '&' + urlencode(kwargs)
else:
url = url + '?' + urlencode(kwargs)
self.logger.debug('About to do a POST on:' + url)
h = httplib2.Http()
h.add_credentials(username, password)
headers['User-Agent'] = self.user_agent
if contentType is not None:
headers['Content-Type'] = contentType
return h.request(url, body=payload, method='POST', headers=headers)
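# --- Illustrative usage sketch (added for documentation; not part of the
# original cmislib source). The endpoint URL and credentials below are
# placeholders only; httplib2 returns a (response, content) tuple.
# svc = RESTService()
# response, content = svc.get('http://localhost:8080/alfresco/cmisatom',
#                             username='admin', password='admin',
#                             headers={'Accept': 'application/atom+xml'})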
|
apache-2.0
| 5,155,019,192,614,484,000
| 31.879747
| 75
| 0.561309
| false
| 4.440171
| false
| false
| false
|
joaomoreno/facilis
|
facilis/core/web.py
|
1
|
2344
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Facilis
# João Moreno <http://www.joaomoreno.com/>
# GPLv3
from SocketServer import ThreadingMixIn
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
import sys
from misc import UnknownURL, UnknownFile
from os.path import split, getsize
from pkg_resources import resource_string, resource_filename
CHUNK_SIZE = 65536
print sys.version_info
class HTTPRequestHandler(BaseHTTPRequestHandler):
"""HTTP request handler for Facilis"""
def do_GET(self):
"""Respond to a GET request."""
req = self.path[1:]
if req == "logo.png":
self.__sendFile(resource_filename(__name__, "resources/logo.png"), "image/png")
else:
try:
fname, mime = self.server.getFile(req)
print fname, mime
self.__sendFile(fname, mime)
except:
self.send_response(404)
self.send_header("Content-type", "text/html")
self.send_header("Connection", "close")
self.end_headers()
self.wfile.write(resource_string(__name__, "resources/404.html"))
def __sendFile(self, fname, mime):
print mime
name = split(fname)[1]
self.send_response(200)
if mime:
self.send_header("Content-type", mime)
self.send_header("Connection", "close")
self.send_header("Content-Disposition", 'attachment; filename="' + name + '"')
self.send_header("Content-Length", getsize(fname))
self.end_headers()
f = open(fname, "rb")
self.wfile.write(f.read())
f.close()
class FacilisServer(ThreadingMixIn, HTTPServer):
def __init__(self, address, handler, app):
HTTPServer.__init__(self, address, handler)
self.app = app
def getFile(self, url):
return self.app.getFile(url)
class ServerHandler(Thread):
def __init__(self, app, port):
Thread.__init__(self)
self.app = app
self.port = port
def run(self):
try:
httpd = FacilisServer(('', self.port), HTTPRequestHandler, self.app)
httpd.serve_forever()
httpd.server_close()
except:
exit(-2)
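# --- Illustrative usage sketch (added for documentation; not part of the
# original Facilis source). DemoApp is a hypothetical stand-in: the real
# application object only needs to expose getFile(url) -> (filename, mime).
class DemoApp(object):
    def getFile(self, url):
        return ("/tmp/example.txt", "text/plain")
# server = ServerHandler(DemoApp(), 8080)
# server.start()  # handles requests in a background thread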
|
gpl-3.0
| 2,496,950,977,492,467,000
| 29.038462
| 91
| 0.578745
| false
| 3.971186
| false
| false
| false
|
wholeshoot/mongo_datatables
|
mongo_datatables/editor.py
|
1
|
2907
|
from bson.objectid import ObjectId
import json
class Editor(object):
def __init__(self, pymongo_object, collection, request_args, doc_id):
"""
:param pymongo_object:
:param collection:
:param request_args:
:param doc_id:
"""
self.mongo = pymongo_object
self.collection = collection
self.request_args = request_args
self.doc_id = doc_id
@property
def db(self):
return self.mongo.db
@property
def action(self):
return self.request_args.get("action")
@property
def data(self):
return self.request_args.get("data")
@property
def list_of_ids(self):
return self.doc_id.split(",")
def remove(self):
"""
:return: empty {}
"""
for _id in self.list_of_ids:
self.db[self.collection].delete_one({"_id": ObjectId(_id)})
return {}
def create(self):
"""
Use PyMongo insert_one to add a document to a collection. self.data contains the new entry with no _id, like
{'0': {'val': 'test', 'group': 'test', 'text': 'test'}}
:return: output like {'data': [{'DT_RowId': 'x', ... }]}
"""
data_obj = {k: v for k, v in self.data['0'].items() if v} # ignore keys that might not exist
# try to save an object or array
for key, val in data_obj.items():
try:
data_obj[key] = json.loads(val)
except (json.decoder.JSONDecodeError, TypeError):
pass
self.db[self.collection].insert_one(data_obj)
# After insert, data_obj now includes an _id of type ObjectId, but we need it named DT_RowId and of type str.
data_obj["DT_RowId"] = str(data_obj.pop("_id", None))
return {"data": [data_obj]}
def edit(self):
"""
:return: output like { 'data': [ {'DT_RowId': 'x', ... }, {'DT_RowId': 'y',... }, ...]}
"""
data = []
for _id in self.list_of_ids:
doc = {k: v for k, v in self.data[_id].items() if v} # ignore keys that might not exist
# try to save an object or array
for key, val in doc.items():
try:
doc[key] = json.loads(val)
except (json.decoder.JSONDecodeError, TypeError):
pass
self.db[self.collection].update_one({"_id": ObjectId(_id)}, {"$set": doc}, upsert=False)
# add the _id to the doc object
doc["DT_RowId"] = _id
# add each doc object to the data array
data.append(doc)
return {"data": data}
def update_rows(self):
if self.action == "remove":
return self.remove()
elif self.action == "create":
return self.create()
elif self.action == "edit":
return self.edit()
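# --- Illustrative usage sketch (added for documentation; not part of the
# original module). The `mongo` object is assumed to be a Flask-PyMongo style
# wrapper exposing a `.db` attribute; the request_args layout follows the
# docstrings above.
# create_args = {
#     "action": "create",
#     "data": {"0": {"val": "test", "group": "test", "text": "test"}},
# }
# editor = Editor(mongo, "my_collection", create_args, doc_id="")
# result = editor.update_rows()  # -> {"data": [{"DT_RowId": "...", ...}]}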
|
mit
| 7,445,774,761,510,483,000
| 27.782178
| 117
| 0.522876
| false
| 3.83004
| false
| false
| false
|
shawnadelic/shuup
|
shuup/core/models/_suppliers.py
|
1
|
3709
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum, EnumIntegerField
from jsonfield import JSONField
from shuup.core.fields import InternalIdentifierField
from shuup.core.modules import ModuleInterface
from shuup.utils.analog import define_log_model
from ._base import ShuupModel
class SupplierType(Enum):
INTERNAL = 1
EXTERNAL = 2
class Labels:
INTERNAL = _('internal')
EXTERNAL = _('external')
@python_2_unicode_compatible
class Supplier(ModuleInterface, ShuupModel):
default_module_spec = "shuup.core.suppliers:BaseSupplierModule"
module_provides_key = "supplier_module"
identifier = InternalIdentifierField(unique=True)
name = models.CharField(verbose_name=_("name"), max_length=64)
type = EnumIntegerField(SupplierType, verbose_name=_("supplier type"), default=SupplierType.INTERNAL)
stock_managed = models.BooleanField(verbose_name=_("stock managed"), default=False)
module_identifier = models.CharField(max_length=64, blank=True, verbose_name=_('module'))
module_data = JSONField(blank=True, null=True, verbose_name=_("module data"))
def __str__(self):
return self.name
def get_orderability_errors(self, shop_product, quantity, customer):
"""
:param shop_product: Shop Product
:type shop_product: shuup.core.models.ShopProduct
:param quantity: Quantity to order
:type quantity: decimal.Decimal
:param customer: Ordering contact.
:type customer: shuup.core.models.Contact
:rtype: iterable[ValidationError]
"""
return self.module.get_orderability_errors(shop_product=shop_product, quantity=quantity, customer=customer)
def get_stock_statuses(self, product_ids):
"""
:param product_ids: Iterable of product IDs
:return: Dict of {product_id: ProductStockStatus}
:rtype: dict[int, shuup.core.stocks.ProductStockStatus]
"""
return self.module.get_stock_statuses(product_ids)
def get_stock_status(self, product_id):
"""
:param product_id: Product ID
:type product_id: int
:rtype: shuup.core.stocks.ProductStockStatus
"""
return self.module.get_stock_status(product_id)
def get_suppliable_products(self, shop, customer):
"""
:param shop: Shop to check for suppliability
:type shop: shuup.core.models.Shop
:param customer: Customer contact to check for suppliability
:type customer: shuup.core.models.Contact
:rtype: list[int]
"""
return [
shop_product.pk
for shop_product
in self.shop_products.filter(shop=shop)
if shop_product.is_orderable(self, customer, shop_product.minimum_purchase_quantity)
]
def adjust_stock(self, product_id, delta, created_by=None, type=None):
from shuup.core.suppliers.base import StockAdjustmentType
adjustment_type = type or StockAdjustmentType.INVENTORY
return self.module.adjust_stock(product_id, delta, created_by=created_by, type=adjustment_type)
def update_stock(self, product_id):
return self.module.update_stock(product_id)
def update_stocks(self, product_ids):
return self.module.update_stocks(product_ids)
SupplierLogEntry = define_log_model(Supplier)
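# --- Illustrative usage sketch (added for documentation; not part of the
# original Shuup source). None of this runs outside a configured Django/Shuup
# project; it only illustrates how a supplier is typically queried.
# supplier = Supplier.objects.create(name="Demo supplier", type=SupplierType.INTERNAL)
# supplier.adjust_stock(product_id=1, delta=10)     # delegates to the supplier module
# status = supplier.get_stock_status(product_id=1)  # -> ProductStockStatus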
|
agpl-3.0
| 7,370,786,921,387,717,000
| 36.09
| 115
| 0.689674
| false
| 3.811922
| false
| false
| false
|
bioinfo-core-BGU/neatseq-flow_modules
|
neatseq_flow_modules/RNA_seq/trinity_statistics.py
|
1
|
8022
|
# -*- coding: UTF-8 -*-
"""
``trinity_statistics``
-----------------------------------------------------------------
:Authors: Menachem Sklarz
:Affiliation: Bioinformatics core facility
:Organization: National Institute of Biotechnology in the Negev, Ben Gurion University.
A class that defines a module for running ``abundance_estimates_to_matrix.pl`` on genes or isoforms counts tables produced by ``align_and_estimate_abundance.pl``
See the script documentation `here <https://github.com/trinityrnaseq/trinityrnaseq/wiki/Trinity-Transcript-Quantification#building-expression-matrices>`_.
This conversion makes sense at the project level - combining all sample matrices into a single, normalized, comparison table. However, for completeness, we included a sample scope option for running the script in each sample separately.
.. Note:: ``scope`` is not defined for this module. It only makes sense to run ``abundance_estimates_to_matrix`` when comparing many samples against a single assembly
Requires
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Either ``genes.results`` or ``isoforms.results`` files in the following slots:
* ``sample_data[<sample>]["genes.results"]``
* ``sample_data[<sample>]["isoforms.results"]``
Output:
~~~~~~~~~~~~~
* Creates the following files in the following slots:
* ``<project>.counts.matrix`` in ``self.sample_data["project_data"]["counts.matrix"]``
* ``<project>.not_cross_norm.fpkm.tmp`` in ``self.sample_data["project_data"]["not_cross_norm.fpkm.tmp"]``
* ``<project>.not_cross_norm.fpkm.tmp.TMM_info.txt`` in ``self.sample_data["project_data"]["not_cross_norm.fpkm.tmp.TMM_info.txt"]``
* ``<project>.TMM.fpkm.matrix`` in ``self.sample_data["project_data"]["TMM.fpkm.matrix"]``
Parameters that can be set
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. csv-table::
:header: "Parameter", "Values", "Comments"
"use_genes", "", "Use 'genes.results' matrix. If not passed, use 'isoforms.results'"
"redirects: --gene_trans_map", "path or 'none'", "If path, use path as gene_trans_map for all samples. If 'none', does not produce gene level estimates. **In order to use an internal gene_trans_map, do not pass this parameter!**"
Lines for parameter file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
trin_map_stats:
module: trinity_statistics
base: trin_map1
script_path: /path/to/abundance_estimates_to_matrix.pl
use_genes:
redirects:
--est_method: RSEM
References
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Grabherr, M.G., Haas, B.J., Yassour, M., Levin, J.Z., Thompson, D.A., Amit, I., Adiconis, X., Fan, L., Raychowdhury, R., Zeng, Q. and Chen, Z., 2011. **Trinity: reconstructing a full-length transcriptome without a genome from RNA-Seq data**. *Nature biotechnology*, 29(7), p.644.
"""
import os
import sys
import re
from neatseq_flow.PLC_step import Step,AssertionExcept
__author__ = "Menachem Sklarz"
__version__ = "1.6.0"
class Step_trinity_statistics(Step):
def step_specific_init(self):
self.shell = "bash" # Can be set to "bash" by inheriting instances
self.file_tag = "trin_stats"
if "use_genes" not in self.params:
self.write_warning("'use_genes' not passed. Using 'isoforms.results' matrix")
def step_sample_initiation(self):
""" A place to do initiation stages following setting of sample_data
Here you should test for dependency outputs. These will NOT exist at initiation of this instance; they are set only after sample_data has been updated.
"""
# In new version, --gene_trans_map is compulsory! Adding
# If not passed:
# If one exists, use it.
# Otherwise, specify "none"
# If passed:
# If with value, use the value and set project "gene_trans_map" to value
# Otherwise, use existing
if "--gene_trans_map" not in self.params["redir_params"]:
if "gene_trans_map" in self.sample_data["project_data"]:
self.params["redir_params"]["--gene_trans_map"] = self.sample_data["project_data"]["gene_trans_map"]
self.use_gene_trans_map = True
else:
self.params["redir_params"]["--gene_trans_map"] = "none"
self.use_gene_trans_map = False
else: # --gene_trans_map is defined in redir_params
if self.params["redir_params"]["--gene_trans_map"] == None:
raise AssertionExcept("You passed --gene_trans_map with no value. Please specify path or 'none'")
elif self.params["redir_params"]["--gene_trans_map"] == "none":
self.use_gene_trans_map = False
else:
self.sample_data["project_data"]["gene_trans_map"] = self.params["redir_params"]["--gene_trans_map"]
self.use_gene_trans_map = True
def create_spec_wrapping_up_script(self):
""" Add stuff to check and agglomerate the output data
"""
def build_scripts(self):
# Name of specific script:
self.spec_script_name = self.set_spec_script_name()
self.script = ""
# This line should be left before every new script. It sees to local issues.
# Use the dir it returns as the base_dir for this step.
use_dir = self.local_start(self.base_dir)
prefix = self.sample_data["Title"]
self.script += self.get_script_const()
self.script += "--out_prefix %s \\\n\t" % os.sep.join([use_dir, prefix])
# type2use is 'genes.results' or 'isoforms.results'. This is used to then select the correct slot from "mapping"
type2use = "genes.results" if "use_genes" in list(self.params.keys()) else "isoforms.results"
for sample in self.sample_data["samples"]:
try:
self.script += "%s \\\n\t" % self.sample_data[sample][type2use]
except:
raise AssertionExcept("file type %s does not exist for sample." % type2use, sample)
self.script = self.script.rstrip("\\\n\t")
self.script += "\n\n"
if not "version" in self.params or self.params["version"].lower() == "new":
# Storing all output files even though probably not very useful downstream...
self.sample_data["project_data"]["isoform.raw_counts"] = os.sep.join([self.base_dir, "%s.isoform.counts.matrix" % prefix])
self.sample_data["project_data"]["isoform.norm_counts"] = os.sep.join([self.base_dir, "%s.isoform.TPM.not_cross_norm" % prefix])
self.stamp_file(self.sample_data["project_data"]["isoform.raw_counts"] )
self.stamp_file(self.sample_data["project_data"]["isoform.norm_counts"])
if(self.use_gene_trans_map): # True when --gene_trans_map is not "none"
self.sample_data["project_data"]["gene.raw_counts"] = os.sep.join([self.base_dir, "%s.gene.counts.matrix" % prefix])
self.sample_data["project_data"]["gene.norm_counts"] = os.sep.join([self.base_dir, "%s.gene.TPM.not_cross_norm" % prefix])
self.stamp_file(self.sample_data["project_data"]["gene.raw_counts"] )
self.stamp_file(self.sample_data["project_data"]["gene.norm_counts"])
else:
self.write_warning("Not storing output files for old version of trinity. "
"If required, load the appropriate files with a 'manage_types' module")
# Move all files from temporary local dir to permanent base_dir
# Sees to copying local files to final destination (and other stuff)
self.local_finish(use_dir,self.base_dir)
self.create_low_level_script()
|
gpl-3.0
| -7,218,198,031,064,507,000
| 42.597826
| 279
| 0.596111
| false
| 3.715609
| false
| false
| false
|
shoopio/shoop
|
shuup/admin/modules/attributes/__init__.py
|
2
|
1483
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from shuup.admin.base import AdminModule, MenuEntry
from shuup.admin.menu import STOREFRONT_MENU_CATEGORY
from shuup.admin.utils.urls import derive_model_url, get_edit_and_list_urls
from shuup.core.models import Attribute
class AttributeModule(AdminModule):
name = _("Attributes")
breadcrumbs_menu_entry = MenuEntry(text=name, url="shuup_admin:attribute.list")
def get_urls(self):
return get_edit_and_list_urls(
url_prefix="^attributes",
view_template="shuup.admin.modules.attributes.views.Attribute%sView",
name_template="attribute.%s"
)
def get_menu_category_icons(self):
return {self.name: "fa fa-tags"}
def get_menu_entries(self, request):
return [
MenuEntry(
text=_("Attributes"),
icon="fa fa-tags",
url="shuup_admin:attribute.list",
category=STOREFRONT_MENU_CATEGORY,
ordering=8
)
]
def get_model_url(self, object, kind, shop=None):
return derive_model_url(Attribute, "shuup_admin:attribute", object, kind)
|
agpl-3.0
| -2,326,765,358,164,317,000
| 32.704545
| 83
| 0.650708
| false
| 3.652709
| false
| false
| false
|
jehine-MSFT/azure-storage-python
|
azure/storage/_common_conversion.py
|
1
|
2874
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import hashlib
import hmac
import sys
from dateutil.tz import tzutc
from .models import (
_unicode_type,
)
if sys.version_info < (3,):
def _str(value):
if isinstance(value, unicode):
return value.encode('utf-8')
return str(value)
else:
_str = str
def _to_str(value):
return _str(value) if value is not None else None
def _int_to_str(value):
return str(int(value)) if value is not None else None
def _bool_to_str(value):
if value is None:
return None
if isinstance(value, bool):
if value:
return 'true'
else:
return 'false'
return str(value)
def _to_utc_datetime(value):
return value.strftime('%Y-%m-%dT%H:%M:%SZ')
def _datetime_to_utc_string(value):
# Azure expects the date value passed in to be UTC.
# Azure will always return values as UTC.
# If a date is passed in without timezone info, it is assumed to be UTC.
if value is None:
return None
if value.tzinfo:
value = value.astimezone(tzutc())
return value.strftime('%a, %d %b %Y %H:%M:%S GMT')
def _encode_base64(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
encoded = base64.b64encode(data)
return encoded.decode('utf-8')
def _decode_base64_to_bytes(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
return base64.b64decode(data)
def _decode_base64_to_text(data):
decoded_bytes = _decode_base64_to_bytes(data)
return decoded_bytes.decode('utf-8')
def _sign_string(key, string_to_sign, key_is_base64=True):
if key_is_base64:
key = _decode_base64_to_bytes(key)
else:
if isinstance(key, _unicode_type):
key = key.encode('utf-8')
if isinstance(string_to_sign, _unicode_type):
string_to_sign = string_to_sign.encode('utf-8')
signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
digest = signed_hmac_sha256.digest()
encoded_digest = _encode_base64(digest)
return encoded_digest
def _lower(text):
return text.lower()
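# --- Illustrative usage sketch (added for documentation; not part of the
# original SDK module). A self-contained demonstration of the helpers above:
# a base64 round-trip and HMAC-SHA256 signing with a base64-encoded stand-in
# key, as used when building Shared Key authorization headers.
if __name__ == '__main__':
    encoded = _encode_base64('hello world')               # 'aGVsbG8gd29ybGQ='
    assert _decode_base64_to_text(encoded) == 'hello world'
    demo_key = _encode_base64(b'not-a-real-account-key')  # placeholder key
    demo_signature = _sign_string(demo_key, 'GET\n\ncanonicalized-resource', key_is_base64=True)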
|
apache-2.0
| -4,880,600,829,352,784,000
| 26.883495
| 76
| 0.626741
| false
| 3.598997
| false
| false
| false
|
Artimi/waktu
|
waktu/timetracker.py
|
1
|
6274
|
#!/usr/bin/env python2.7
#-*- coding: UTF-8 -*-
from category import Category
from gi.repository import Wnck, Gdk, Gtk, GObject, Notify, GLib
from activityrecord import ActivityRecord
from threading import Thread, Event
from time import sleep, time
import copy
class TimeTracker(Thread):
"""Core module of this project. It's running in separated thread
to not block GUI."""
stopthread = Event()
track = Event()
mode = Event()
def __init__(self, _stat, _categories, _activities, _configuration):
Thread.__init__(self)
self.categories = _categories
self.activities = _activities
self.stat = _stat
self.lastActivity = ActivityRecord()
self.screen = Wnck.Screen.get_default()
self.n = Notify.Notification()
self.tmpName = ''
if _configuration.getValue('state'):
self.track.set()
else:
self.track.clear()
if _configuration.getValue('mode'):
self.mode.set()
else:
self.mode.clear()
def run(self):
"""Start tracking user activities"""
while not self.stopthread.isSet():
sleep(1)
"""Skip tracking if it's disabled"""
if not self.track.isSet():
continue
Gdk.threads_enter()
GObject.idle_add(self.screen.force_update)
active_window = self.screen.get_active_window()
"""Skip if there is no active window"""
if active_window == None:
Gdk.threads_leave()
continue
appName = active_window.get_application().get_name()
appPid = active_window.get_application().get_pid()
"""If the learning mode is activive, only append an activity"""
if self.mode.isSet():
self.activities.addActivity(appName)
Gdk.threads_leave()
continue
if self.lastActivity.getActivity().getPid() == appPid:
"""Still the same activity, just actualize the end time"""
self.lastActivity.setEndTime(time())
else:
"""New activity, actualize the lastActivity and append
the new activity"""
if self.lastActivity.getActivity().getPid() != 0:
tmp = copy.deepcopy(self.lastActivity)
self.stat.appendActivityRecord(tmp)
self.activities.addActivity(tmp.getActivity().getName())
print "DBG: Zmena aktivity! Ulozena aktivita %s (%s)" % (tmp.getActivity().getName(), tmp.getCategory())
self.lastActivity.getActivity().setName(appName)
self.lastActivity.getActivity().setPid(appPid)
self.lastActivity.setCategory('OTHER')
self.getCorrectCategory()
self.lastActivity.setStartTime(time())
self.lastActivity.setEndTime(time())
Gdk.threads_leave()
if self.track.isSet() and not self.mode.isSet():
tmp = copy.deepcopy(self.lastActivity)
self.stat.appendActivityRecord(tmp)
print "DBG: Ulozena aktivita %s (%s)" % (tmp.getActivity().getName(), tmp.getCategory())
"""Store all records to file to make them persistent"""
self.stat.storeRecords()
self.activities.storeActivities()
def stop(self):
"""Stop the tracking system, uses id stored in initialization"""
self.stopthread.set()
def getCorrectCategory(self, _activity = None):
"""Find out category where the activity belongs to"""
if _activity == None:
_activity = self.lastActivity.getActivity()
activityCategories = self.categories.getContainingCategories(_activity)
if len(activityCategories) == 0:
"""The activity isn't in any category"""
self.lastActivity.setCategory('OTHER')
elif len(activityCategories) == 1:
"""The activity is in exactly one category"""
self.lastActivity.setCategory(activityCategories[0].name)
else:
"""The activity is in more than one category.
The Waktu needs to ask user."""
lastOccurrence = self.stat.getLastOccurrence(_activity.getName())
if lastOccurrence == None or (time() - lastOccurrence.getEndTime()) > 600 : # 10 minutes is the default time to remember users choice
self.askUser(_activity, activityCategories)
else:
self.lastActivity.setCategory(lastOccurrence.getCategory())
def askUser(self, _activity, _categories):
"""Creates a notification and asks a user where the activity belongs to"""
if not Notify.is_initted():
Notify.init('Waktu')
self.n.clear_hints()
self.n.clear_actions()
self.n.set_property('summary','Kam patří aktivita %s?' % _activity.getName())
self.n.set_property('body', 'Zdá se, že tuto aktivitu máte zvolenou ve více kategoriích. Zvolte, prosím, níže jednu, do které spadá tato aktivita práve teď.')
self.n.set_property('icon_name','dialog-question')
self.n.set_urgency(Notify.Urgency.NORMAL)
self.n.set_timeout(Notify.EXPIRES_NEVER)
self.n.set_hint("resident", GLib.Variant('b',True))
for cat in _categories:
self.n.add_action(cat.name, cat.name, self.getUserAnswer, _activity, None)
self.n.add_action("OTHER", "Jinam", self.getUserAnswer, _activity, None)
self.n.show()
def getUserAnswer(self, n, _action, _data):
"""Process user answer and delegate result"""
n.close()
if self.lastActivity.getActivity().getName() == _data.getName():
"""The focused app is still the same"""
self.lastActivity.setCategory(_action)
else:
"""There is another activity, need to find it backwards"""
self.stat.getLastOccurrence(_data.getName()).setCategory(_action)
|
mit
| 6,195,104,910,922,810,000
| 40.184211
| 166
| 0.579393
| false
| 4.24983
| false
| false
| false
|
pyfa-org/Pyfa
|
gui/builtinContextMenus/itemProject.py
|
1
|
1630
|
import wx
import gui.fitCommands as cmd
import gui.mainFrame
from gui.contextMenu import ContextMenuSingle
from service.fit import Fit
_t = wx.GetTranslation
class ProjectItem(ContextMenuSingle):
visibilitySetting = 'project'
def __init__(self):
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
def display(self, callingWindow, srcContext, mainItem):
if srcContext not in ("marketItemGroup", "marketItemMisc") or self.mainFrame.getActiveFit() is None:
return False
if mainItem is None:
return False
sFit = Fit.getInstance()
fitID = self.mainFrame.getActiveFit()
fit = sFit.getFit(fitID)
if fit.isStructure:
return False
return mainItem.isType("projected")
def getText(self, callingWindow, itmContext, mainItem):
return _t("Project {0} onto Fit").format(itmContext)
def activate(self, callingWindow, fullContext, mainItem, i):
fitID = self.mainFrame.getActiveFit()
if mainItem.isModule:
success = self.mainFrame.command.Submit(cmd.GuiAddProjectedModuleCommand(fitID=fitID, itemID=mainItem.ID))
elif mainItem.isDrone:
success = self.mainFrame.command.Submit(cmd.GuiAddProjectedDroneCommand(fitID=fitID, itemID=mainItem.ID))
elif mainItem.isFighter:
success = self.mainFrame.command.Submit(cmd.GuiAddProjectedFighterCommand(fitID=fitID, itemID=mainItem.ID))
else:
success = False
if success:
self.mainFrame.additionsPane.select('Projected', focus=False)
ProjectItem.register()
|
gpl-3.0
| 5,919,210,914,069,837,000
| 31.6
| 119
| 0.682822
| false
| 3.781903
| false
| false
| false
|
techtonik/warehouse
|
warehouse/views.py
|
1
|
1071
|
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from warehouse.utils import cache, fastly, render_response
@cache(browser=1, varnish=120)
@fastly("index")
def index(app, request):
return render_response(
app, request, "index.html",
project_count=app.db.packaging.get_project_count(),
download_count=app.db.packaging.get_download_count(),
recently_updated=app.db.packaging.get_recently_updated(),
)
|
apache-2.0
| -4,845,347,867,932,633,000
| 37.25
| 74
| 0.741363
| false
| 3.852518
| false
| false
| false
|
CamDavidsonPilon/lifelines
|
lifelines/utils/lowess.py
|
1
|
2541
|
# -*- coding: utf-8 -*-
"""
This module implements the Lowess function for nonparametric regression.
Functions:
lowess Fit a smooth nonparametric regression curve to a scatterplot.
For more information, see
William S. Cleveland: "Robust locally weighted regression and smoothing
scatterplots", Journal of the American Statistical Association, December 1979,
volume 74, number 368, pp. 829-836.
William S. Cleveland and Susan J. Devlin: "Locally weighted regression: An
approach to regression analysis by local fitting", Journal of the American
Statistical Association, September 1988, volume 83, number 403, pp. 596-610.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
# Slight updates in lifelines 0.16.0+, 2018
from math import ceil
import warnings
import numpy as np
from scipy import linalg
def lowess(x, y, f=2.0 / 3.0, iterations=1):
"""lowess(x, y, f=2./3., iter=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iterations. The
function will run faster with a smaller number of iterations.
"""
n = len(x)
r = int(ceil(f * n))
h = np.clip([np.sort(np.abs(x - x[i]))[r] for i in range(n)], 1e-8, np.inf)
w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)
w = (1 - w ** 3) ** 3
yest = np.zeros(n)
delta = np.ones(n)
for _ in range(iterations):
for i in range(n):
weights = delta * w[:, i]
b = np.array([np.sum(weights * y), np.sum(weights * y * x)])
A = np.array([[np.sum(weights), np.sum(weights * x)], [np.sum(weights * x), np.sum(weights * x * x)]])
# I think it is safe to assume this.
# pylint: disable=unexpected-keyword-arg
try:
beta = linalg.solve(A, b, assume_a="pos", check_finite=False)
except np.linalg.LinAlgError:
beta = [0, 0]
yest[i] = beta[0] + beta[1] * x[i]
residuals = y - yest
s = np.median(np.abs(residuals))
delta = np.clip(residuals / (6.0 * s), -1, 1)
delta = (1 - delta ** 2) ** 2
return yest
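# --- Illustrative usage sketch (added for documentation; not part of the
# original module). A small self-contained run of the smoother above on noisy
# sinusoidal data; f controls the smoothing span and iterations the number of
# robustifying passes, as described in the docstring.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x_demo = np.linspace(0, 2 * np.pi, 100)
    y_demo = np.sin(x_demo) + 0.3 * rng.randn(100)
    y_smooth = lowess(x_demo, y_demo, f=0.25, iterations=3)
    print(y_smooth[:5])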
|
mit
| -498,146,806,779,797,570
| 38.703125
| 114
| 0.637151
| false
| 3.245211
| false
| false
| false
|
consultit/Ely
|
ely/direct/data_structures_and_algorithms/ch05/high_scores.py
|
1
|
3387
|
# Copyright 2013, Michael H. Goldwasser
#
# Developed for use with the book:
#
# Data Structures and Algorithms in Python
# Michael T. Goodrich, Roberto Tamassia, and Michael H. Goldwasser
# John Wiley & Sons, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class GameEntry:
"""Represents one entry of a list of high scores."""
def __init__(self, name, score):
"""Create an entry with given name and score."""
self._name = name
self._score = score
def get_name(self):
"""Return the name of the person for this entry."""
return self._name
def get_score(self):
"""Return the score of this entry."""
return self._score
def __str__(self):
"""Return string representation of the entry."""
return '({0}, {1})'.format(self._name, self._score) # e.g., '(Bob, 98)'
class Scoreboard:
"""Fixed-length sequence of high scores in nondecreasing order."""
def __init__(self, capacity=10):
"""Initialize scoreboard with given maximum capacity.
All entries are initially None.
"""
self._board = [None] * capacity # reserve space for future scores
self._n = 0 # number of actual entries
def __getitem__(self, k):
"""Return entry at index k."""
return self._board[k]
def __str__(self):
"""Return string representation of the high score list."""
return '\n'.join(str(self._board[j]) for j in range(self._n))
def add(self, entry):
"""Consider adding entry to high scores."""
score = entry.get_score()
# Does new entry qualify as a high score?
# answer is yes if board not full or score is higher than last entry
good = self._n < len(self._board) or score > self._board[-1].get_score()
if good:
if self._n < len(self._board): # no score drops from list
self._n += 1 # so overall number increases
# shift lower scores rightward to make room for new entry
j = self._n - 1
while j > 0 and self._board[j-1].get_score() < score:
self._board[j] = self._board[j-1] # shift entry from j-1 to j
j -= 1 # and decrement j
self._board[j] = entry # when done, add new entry
if __name__ == '__main__':
board = Scoreboard(5)
for e in (
('Rob', 750), ('Mike',1105), ('Rose', 590), ('Jill', 740),
('Jack', 510), ('Anna', 660), ('Paul', 720), ('Bob', 400),
):
ge = GameEntry(e[0], e[1])
board.add(ge)
print('After considering {0}, scoreboard is:'.format(ge))
print(board)
print()
|
lgpl-3.0
| -1,765,403,955,418,676,700
| 36.633333
| 81
| 0.581931
| false
| 3.961404
| false
| false
| false
|
3dbug/blender
|
DialScale.py
|
1
|
4821
|
bl_info = {
"name": "Dial and Scale",
"author": "stacker, sambler",
"version": (1, 2),
"blender": (2, 80, 0),
"location": "3DView > Add > Curve > Dial and Scale",
"description": "Add an array of text number objects or watch dials.",
"warning": "",
"wiki_url": "https://github.com/3dbug/blender/blob/master/DialScale.py",
"tracker_url": "https://github.com/3dbug/blender/issues",
"category": "Add Curve"}
import bpy,math
import mathutils
from bpy.props import IntProperty,FloatProperty,StringProperty,EnumProperty,BoolProperty
fonts_list = []
def getFonts(self, context):
fonts_list = []
for afont in bpy.data.fonts:
fonts_list.append(( afont.name, afont.name,""))
if len(fonts_list) == 0:
fonts_list.append(("Bfont","Bfont",""))
return fonts_list
class DialScale(bpy.types.Operator):
""" Creates an array of text elements"""
bl_idname = "curve.dial_scale"
bl_label = "Create Dials and Scales"
bl_options = {'REGISTER', 'UNDO'}
start : IntProperty(name="Start",description="Start value",min=-10000, max=10000,default=1 )
count : IntProperty(name="Count",description="Number of items to create",min=1, max=100, default=12 )
step : IntProperty(name="Step",description="Increment of number",min=-10000, max=10000, default=1 )
offset : FloatProperty(name="Offset",description="Distance",min=0.01, max=100.0, default=2.5 )
dialType : EnumProperty( name="Dial Type",description="Basis of creating the dial", items=[("circular","circular","A round dial"),("horizontal","horizontal","A horizontal scale"),("vertical","vertical","A vertical scale")], default="circular")
rotate : FloatProperty(name="Rotation",description="Start rotation of first item",min=-360.0, max=360.0, default=0.0 )
segment : FloatProperty(name="Segment",description="Circle Segment",min=-360.0, max=360.0, default=360.0 )
ticks : IntProperty(name="Ticks",description="Number of ticks between numbers",min=0, max=100, default=5 )
tickOffset : FloatProperty(name="Tick Offset",description="Distance to offset the Ticks",min=-100.0, max=100.0, default=1.3 )
font : EnumProperty( name="Fonts",items=getFonts)
def execute(self, context):
x = -self.offset
y = 0.0
angle = math.radians( self.rotate ) - math.pi/2
angle_step = -math.radians( self.segment ) / self.count
angle = angle - angle_step
pos = self.start - 1
num = self.start
end = self.count + self.start - 1
while pos < end:
if self.dialType == "circular":
vec3d = mathutils.Vector((self.offset, 0, 0))
vpos = vec3d @ mathutils.Matrix.Rotation( angle , 3, 'Z')
elif self.dialType == "horizontal":
x = x + self.offset
vpos=(x,0,0)
else:
y = y + self.offset
vpos = (0,y,0)
bpy.ops.object.text_add()
ob=bpy.context.object
ob.data.body = str(num)
ob.data.font = bpy.data.fonts[ self.font ]
ob.data.align_x = ob.data.align_y = 'CENTER'
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
bpy.ops.transform.translate(value=vpos)
for t in range(0,self.ticks):
bpy.ops.mesh.primitive_plane_add(size=.04 if t == 0 else .02)
if self.dialType == "circular":
tick_step = angle_step / self.ticks
vec3d = mathutils.Vector((self.offset*self.tickOffset, 0, 0))
tpos = vec3d @ mathutils.Matrix.Rotation( (angle + (t*tick_step)) , 3, 'Z')
bpy.ops.transform.resize(value=(6,1,1))
                    bpy.ops.transform.rotate(value=angle + t*tick_step, orient_axis='Z')
elif self.dialType == "horizontal" and pos < end-1:
tick_step = self.offset / self.ticks
tpos=(x+t*tick_step,self.tickOffset,0)
bpy.ops.transform.resize(value=(1,6,1))
elif pos < end -1:
tick_step = self.offset / self.ticks
tpos=(self.tickOffset,y+t*tick_step,0)
bpy.ops.transform.resize(value=(6,1,1))
bpy.ops.transform.translate(value=tpos)
angle = angle - angle_step
pos = pos + 1
num = num + self.step
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(DialScale.bl_idname, icon='PLUGIN')
def register():
bpy.utils.register_class(DialScale)
bpy.types.VIEW3D_MT_curve_add.append(menu_func)
def unregister():
bpy.utils.unregister_class(DialScale)
bpy.types.VIEW3D_MT_curve_add.remove(menu_func)
if __name__ == "__main__":
register()
|
gpl-3.0
| -1,068,458,594,242,140,500
| 42.432432
| 247
| 0.594897
| false
| 3.438659
| false
| false
| false
|
exekias/django-achilles
|
doc/conf.py
|
1
|
8035
|
# -*- coding: utf-8 -*-
#
# Django Achilles documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 9 01:46:37 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from django.conf import settings
settings.configure()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Achilles'
copyright = u'2013, Carlos Pérez-Aradros Herce <exekias@gmail.com>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __import__('achilles').get_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoAchillesdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoAchilles.tex', u'Django Achilles Documentation',
u'Carlos Pérez-Aradros Herce', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangoachilles', u'Django Achilles Documentation',
[u'Carlos Pérez-Aradros Herce'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DjangoAchilles', u'Django Achilles Documentation',
u'Carlos Pérez-Aradros Herce', 'DjangoAchilles', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
autoclass_content = 'both'
|
apache-2.0
| 7,124,134,120,415,301,000
| 31.51417
| 87
| 0.708505
| false
| 3.737087
| true
| false
| false
|
openstack/mistral
|
mistral/api/controllers/v2/sub_execution.py
|
1
|
4240
|
# Copyright 2020 - Nokia Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from pecan import request
from pecan import rest
import wsmeext.pecan as wsme_pecan
from mistral.api.controllers.v2 import resources
from mistral.api.controllers.v2 import types
from mistral.db.v2 import api as db_api
from mistral.utils import rest_utils
from mistral.workflow import states
LOG = logging.getLogger(__name__)
def get_task_sub_executions_list(task_ex_id, filters, cur_depth):
task_sub_execs = []
with db_api.transaction():
task_ex = db_api.get_task_execution(task_ex_id)
if filters['errors_only'] and task_ex.state != states.ERROR:
return []
child_wf_executions = task_ex.workflow_executions
for c_ex in child_wf_executions:
task_sub_execs.extend(
get_execution_sub_executions_list(
c_ex.id,
filters,
cur_depth
)
)
return task_sub_execs
def get_execution_sub_executions_list(wf_ex_id, filters, cur_depth):
max_depth = filters['max_depth']
include_output = filters['include_output']
ex_sub_execs = []
if 0 <= max_depth < cur_depth:
return []
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex_id)
wf_resource = _get_wf_resource_from_db_model(
wf_ex,
include_output)
ex_sub_execs.append(wf_resource)
task_execs = wf_ex.task_executions
for t_ex in task_execs:
task_sub_executions = get_task_sub_executions_list(
t_ex.id,
filters,
cur_depth + 1
)
ex_sub_execs.extend(task_sub_executions)
return ex_sub_execs
def _get_wf_resource_from_db_model(wf_ex, include_output):
if include_output:
rest_utils.load_deferred_fields(wf_ex, ['params', 'input', 'output'])
else:
rest_utils.load_deferred_fields(wf_ex, ['params', 'input'])
return resources.Execution.from_db_model(wf_ex)
def _get_sub_executions(origin, id, filters):
if origin == 'execution':
return get_execution_sub_executions_list(id, filters, cur_depth=0)
else:
return get_task_sub_executions_list(id, filters, cur_depth=0)
class SubExecutionsController(rest.RestController):
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(resources.Executions, types.uuid, bool, int, bool)
def get(self, id, errors_only=False, max_depth=-1, include_output=False):
"""Return workflow execution report.
:param id: The ID of the workflow execution or task execution
to get the sub-executions of.
:param errors_only: Optional. If True, only error paths of the
                            execution tree are returned.
:param max_depth: Optional. Limits the depth of recursion while
obtaining the execution tree. If a value of the
flag is a negative number then no limit is set.
:param include_output: Optional. Include the output for all executions
in the list.
"""
origin = 'execution' if request.path.startswith('/v2/executions') \
else 'task'
LOG.info(
"Fetching sub executions of %s [id=%s]",
origin,
id
)
filters = {
'errors_only': errors_only,
'max_depth': max_depth,
'include_output': include_output
}
sub_executions_resource = _get_sub_executions(origin, id, filters)
return resources.Executions.convert_with_links(
sub_executions_resource,
request.application_url,
)
|
apache-2.0
| -6,536,085,960,045,563,000
| 30.641791
| 78
| 0.635849
| false
| 3.66465
| false
| false
| false
|
liaralabs/swizzin
|
scripts/deluge.UpdateTracker.py
|
1
|
2540
|
# from https://github.com/s0undt3ch/Deluge/blob/master/deluge/ui/console/commands/update-tracker.py
# update-tracker.py
#
# Copyright (C) 2008-2009 Ido Abramovich <ido.deluge@gmail.com>
# Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
from deluge.ui.console.main import BaseCommand
import deluge.ui.console.colors as colors
from deluge.ui.client import client
import deluge.component as component
from optparse import make_option
class Command(BaseCommand):
"""Update tracker for torrent(s)"""
usage = "Usage: update-tracker [ * | <torrent-id> [<torrent-id> ...] ]"
aliases = ['reannounce']
def handle(self, *args, **options):
self.console = component.get("ConsoleUI")
if len(args) == 0:
self.console.write(self.usage)
return
if len(args) > 0 and args[0].lower() == '*':
args = [""]
torrent_ids = []
for arg in args:
torrent_ids.extend(self.console.match_torrent(arg))
client.core.force_reannounce(torrent_ids)
def complete(self, line):
# We use the ConsoleUI torrent tab complete method
return component.get("ConsoleUI").tab_complete_torrent(line)
|
gpl-3.0
| -8,216,069,250,364,968,000
| 38.076923
| 99
| 0.699213
| false
| 3.751846
| false
| false
| false
|
aesuli/rss-feed-monitor
|
dirs_to_csv.py
|
1
|
1995
|
import argparse
import csv
import logging
import re
import os
import sys
BLANK_SUB = '_'
LABEL_SEPARATOR = ':'
ID_SEPARATOR = '_'
def read_data(directory):
logger = logging.getLogger(sys.argv[0])
for subdir in next(os.walk(directory))[1]:
label = subdir
subpath = os.path.join(directory, subdir)
for file in next(os.walk(subpath))[2]:
filename = os.path.join(subpath, file)
logger.info(filename)
with open(filename, mode='r', encoding='utf-8') as inputfile:
text = inputfile.read()
yield directory + ID_SEPARATOR + file, text, label
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', '--verbose', help='Verbose output', action='store_true')
parser.add_argument('name', help='name of the classification schema', type=str)
parser.add_argument('output_file', help='output csv file', type=str)
parser.add_argument('directory',
                        help='Paths to directories containing the labeled documents (label=subdir, document=file in subdir)',
type=str,
nargs='+')
args = parser.parse_args()
logger = logging.getLogger(sys.argv[0])
ch = logging.StreamHandler()
logger.addHandler(ch)
if args.verbose:
logger.setLevel(logging.INFO)
logger.info('Verbose output')
schema_name = args.name
count = 0
with open(args.output_file, mode='w', encoding='utf-8') as output:
csvout = csv.writer(output)
for directory in args.directory:
data_generator = read_data(directory)
for id, text, label in data_generator:
                no_space_label = re.sub(r'\s', BLANK_SUB, schema_name + LABEL_SEPARATOR + label)
csvout.writerow([id, text, no_space_label])
count += 1
logger.info('Processed '+ str(count) + ' files.')
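# Illustrative invocation sketch (the schema name, output file and directories
# below are hypothetical): each positional directory is expected to hold one
# subdirectory per label, with one document per file inside it.
#
#   python dirs_to_csv.py -v myschema labeled.csv ./corpus_dir_a ./corpus_dir_b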
|
gpl-3.0
| -1,952,062,115,747,103,200
| 35.272727
| 124
| 0.61203
| false
| 3.974104
| false
| false
| false
|
dichen001/Go4Jobs
|
JackChen/linked_list/328. Odd Even Linked List.py
|
1
|
1154
|
"""
Given a singly linked list, group all odd nodes together followed by the even nodes. Please note here we are talking about the node number and not the value in the nodes.
You should try to do it in place. The program should run in O(1) space complexity and O(nodes) time complexity.
Example:
Given 1->2->3->4->5->NULL,
return 1->3->5->2->4->NULL.
Note:
The relative order inside both the even and odd groups should remain as it was in the input.
The first node is considered odd, the second node even and so on ...
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def oddEvenList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head or not head.next:
return head
odd, even = head, head.next
o, e = odd, even
while odd and even and odd.next and even.next:
odd.next = odd.next.next
odd = odd.next
even.next = even.next.next
even = even.next
odd.next = e
return o
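# Minimal sketch for exercising the solution above; it assumes the standard
# singly-linked ListNode stub from the commented-out definition.
if __name__ == '__main__':
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None
    # build 1->2->3->4->5
    head = ListNode(1)
    tail = head
    for v in range(2, 6):
        tail.next = ListNode(v)
        tail = tail.next
    node = Solution().oddEvenList(head)
    reordered = []
    while node:
        reordered.append(node.val)
        node = node.next
    print(reordered)  # expected: [1, 3, 5, 2, 4]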
|
gpl-3.0
| -3,072,681,138,911,164,400
| 29.368421
| 170
| 0.613518
| false
| 3.675159
| false
| false
| false
|
childresslab/MicrocavityExp1
|
hardware/laser/simple_laser_dummy.py
|
2
|
6456
|
# -*- coding: utf-8 -*-
"""
This module acts like a laser.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from core.module import Base
from interface.simple_laser_interface import SimpleLaserInterface
from interface.simple_laser_interface import LaserState
from interface.simple_laser_interface import ShutterState
from interface.simple_laser_interface import ControlMode
import math
import random
import time
class SimpleLaserDummy(Base, SimpleLaserInterface):
"""
Lazor dummy
"""
_modclass = 'laserdummy'
_modtype = 'hardware'
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.lstate = LaserState.OFF
self.shutter = ShutterState.CLOSED
self.mode = ControlMode.POWER
self.current_setpoint = 0
self.power_setpoint = 0
def on_activate(self):
""" Activate module.
"""
pass
def on_deactivate(self):
""" Deactivate module.
"""
pass
def get_power_range(self):
""" Return optical power range
@return (float, float): power range
"""
return (0, 0.250)
def get_power(self):
""" Return laser power
@return float: Laser power in watts
"""
return self.power_setpoint * random.gauss(1, 0.01)
def get_power_setpoint(self):
""" Return optical power setpoint.
@return float: power setpoint in watts
"""
return self.power_setpoint
def set_power(self, power):
""" Set power setpoint.
@param float power: power setpoint
@return float: actual new power setpoint
"""
self.power_setpoint = power
self.current_setpoint = math.sqrt(4*self.power_setpoint)*100
return self.power_setpoint
def get_current_unit(self):
""" Get unit for laser current.
@return str: unit
"""
return '%'
def get_current_range(self):
""" Get laser current range.
@return (float, float): laser current range
"""
return (0, 100)
def get_current(self):
""" Get current laser current
        @return float: laser current in the current units
"""
return self.current_setpoint * random.gauss(1, 0.05)
def get_current_setpoint(self):
""" Get laser curent setpoint
@return float: laser current setpoint
"""
return self.current_setpoint
def set_current(self, current):
""" Set laser current setpoint
        @param float current: desired laser current setpoint
@return float: actual laser current setpoint
"""
self.current_setpoint = current
self.power_setpoint = math.pow(self.current_setpoint/100, 2) / 4
return self.current_setpoint
def allowed_control_modes(self):
""" Get supported control modes
@return list(): list of supported ControlMode
"""
return [ControlMode.POWER, ControlMode.CURRENT]
def get_control_mode(self):
""" Get the currently active control mode
@return ControlMode: active control mode
"""
return self.mode
def set_control_mode(self, control_mode):
""" Set the active control mode
@param ControlMode control_mode: desired control mode
@return ControlMode: actual active ControlMode
"""
self.mode = control_mode
return self.mode
def on(self):
""" Turn on laser.
@return LaserState: actual laser state
"""
time.sleep(1)
self.lstate = LaserState.ON
return self.lstate
def off(self):
""" Turn off laser.
@return LaserState: actual laser state
"""
time.sleep(1)
self.lstate = LaserState.OFF
return self.lstate
def get_laser_state(self):
""" Get laser state
@return LaserState: actual laser state
"""
return self.lstate
def set_laser_state(self, state):
""" Set laser state.
@param LaserState state: desired laser state
@return LaserState: actual laser state
"""
time.sleep(1)
self.lstate = state
return self.lstate
def get_shutter_state(self):
""" Get laser shutter state
@return ShutterState: actual laser shutter state
"""
return self.shutter
def set_shutter_state(self, state):
""" Set laser shutter state.
@param ShutterState state: desired laser shutter state
@return ShutterState: actual laser shutter state
"""
time.sleep(1)
self.shutter = state
return self.shutter
def get_temperatures(self):
""" Get all available temperatures.
        @return dict: dict of temperature names and values in degrees Celsius
"""
return {
'psu': 32.2 * random.gauss(1, 0.1),
'head': 42.0 * random.gauss(1, 0.2)
}
def set_temperatures(self, temps):
""" Set temperatures for lasers with tunable temperatures.
@return {}: empty dict, dummy not a tunable laser
"""
return {}
def get_temperature_setpoints(self):
""" Get temperature setpoints.
@return dict: temperature setpoints for temperature tunable lasers
"""
return {'psu': 32.2, 'head': 42.0}
def get_extra_info(self):
""" Multiple lines of dignostic information
@return str: much laser, very useful
"""
return "Dummy laser v0.9.9\nnot used very much\nvery cheap price very good quality"
|
gpl-3.0
| -5,375,667,728,137,546,000
| 26.355932
| 91
| 0.606103
| false
| 4.141116
| false
| false
| false
|
zhounetworking/build_job
|
lib/build_job.py
|
1
|
5769
|
#!/usr/bin/python
#-*- coding:utf8 -*-
#
import os
import sys
import time
import json
import traceback
import jenkinsapi
from functools import partial
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.custom_exceptions import WillNotBuild
from conf.config import * # job_dict,jenkins_dic
from conf.game_config import game_info
from lib.utils import Mail,log,notification
from task.manage_task import revoke_task,get_active_task_list
import socket
socket.setdefaulttimeout(connect_jenkins_timeout)
#def build_job(job_name, jenkins_url="",username="",passwd="",params={}):
def build_job(job_name, jenkins_url="",username="",passwd=""):
status = {
'error' : [],
'stat' : None,
'last_no' : None,
'next_no' : None,
'params' : None,
'run_time' : None,
'job_name' : job_name[0],
}
if not isinstance(job_name, tuple):
status['error'].append("job_name_not_tuple")
return status
try:
j = Jenkins( jenkins_url,username,passwd )
except:
status['error'].append("connect_jenkins_err")
return status
if job_name[0] not in j.get_jobs_list():
status['error'].append("job_not_exist")
return status
    # Add parameters to the Jenkins job
# if params:
# job = j.get_job(job_name[0],params)
# else:
# job = j.get_job(job_name[0])
job = j.get_job(job_name[0])
    # when the job runs for the first time, get_last_buildnumber raises an error
try:
s_last = job.get_last_buildnumber()
except:
s_last = 0
    # if the job is running now, stop it!
if job.is_queued():
status['error'].append("before_is_queued")
return status
elif job.is_running():
s_last_job = job.get_last_build()
if s_last_job.stop():
status['stop_before_running'] = True
else:
status['stop_before_running'] = False
return status
try:
if len(job_name) > 1:
j.build_job( job_name[0], job_name[1] )
status['params'] = job_name[1]
else:
j.build_job( job_name[0])
except WillNotBuild:
status['error'].append("job_run_err")
return status
#return traceback.print_exc()
except Exception:
log.exception('otherError')
status['error'].append("other_error")
return status
# In the quiet period of jenkins
while job.is_queued():
time.sleep(1)
else:
if job.get_last_buildnumber() > s_last:
e_last = job.get_last_build()
else:
status['error'].append("job_number_err")
return status
while e_last.is_running():
time.sleep(1)
else:
if e_last.is_good() and e_last.get_status() == 'SUCCESS':
status['stat'] = 'SUCCESS'
else:
status['stat'] = 'FAILURE'
status['error'].append("job_appfail_err")
status['last_no'] = s_last
status['next_no'] = e_last.buildno
status['task_info']= e_last.get_console()
status['run_time'] = e_last.get_duration().total_seconds()
return status
def choose_jenkins(jenkins_job_list_txt):
'''
    jenkins_job_list :
        a tuple of job names as defined in conf/config.py, e.g. ('testjob',)
    jenkins_dic :
        the jenkins configuration
'''
#job = jenkins_job_list[0]
job = jenkins_job_list_txt
if job.startswith('zgh') or job.startswith('zhanguo'):
jenkins_info = jenkins_dic['zgh']
elif job.startswith('lme'):
jenkins_info = jenkins_dic['lme']
elif job.startswith('pp2'):
jenkins_info = jenkins_dic['pp2']
elif job.startswith('pp1') or job.startswith('test'):
jenkins_info = jenkins_dic['pp1']
else:
raise Exception, "No jenkins config info"
print "job_list: %s ,url: %s"%(job,jenkins_info['url'])
return jenkins_info
#def build_job_handle(jenkins_info,jenkins_job,params={}):
def build_job_handle(jenkins_info,jenkins_job):
jenkins_url = jenkins_info['url']
jenkins_user = jenkins_info['user']
jenkins_passwd = jenkins_info['passwd']
build_job_handle = partial(
build_job,
jenkins_url=jenkins_url,
username=jenkins_user,
passwd=jenkins_passwd,
# params=params,
)
return build_job_handle(jenkins_job)
def check_job_status(res_list,p_id,u_type):
# add qa mail
MAIL_TO = mail_to[:]
MAIL_TO.extend(mail_to_qa)
# add designer mail
if game_info[p_id]['messages']['inform']:
try:
MAIL_TO.extend(game_info[p_id]['messages']['design_mail'])
print('add designer mail: %s'%game_info[p_id]['messages']['design_mail'])
except:
print('get platform name fail [ %s ]'%game_info[p_id][u_type]['context'])
mail = partial( Mail,
user = papasg_user,
passwd = papasg_passwd,
mail_to= MAIL_TO,
smtp_server = smtp_server,
subject = subject
)
success = True
for res in res_list:
if res['stat'] != 'SUCCESS':
success = False
err_job_name = res['job_name']
if success:
mail_text = mail_end_notify_ok
else:
mail_text = mail_end_notify_fail % err_job_name
mail(subject=game_info[p_id][u_type]['context'],mail_text=mail_text)
if __name__ == '__main__':
jenkins_url_test = 'http://jenkins.hrgame.com:8080/'
stat = build_job(('客户端_04_同步资源到正式环境_FL越狱!!',{'ok':'no','Bool':False}),jenkins_url_test)
if stat['stat']:
notification(stat)
else:
print 'fail'
check_job_status([ stat ])
print json.dumps(stat,indent=3)
print stat['job_name']
|
apache-2.0
| 5,577,391,660,055,654,000
| 26
| 91
| 0.579428
| false
| 3.355124
| false
| false
| false
|
Azure/WALinuxAgent
|
azurelinuxagent/common/osutil/default.py
|
1
|
60738
|
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import base64
import datetime
import errno
import fcntl
import glob
import json
import multiprocessing
import os
import platform
import pwd
import re
import shutil
import socket
import struct
import sys
import time
from pwd import getpwall
import array
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.utils.textutil as textutil
from azurelinuxagent.common.exception import OSUtilError
from azurelinuxagent.common.future import ustr, array_to_bytes
from azurelinuxagent.common.utils.cryptutil import CryptUtil
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
from azurelinuxagent.common.utils.networkutil import RouteEntry, NetworkInterfaceCard, AddFirewallRules
from azurelinuxagent.common.utils.shellutil import CommandError
__RULES_FILES__ = ["/lib/udev/rules.d/75-persistent-net-generator.rules",
"/etc/udev/rules.d/70-persistent-net.rules"]
"""
Define distro specific behavior. OSUtil class defines default behavior
for all distros. Each concrete distro class can override the default behavior
if needed.
"""
_IPTABLES_VERSION_PATTERN = re.compile("^[^\d\.]*([\d\.]+).*$") # pylint: disable=W1401
_IPTABLES_LOCKING_VERSION = FlexibleVersion('1.4.21')
def _add_wait(wait, command):
"""
If 'wait' is True, adds the wait option (-w) to the given iptables command line
"""
if wait:
command.insert(1, "-w")
return command
def _get_iptables_version_command():
return ["iptables", "--version"]
def _get_firewall_accept_command(wait, command, destination, owner_uid):
return AddFirewallRules.get_iptables_accept_command(wait, command, destination, owner_uid)
def _get_firewall_drop_command(wait, command, destination):
return AddFirewallRules.get_iptables_drop_command(wait, command, destination)
def _get_firewall_list_command(wait):
return _add_wait(wait, ["iptables", "-t", "security", "-L", "-nxv"])
def _get_firewall_packets_command(wait):
return _add_wait(wait, ["iptables", "-t", "security", "-L", "OUTPUT", "--zero", "OUTPUT", "-nxv"])
# Precisely delete the rules created by the agent.
# this rule was used <= 2.2.25. This rule helped to validate our change, and determine impact.
def _get_firewall_delete_conntrack_accept_command(wait, destination):
return _add_wait(wait,
["iptables", "-t", "security", "-D", "OUTPUT", "-d", destination, "-p", "tcp", "-m", "conntrack",
"--ctstate", "INVALID,NEW", "-j", "ACCEPT"])
def _get_firewall_delete_owner_accept_command(wait, destination, owner_uid):
return _add_wait(wait, ["iptables", "-t", "security", "-D", "OUTPUT", "-d", destination, "-p", "tcp", "-m", "owner",
"--uid-owner", str(owner_uid), "-j", "ACCEPT"])
def _get_firewall_delete_conntrack_drop_command(wait, destination):
return _add_wait(wait,
["iptables", "-t", "security", "-D", "OUTPUT", "-d", destination, "-p", "tcp", "-m", "conntrack",
"--ctstate", "INVALID,NEW", "-j", "DROP"])
PACKET_PATTERN = "^\s*(\d+)\s+(\d+)\s+DROP\s+.*{0}[^\d]*$" # pylint: disable=W1401
ALL_CPUS_REGEX = re.compile('^cpu .*')
_enable_firewall = True
DMIDECODE_CMD = 'dmidecode --string system-uuid'
PRODUCT_ID_FILE = '/sys/class/dmi/id/product_uuid'
UUID_PATTERN = re.compile(
r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$',
re.IGNORECASE)
IOCTL_SIOCGIFCONF = 0x8912
IOCTL_SIOCGIFFLAGS = 0x8913
IOCTL_SIOCGIFHWADDR = 0x8927
IFNAMSIZ = 16
IP_COMMAND_OUTPUT = re.compile('^\d+:\s+(\w+):\s+(.*)$') # pylint: disable=W1401
STORAGE_DEVICE_PATH = '/sys/bus/vmbus/devices/'
GEN2_DEVICE_ID = 'f8b3781a-1e82-4818-a1c3-63d806ec15bb'
class DefaultOSUtil(object):
def __init__(self):
self.agent_conf_file_path = '/etc/waagent.conf'
self.selinux = None
self.disable_route_warning = False
self.jit_enabled = False
self.service_name = self.get_service_name()
@staticmethod
def get_service_name():
return "waagent"
@staticmethod
def get_systemd_unit_file_install_path():
return "/lib/systemd/system"
@staticmethod
def get_agent_bin_path():
return "/usr/sbin"
def get_firewall_dropped_packets(self, dst_ip=None):
# If a previous attempt failed, do not retry
global _enable_firewall # pylint: disable=W0603
if not _enable_firewall:
return 0
try:
wait = self.get_firewall_will_wait()
try:
output = shellutil.run_command(_get_firewall_packets_command(wait))
pattern = re.compile(PACKET_PATTERN.format(dst_ip))
for line in output.split('\n'):
m = pattern.match(line)
if m is not None:
return int(m.group(1))
except Exception as e:
if isinstance(e, CommandError) and e.returncode == 3: # pylint: disable=E1101
# Transient error that we ignore. This code fires every loop
# of the daemon (60m), so we will get the value eventually.
return 0
logger.warn("Failed to get firewall packets: {0}", ustr(e))
return -1
return 0
except Exception as e:
_enable_firewall = False
logger.warn("Unable to retrieve firewall packets dropped"
"{0}".format(ustr(e)))
return -1
def get_firewall_will_wait(self):
# Determine if iptables will serialize access
try:
output = shellutil.run_command(_get_iptables_version_command())
except Exception as e:
msg = "Unable to determine version of iptables: {0}".format(ustr(e))
logger.warn(msg)
raise Exception(msg)
m = _IPTABLES_VERSION_PATTERN.match(output)
if m is None:
msg = "iptables did not return version information: {0}".format(output)
logger.warn(msg)
raise Exception(msg)
wait = "-w" \
if FlexibleVersion(m.group(1)) >= _IPTABLES_LOCKING_VERSION \
else ""
return wait
def _delete_rule(self, rule):
"""
Continually execute the delete operation until the return
code is non-zero or the limit has been reached.
"""
for i in range(1, 100): # pylint: disable=W0612
try:
rc = shellutil.run_command(rule) # pylint: disable=W0612
except CommandError as e:
if e.returncode == 1:
return
if e.returncode == 2:
raise Exception("invalid firewall deletion rule '{0}'".format(rule))
def remove_firewall(self, dst_ip, uid):
# If a previous attempt failed, do not retry
global _enable_firewall # pylint: disable=W0603
if not _enable_firewall:
return False
try:
wait = self.get_firewall_will_wait()
# This rule was <= 2.2.25 only, and may still exist on some VMs. Until 2.2.25
# has aged out, keep this cleanup in place.
self._delete_rule(_get_firewall_delete_conntrack_accept_command(wait, dst_ip))
self._delete_rule(_get_firewall_delete_owner_accept_command(wait, dst_ip, uid))
self._delete_rule(_get_firewall_delete_conntrack_drop_command(wait, dst_ip))
return True
except Exception as e:
_enable_firewall = False
logger.info("Unable to remove firewall -- "
"no further attempts will be made: "
"{0}".format(ustr(e)))
return False
def remove_legacy_firewall_rule(self, dst_ip):
# This function removes the legacy firewall rule that was added <= 2.2.25.
        # Not adding the global _enable_firewall check here as this will only be called once per service start and
        # we don't want the state of this call to affect other iptables calls.
try:
wait = self.get_firewall_will_wait()
# This rule was <= 2.2.25 only, and may still exist on some VMs. Until 2.2.25
# has aged out, keep this cleanup in place.
self._delete_rule(_get_firewall_delete_conntrack_accept_command(wait, dst_ip))
except Exception as error:
logger.info(
"Unable to remove legacy firewall rule, won't try removing it again. Error: {0}".format(ustr(error)))
def enable_firewall(self, dst_ip, uid):
# If a previous attempt failed, do not retry
global _enable_firewall # pylint: disable=W0603
if not _enable_firewall:
return False
try:
wait = self.get_firewall_will_wait()
# If the DROP rule exists, make no changes
try:
drop_rule = _get_firewall_drop_command(wait, "-C", dst_ip)
shellutil.run_command(drop_rule)
logger.verbose("Firewall appears established")
return True
except CommandError as e:
if e.returncode == 2:
self.remove_firewall(dst_ip, uid)
msg = "please upgrade iptables to a version that supports the -C option"
logger.warn(msg)
raise Exception(msg)
# Otherwise, append both rules
try:
AddFirewallRules.add_iptables_rules(wait, dst_ip, uid)
except Exception as error:
logger.warn(ustr(error))
raise
logger.info("Successfully added Azure fabric firewall rules")
try:
output = shellutil.run_command(_get_firewall_list_command(wait))
logger.info("Firewall rules:\n{0}".format(output))
except Exception as e:
logger.warn("Listing firewall rules failed: {0}".format(ustr(e)))
return True
except Exception as e:
_enable_firewall = False
logger.info("Unable to establish firewall -- "
"no further attempts will be made: "
"{0}".format(ustr(e)))
return False
@staticmethod
def _correct_instance_id(instance_id):
"""
Azure stores the instance ID with an incorrect byte ordering for the
first parts. For example, the ID returned by the metadata service:
D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8
will be found as:
544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8
This code corrects the byte order such that it is consistent with
that returned by the metadata service.
"""
if not UUID_PATTERN.match(instance_id):
return instance_id
parts = instance_id.split('-')
return '-'.join([
textutil.swap_hexstring(parts[0], width=2),
textutil.swap_hexstring(parts[1], width=2),
textutil.swap_hexstring(parts[2], width=2),
parts[3],
parts[4]
])
def is_current_instance_id(self, id_that):
"""
Compare two instance IDs for equality, but allow that some IDs
may have been persisted using the incorrect byte ordering.
"""
id_this = self.get_instance_id()
logger.verbose("current instance id: {0}".format(id_this))
logger.verbose(" former instance id: {0}".format(id_that))
return id_this.lower() == id_that.lower() or \
id_this.lower() == self._correct_instance_id(id_that).lower()
def get_agent_conf_file_path(self):
return self.agent_conf_file_path
def get_instance_id(self):
"""
Azure records a UUID as the instance ID
First check /sys/class/dmi/id/product_uuid.
If that is missing, then extracts from dmidecode
If nothing works (for old VMs), return the empty string
"""
if os.path.isfile(PRODUCT_ID_FILE):
s = fileutil.read_file(PRODUCT_ID_FILE).strip()
else:
rc, s = shellutil.run_get_output(DMIDECODE_CMD)
if rc != 0 or UUID_PATTERN.match(s) is None:
return ""
return self._correct_instance_id(s.strip())
@staticmethod
def get_userentry(username):
try:
return pwd.getpwnam(username)
except KeyError:
return None
def is_sys_user(self, username):
"""
        Check whether the user is a system user.
If reset sys user is allowed in conf, return False
Otherwise, check whether UID is less than UID_MIN
"""
if conf.get_allow_reset_sys_user():
return False
userentry = self.get_userentry(username)
uidmin = None
try:
uidmin_def = fileutil.get_line_startingwith("UID_MIN",
"/etc/login.defs")
if uidmin_def is not None:
uidmin = int(uidmin_def.split()[1])
except IOError as e: # pylint: disable=W0612
pass
        if uidmin is None:
            uidmin = 100
        if userentry is not None and userentry[2] < uidmin:
return True
else:
return False
def useradd(self, username, expiration=None, comment=None):
"""
Create user account with 'username'
"""
userentry = self.get_userentry(username)
if userentry is not None:
logger.info("User {0} already exists, skip useradd", username)
return
if expiration is not None:
cmd = ["useradd", "-m", username, "-e", expiration]
else:
cmd = ["useradd", "-m", username]
if comment is not None:
cmd.extend(["-c", comment])
self._run_command_raising_OSUtilError(cmd, err_msg="Failed to create user account:{0}".format(username))
def chpasswd(self, username, password, crypt_id=6, salt_len=10):
if self.is_sys_user(username):
raise OSUtilError(("User {0} is a system user, "
"will not set password.").format(username))
passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len)
self._run_command_raising_OSUtilError(["usermod", "-p", passwd_hash, username],
err_msg="Failed to set password for {0}".format(username))
def get_users(self):
return getpwall()
def conf_sudoer(self, username, nopasswd=False, remove=False):
sudoers_dir = conf.get_sudoers_dir()
sudoers_wagent = os.path.join(sudoers_dir, 'waagent')
if not remove:
# for older distros create sudoers.d
if not os.path.isdir(sudoers_dir):
# create the sudoers.d directory
fileutil.mkdir(sudoers_dir)
# add the include of sudoers.d to the /etc/sudoers
sudoers_file = os.path.join(sudoers_dir, os.pardir, 'sudoers')
include_sudoers_dir = "\n#includedir {0}\n".format(sudoers_dir)
fileutil.append_file(sudoers_file, include_sudoers_dir)
sudoer = None
if nopasswd:
sudoer = "{0} ALL=(ALL) NOPASSWD: ALL".format(username)
else:
sudoer = "{0} ALL=(ALL) ALL".format(username)
if not os.path.isfile(sudoers_wagent) or \
fileutil.findstr_in_file(sudoers_wagent, sudoer) is False:
fileutil.append_file(sudoers_wagent, "{0}\n".format(sudoer))
fileutil.chmod(sudoers_wagent, 0o440)
else:
# remove user from sudoers
if os.path.isfile(sudoers_wagent):
try:
content = fileutil.read_file(sudoers_wagent)
sudoers = content.split("\n")
sudoers = [x for x in sudoers if username not in x]
fileutil.write_file(sudoers_wagent, "\n".join(sudoers))
except IOError as e:
raise OSUtilError("Failed to remove sudoer: {0}".format(e))
def del_root_password(self):
try:
passwd_file_path = conf.get_passwd_file_path()
passwd_content = fileutil.read_file(passwd_file_path)
passwd = passwd_content.split('\n')
new_passwd = [x for x in passwd if not x.startswith("root:")]
new_passwd.insert(0, "root:*LOCK*:14600::::::")
fileutil.write_file(passwd_file_path, "\n".join(new_passwd))
except IOError as e:
raise OSUtilError("Failed to delete root password:{0}".format(e))
@staticmethod
def _norm_path(filepath):
home = conf.get_home_dir()
# Expand HOME variable if present in path
path = os.path.normpath(filepath.replace("$HOME", home))
return path
def deploy_ssh_keypair(self, username, keypair):
"""
Deploy id_rsa and id_rsa.pub
"""
path, thumbprint = keypair
path = self._norm_path(path)
dir_path = os.path.dirname(path)
fileutil.mkdir(dir_path, mode=0o700, owner=username)
lib_dir = conf.get_lib_dir()
prv_path = os.path.join(lib_dir, thumbprint + '.prv')
if not os.path.isfile(prv_path):
raise OSUtilError("Can't find {0}.prv".format(thumbprint))
shutil.copyfile(prv_path, path)
pub_path = path + '.pub'
crytputil = CryptUtil(conf.get_openssl_cmd())
pub = crytputil.get_pubkey_from_prv(prv_path)
fileutil.write_file(pub_path, pub)
self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0')
self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0')
os.chmod(path, 0o644)
os.chmod(pub_path, 0o600)
def openssl_to_openssh(self, input_file, output_file):
cryptutil = CryptUtil(conf.get_openssl_cmd())
cryptutil.crt_to_ssh(input_file, output_file)
def deploy_ssh_pubkey(self, username, pubkey):
"""
Deploy authorized_key
"""
path, thumbprint, value = pubkey
if path is None:
raise OSUtilError("Public key path is None")
crytputil = CryptUtil(conf.get_openssl_cmd())
path = self._norm_path(path)
dir_path = os.path.dirname(path)
fileutil.mkdir(dir_path, mode=0o700, owner=username)
if value is not None:
if not value.startswith("ssh-"):
raise OSUtilError("Bad public key: {0}".format(value))
if not value.endswith("\n"):
value += "\n"
fileutil.write_file(path, value)
elif thumbprint is not None:
lib_dir = conf.get_lib_dir()
crt_path = os.path.join(lib_dir, thumbprint + '.crt')
if not os.path.isfile(crt_path):
raise OSUtilError("Can't find {0}.crt".format(thumbprint))
pub_path = os.path.join(lib_dir, thumbprint + '.pub')
pub = crytputil.get_pubkey_from_crt(crt_path)
fileutil.write_file(pub_path, pub)
self.set_selinux_context(pub_path,
'unconfined_u:object_r:ssh_home_t:s0')
self.openssl_to_openssh(pub_path, path)
fileutil.chmod(pub_path, 0o600)
else:
raise OSUtilError("SSH public key Fingerprint and Value are None")
self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0')
fileutil.chowner(path, username)
fileutil.chmod(path, 0o644)
def is_selinux_system(self):
"""
Checks and sets self.selinux = True if SELinux is available on system.
"""
        if self.selinux is None:
if shellutil.run("which getenforce", chk_err=False) == 0:
self.selinux = True
else:
self.selinux = False
return self.selinux
def is_selinux_enforcing(self):
"""
Calls shell command 'getenforce' and returns True if 'Enforcing'.
"""
if self.is_selinux_system():
output = shellutil.run_get_output("getenforce")[1]
return output.startswith("Enforcing")
else:
return False
def set_selinux_context(self, path, con): # pylint: disable=R1710
"""
Calls shell 'chcon' with 'path' and 'con' context.
Returns exit result.
"""
if self.is_selinux_system():
if not os.path.exists(path):
logger.error("Path does not exist: {0}".format(path))
return 1
try:
shellutil.run_command(['chcon', con, path], log_error=True)
except shellutil.CommandError as cmd_err:
return cmd_err.returncode
return 0
def conf_sshd(self, disable_password):
option = "no" if disable_password else "yes"
conf_file_path = conf.get_sshd_conf_file_path()
conf_file = fileutil.read_file(conf_file_path).split("\n")
textutil.set_ssh_config(conf_file, "PasswordAuthentication", option)
textutil.set_ssh_config(conf_file, "ChallengeResponseAuthentication", option)
textutil.set_ssh_config(conf_file, "ClientAliveInterval", str(conf.get_ssh_client_alive_interval()))
fileutil.write_file(conf_file_path, "\n".join(conf_file))
logger.info("{0} SSH password-based authentication methods."
.format("Disabled" if disable_password else "Enabled"))
logger.info("Configured SSH client probing to keep connections alive.")
def get_dvd_device(self, dev_dir='/dev'):
pattern = r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]|vd[b-z])'
device_list = os.listdir(dev_dir)
for dvd in [re.match(pattern, dev) for dev in device_list]:
if dvd is not None:
return "/dev/{0}".format(dvd.group(0))
inner_detail = "The following devices were found, but none matched " \
"the pattern [{0}]: {1}\n".format(pattern, device_list)
raise OSUtilError(msg="Failed to get dvd device from {0}".format(dev_dir),
inner=inner_detail)
def mount_dvd(self,
max_retry=6,
chk_err=True,
dvd_device=None,
mount_point=None,
sleep_time=5):
if dvd_device is None:
dvd_device = self.get_dvd_device()
if mount_point is None:
mount_point = conf.get_dvd_mount_point()
mount_list = shellutil.run_get_output("mount")[1]
existing = self.get_mount_point(mount_list, dvd_device)
if existing is not None:
# already mounted
logger.info("{0} is already mounted at {1}", dvd_device, existing)
return
if not os.path.isdir(mount_point):
os.makedirs(mount_point)
err = ''
for retry in range(1, max_retry):
return_code, err = self.mount(dvd_device,
mount_point,
option=["-o", "ro", "-t", "udf,iso9660,vfat"],
chk_err=False)
if return_code == 0:
logger.info("Successfully mounted dvd")
return
else:
logger.warn(
"Mounting dvd failed [retry {0}/{1}, sleeping {2} sec]",
retry,
max_retry - 1,
sleep_time)
if retry < max_retry:
time.sleep(sleep_time)
if chk_err:
raise OSUtilError("Failed to mount dvd device", inner=err)
def umount_dvd(self, chk_err=True, mount_point=None):
if mount_point is None:
mount_point = conf.get_dvd_mount_point()
return_code = self.umount(mount_point, chk_err=chk_err)
if chk_err and return_code != 0:
raise OSUtilError("Failed to unmount dvd device at {0}".format(mount_point))
def eject_dvd(self, chk_err=True):
dvd = self.get_dvd_device()
dev = dvd.rsplit('/', 1)[1]
pattern = r'(vd[b-z])'
# We should not eject if the disk is not a cdrom
if re.search(pattern, dev):
return
try:
shellutil.run_command(["eject", dvd])
except shellutil.CommandError as cmd_err:
if chk_err:
msg = "Failed to eject dvd: ret={0}\n[stdout]\n{1}\n\n[stderr]\n{2}"\
.format(cmd_err.returncode, cmd_err.stdout, cmd_err.stderr)
raise OSUtilError(msg)
def try_load_atapiix_mod(self):
try:
self.load_atapiix_mod()
except Exception as e:
logger.warn("Could not load ATAPI driver: {0}".format(e))
def load_atapiix_mod(self):
if self.is_atapiix_mod_loaded():
return
ret, kern_version = shellutil.run_get_output("uname -r")
if ret != 0:
raise Exception("Failed to call uname -r")
mod_path = os.path.join('/lib/modules',
kern_version.strip('\n'),
'kernel/drivers/ata/ata_piix.ko')
if not os.path.isfile(mod_path):
raise Exception("Can't find module file:{0}".format(mod_path))
ret, output = shellutil.run_get_output("insmod " + mod_path) # pylint: disable=W0612
if ret != 0:
raise Exception("Error calling insmod for ATAPI CD-ROM driver")
if not self.is_atapiix_mod_loaded(max_retry=3):
raise Exception("Failed to load ATAPI CD-ROM driver")
def is_atapiix_mod_loaded(self, max_retry=1):
for retry in range(0, max_retry):
ret = shellutil.run("lsmod | grep ata_piix", chk_err=False)
if ret == 0:
logger.info("Module driver for ATAPI CD-ROM is already present.")
return True
if retry < max_retry - 1:
time.sleep(1)
return False
def mount(self, device, mount_point, option=None, chk_err=True):
if not option:
option = []
cmd = ["mount"]
cmd.extend(option + [device, mount_point])
try:
output = shellutil.run_command(cmd, log_error=chk_err)
except shellutil.CommandError as cmd_err:
detail = "[{0}] returned {1}:\n stdout: {2}\n\nstderr: {3}".format(cmd, cmd_err.returncode,
cmd_err.stdout, cmd_err.stderr)
return cmd_err.returncode, detail
return 0, output
def umount(self, mount_point, chk_err=True):
try:
shellutil.run_command(["umount", mount_point], log_error=chk_err)
except shellutil.CommandError as cmd_err:
return cmd_err.returncode
return 0
def allow_dhcp_broadcast(self):
# Open DHCP port if iptables is enabled.
        # We suppress error logging on error.
shellutil.run("iptables -D INPUT -p udp --dport 68 -j ACCEPT",
chk_err=False)
shellutil.run("iptables -I INPUT -p udp --dport 68 -j ACCEPT",
chk_err=False)
def remove_rules_files(self, rules_files=None):
if rules_files is None:
rules_files = __RULES_FILES__
lib_dir = conf.get_lib_dir()
for src in rules_files:
file_name = fileutil.base_name(src)
dest = os.path.join(lib_dir, file_name)
if os.path.isfile(dest):
os.remove(dest)
if os.path.isfile(src):
logger.warn("Move rules file {0} to {1}", file_name, dest)
shutil.move(src, dest)
def restore_rules_files(self, rules_files=None):
if rules_files is None:
rules_files = __RULES_FILES__
lib_dir = conf.get_lib_dir()
for dest in rules_files:
filename = fileutil.base_name(dest)
src = os.path.join(lib_dir, filename)
if os.path.isfile(dest):
continue
if os.path.isfile(src):
logger.warn("Move rules file {0} to {1}", filename, dest)
shutil.move(src, dest)
def get_mac_addr(self):
"""
Convenience function, returns mac addr bound to
first non-loopback interface.
"""
ifname = self.get_if_name()
addr = self.get_if_mac(ifname)
return textutil.hexstr_to_bytearray(addr)
def get_if_mac(self, ifname):
"""
Return the mac-address bound to the socket.
"""
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
param = struct.pack('256s', (ifname[:15] + ('\0' * 241)).encode('latin-1'))
info = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFHWADDR, param)
sock.close()
return ''.join(['%02X' % textutil.str_to_ord(char) for char in info[18:24]])
@staticmethod
def _get_struct_ifconf_size():
"""
        Return the size of a struct ifreq entry as used by the SIOCGIFCONF ioctl.
        On 64-bit platforms the size is 40 bytes; on 32-bit platforms the size is 32 bytes.
"""
python_arc = platform.architecture()[0]
struct_size = 32 if python_arc == '32bit' else 40
return struct_size
def _get_all_interfaces(self):
"""
Return a dictionary mapping from interface name to IPv4 address.
Interfaces without a name are ignored.
"""
expected = 16 # how many devices should I expect...
struct_size = DefaultOSUtil._get_struct_ifconf_size()
array_size = expected * struct_size
buff = array.array('B', b'\0' * array_size)
param = struct.pack('iL', array_size, buff.buffer_info()[0])
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
ret = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFCONF, param)
retsize = (struct.unpack('iL', ret)[0])
sock.close()
if retsize == array_size:
logger.warn(('SIOCGIFCONF returned more than {0} up '
'network interfaces.'), expected)
ifconf_buff = array_to_bytes(buff)
ifaces = {}
for i in range(0, array_size, struct_size):
iface = ifconf_buff[i:i + IFNAMSIZ].split(b'\0', 1)[0]
if len(iface) > 0:
iface_name = iface.decode('latin-1')
if iface_name not in ifaces:
ifaces[iface_name] = socket.inet_ntoa(ifconf_buff[i + 20:i + 24])
return ifaces
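# Illustrative result (addresses are made up): the mapping returned above
# typically looks like {'lo': '127.0.0.1', 'eth0': '10.0.0.4'}; loopback is
# not filtered here, that happens later in get_first_if()/is_loopback().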
def get_first_if(self):
"""
Return the interface name, and IPv4 addr of the "primary" interface or,
failing that, any active non-loopback interface.
"""
primary = self.get_primary_interface()
ifaces = self._get_all_interfaces()
if primary in ifaces:
return primary, ifaces[primary]
for iface_name in ifaces.keys():
if not self.is_loopback(iface_name):
logger.info("Choosing non-primary [{0}]".format(iface_name))
return iface_name, ifaces[iface_name]
return '', ''
@staticmethod
def _build_route_list(proc_net_route):
"""
Construct a list of network route entries
:param list(str) proc_net_route: Route table lines, including headers, containing at least one route
:return: List of network route objects
:rtype: list(RouteEntry)
"""
idx = 0
column_index = {}
header_line = proc_net_route[0]
for header in filter(lambda h: len(h) > 0, header_line.split("\t")):
column_index[header.strip()] = idx
idx += 1
try:
idx_iface = column_index["Iface"]
idx_dest = column_index["Destination"]
idx_gw = column_index["Gateway"]
idx_flags = column_index["Flags"]
idx_metric = column_index["Metric"]
idx_mask = column_index["Mask"]
except KeyError:
msg = "/proc/net/route is missing key information; headers are [{0}]".format(header_line)
logger.error(msg)
return []
route_list = []
for entry in proc_net_route[1:]:
route = entry.split("\t")
if len(route) > 0:
route_obj = RouteEntry(route[idx_iface], route[idx_dest], route[idx_gw], route[idx_mask],
route[idx_flags], route[idx_metric])
route_list.append(route_obj)
return route_list
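# Example (hypothetical table, not read from a live system): _build_route_list
# expects the raw tab-separated lines of /proc/net/route, headers included.
#
#   sample = [
#       "Iface\tDestination\tGateway\tFlags\tRefCnt\tUse\tMetric\tMask\tMTU\tWindow\tIRTT",
#       "eth0\t00000000\t0114A8C0\t0003\t0\t0\t100\t00000000\t0\t0\t0",
#   ]
#   routes = DefaultOSUtil._build_route_list(sample)
#   # routes[0].interface == "eth0", routes[0].destination == "00000000"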
@staticmethod
def read_route_table():
"""
Return a list of strings comprising the route table, including column headers. Each line is stripped of leading
or trailing whitespace but is otherwise unmodified.
:return: Entries in the text route table
:rtype: list(str)
"""
try:
with open('/proc/net/route') as routing_table:
return list(map(str.strip, routing_table.readlines()))
except Exception as e:
logger.error("Cannot read route table [{0}]", ustr(e))
return []
@staticmethod
def get_list_of_routes(route_table):
"""
Construct a list of all network routes known to this system.
:param list(str) route_table: List of text entries from route table, including headers
:return: a list of network routes
:rtype: list(RouteEntry)
"""
route_list = []
count = len(route_table)
if count < 1:
logger.error("/proc/net/route is missing headers")
elif count == 1:
logger.error("/proc/net/route contains no routes")
else:
route_list = DefaultOSUtil._build_route_list(route_table)
return route_list
def get_primary_interface(self):
"""
Get the name of the primary interface, which is the one with the
default route attached to it; if there are multiple default routes,
the primary has the lowest Metric.
:return: the interface which has the default route
"""
# from linux/route.h
RTF_GATEWAY = 0x02
DEFAULT_DEST = "00000000"
primary_interface = None
if not self.disable_route_warning:
logger.info("Examine /proc/net/route for primary interface")
route_table = DefaultOSUtil.read_route_table()
def is_default(route):
return route.destination == DEFAULT_DEST and int(route.flags) & RTF_GATEWAY == RTF_GATEWAY
candidates = list(filter(is_default, DefaultOSUtil.get_list_of_routes(route_table)))
if len(candidates) > 0:
def get_metric(route):
return int(route.metric)
primary_route = min(candidates, key=get_metric)
primary_interface = primary_route.interface
if primary_interface is None:
primary_interface = ''
if not self.disable_route_warning:
with open('/proc/net/route') as routing_table_fh:
routing_table_text = routing_table_fh.read()
logger.warn('Could not determine primary interface, '
'please ensure /proc/net/route is correct')
logger.warn('Contents of /proc/net/route:\n{0}'.format(routing_table_text))
logger.warn('Primary interface examination will retry silently')
self.disable_route_warning = True
else:
logger.info('Primary interface is [{0}]'.format(primary_interface))
self.disable_route_warning = False
return primary_interface
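# Worked example (hypothetical routes): given two default routes
#   eth0 dest 00000000 flags 0003 metric 100
#   eth1 dest 00000000 flags 0003 metric 200
# both pass is_default(), and min(candidates, key=get_metric) selects eth0 as
# the primary interface.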
def is_primary_interface(self, ifname):
"""
Indicate whether the specified interface is the primary.
:param ifname: the name of the interface - eth0, lo, etc.
:return: True if this interface binds the default route
"""
return self.get_primary_interface() == ifname
def is_loopback(self, ifname):
"""
Determine if a named interface is loopback.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
ifname_buff = ifname + ('\0' * 256)
result = fcntl.ioctl(s.fileno(), IOCTL_SIOCGIFFLAGS, ifname_buff)
flags, = struct.unpack('H', result[16:18])
isloopback = flags & 8 == 8
if not self.disable_route_warning:
logger.info('interface [{0}] has flags [{1}], '
'is loopback [{2}]'.format(ifname, flags, isloopback))
s.close()
return isloopback
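# Worked example (typical flag values): the loopback device usually reports
# flags 0x49 (IFF_UP | IFF_LOOPBACK | IFF_RUNNING); 0x49 & 8 == 8, so it is
# classified as loopback. A plain ethernet interface (e.g. flags 0x1043) does
# not have the 0x8 bit set and is not.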
def get_dhcp_lease_endpoint(self):
"""
OS specific, this should return the decoded endpoint of
the wireserver from option 245 in the dhcp leases file
if it exists on disk.
:return: The endpoint if available, or None
"""
return None
@staticmethod
def get_endpoint_from_leases_path(pathglob):
"""
Try to discover and decode the wireserver endpoint in the
specified dhcp leases path.
:param pathglob: The path containing dhcp lease files
:return: The endpoint if available, otherwise None
"""
endpoint = None
HEADER_LEASE = "lease"
HEADER_OPTION_245 = "option unknown-245"
HEADER_EXPIRE = "expire"
FOOTER_LEASE = "}"
FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S"
option_245_re = re.compile(
r'\s*option\s+unknown-245\s+([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+):([0-9a-fA-F]+);')
logger.info("looking for leases in path [{0}]".format(pathglob))
for lease_file in glob.glob(pathglob):
leases = open(lease_file).read()
if HEADER_OPTION_245 in leases:
cached_endpoint = None
option_245_match = None
expired = True # assume expired
for line in leases.splitlines():
if line.startswith(HEADER_LEASE):
cached_endpoint = None
expired = True
elif HEADER_EXPIRE in line:
if "never" in line:
expired = False
else:
try:
expire_string = line.split(" ", 4)[-1].strip(";")
expire_date = datetime.datetime.strptime(expire_string, FORMAT_DATETIME)
if expire_date > datetime.datetime.utcnow():
expired = False
except: # pylint: disable=W0702
logger.error("could not parse expiry token '{0}'".format(line))
elif FOOTER_LEASE in line:
logger.info("dhcp entry:{0}, 245:{1}, expired:{2}".format(
cached_endpoint, option_245_match is not None, expired))
if not expired and cached_endpoint is not None:
endpoint = cached_endpoint
logger.info("found endpoint [{0}]".format(endpoint))
# we want to return the last valid entry, so
# keep searching
else:
option_245_match = option_245_re.match(line)
if option_245_match is not None:
cached_endpoint = '{0}.{1}.{2}.{3}'.format(
int(option_245_match.group(1), 16),
int(option_245_match.group(2), 16),
int(option_245_match.group(3), 16),
int(option_245_match.group(4), 16))
if endpoint is not None:
logger.info("cached endpoint found [{0}]".format(endpoint))
else:
logger.info("cached endpoint not found")
return endpoint
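# Example (the address shown is the well-known Azure wireserver IP, used here
# only as an illustration): a lease line such as
#   option unknown-245 a8:3f:81:10;
# matches option_245_re, and the int(..., 16) conversions above decode it to
# '168.63.129.16'.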
def is_missing_default_route(self):
try:
route_cmd = ["ip", "route", "show"]
routes = shellutil.run_command(route_cmd)
for route in routes.split("\n"):
if route.startswith("0.0.0.0 ") or route.startswith("default "):
return False
return True
except CommandError as e:
logger.warn("Cannot get the routing table. {0} failed: {1}", ustr(route_cmd), ustr(e))
return False
def get_if_name(self):
if_name = ''
if_found = False
while not if_found:
if_name = self.get_first_if()[0]
if_found = len(if_name) >= 2
if not if_found:
time.sleep(2)
return if_name
def get_ip4_addr(self):
return self.get_first_if()[1]
def set_route_for_dhcp_broadcast(self, ifname):
try:
route_cmd = ["ip", "route", "add", "255.255.255.255", "dev", ifname]
return shellutil.run_command(route_cmd)
except CommandError:
return ""
def remove_route_for_dhcp_broadcast(self, ifname):
try:
route_cmd = ["ip", "route", "del", "255.255.255.255", "dev", ifname]
shellutil.run_command(route_cmd)
except CommandError:
pass
def is_dhcp_available(self):
return True
def is_dhcp_enabled(self):
return False
def stop_dhcp_service(self):
pass
def start_dhcp_service(self):
pass
def start_network(self):
pass
def start_agent_service(self):
pass
def stop_agent_service(self):
pass
def register_agent_service(self):
pass
def unregister_agent_service(self):
pass
def restart_ssh_service(self):
pass
def route_add(self, net, mask, gateway): # pylint: disable=W0613
"""
Add specified route
"""
try:
cmd = ["ip", "route", "add", net, "via", gateway]
return shellutil.run_command(cmd)
except CommandError:
return ""
@staticmethod
def _text_to_pid_list(text):
return [int(n) for n in text.split()]
@staticmethod
def _get_dhcp_pid(command):
try:
return DefaultOSUtil._text_to_pid_list(shellutil.run_command(command))
except CommandError as exception: # pylint: disable=W0612
return []
def get_dhcp_pid(self):
return self._get_dhcp_pid(["pidof", "dhclient"])
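# Example (hypothetical output): if `pidof dhclient` prints "812 1377",
# _text_to_pid_list() turns that into [812, 1377]; if the command fails
# (no dhclient running) the CommandError is swallowed and [] is returned.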
def set_hostname(self, hostname):
fileutil.write_file('/etc/hostname', hostname)
self._run_command_without_raising(["hostname", hostname], log_error=False)
def set_dhcp_hostname(self, hostname):
autosend = r'^[^#]*?send\s*host-name.*?(<hostname>|gethostname[(,)])'
dhclient_files = ['/etc/dhcp/dhclient.conf', '/etc/dhcp3/dhclient.conf', '/etc/dhclient.conf']
for conf_file in dhclient_files:
if not os.path.isfile(conf_file):
continue
if fileutil.findre_in_file(conf_file, autosend):
# Return if auto send host-name is configured
return
fileutil.update_conf_file(conf_file,
'send host-name',
'send host-name "{0}";'.format(hostname))
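# Example (hypothetical hostname): set_dhcp_hostname("myvm") leaves a config
# file untouched when an auto-send directive already matches the regex above;
# otherwise it rewrites the 'send host-name' line of each existing
# dhclient.conf to:
#   send host-name "myvm";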
def restart_if(self, ifname, retries=3, wait=5):
retry_limit = retries + 1
for attempt in range(1, retry_limit):
try:
shellutil.run_command(["ifdown", ifname])
shellutil.run_command(["ifup", ifname])
return
except shellutil.CommandError as cmd_err:
msg = "failed to restart {0}: returncode={1}\n[stdout]{2}\n\n[stderr]{3}\n"\
.format(ifname, cmd_err.returncode, cmd_err.stdout, cmd_err.stderr)
if cmd_err.returncode == 1:
logger.info(msg)
else:
logger.warn(msg)
if attempt < retry_limit:
logger.info("retrying in {0} seconds".format(wait))
time.sleep(wait)
else:
logger.warn("exceeded restart retries")
def publish_hostname(self, hostname):
self.set_dhcp_hostname(hostname)
self.set_hostname_record(hostname)
ifname = self.get_if_name()
self.restart_if(ifname)
def set_scsi_disks_timeout(self, timeout):
for dev in os.listdir("/sys/block"):
if dev.startswith('sd'):
self.set_block_device_timeout(dev, timeout)
def set_block_device_timeout(self, dev, timeout):
if dev is not None and timeout is not None:
file_path = "/sys/block/{0}/device/timeout".format(dev)
content = fileutil.read_file(file_path)
original = content.splitlines()[0].rstrip()
if original != timeout:
fileutil.write_file(file_path, timeout)
logger.info("Set block dev timeout: {0} with timeout: {1}",
dev, timeout)
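# Example (hypothetical device): set_block_device_timeout("sdb", "300") reads
# /sys/block/sdb/device/timeout and rewrites it with "300" only when the
# current value differs, logging the change.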
def get_mount_point(self, mountlist, device):
"""
Example of mountlist:
/dev/sda1 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs
(rw,rootcontext="system_u:object_r:tmpfs_t:s0")
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
/dev/sdb1 on /mnt/resource type ext4 (rw)
"""
if (mountlist and device):
for entry in mountlist.split('\n'):
if (re.search(device, entry)):
tokens = entry.split()
# Return the 3rd column of this line
return tokens[2] if len(tokens) > 2 else None
return None
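# Worked example (using the sample mountlist from the docstring above):
#   get_mount_point(mountlist, "/dev/sdb") -> "/mnt/resource"
# because the matching line "/dev/sdb1 on /mnt/resource type ext4 (rw)" has
# "/mnt/resource" as its third whitespace-separated token.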
@staticmethod
def _enumerate_device_id():
"""
Enumerate all storage device IDs.
Args:
None
Returns:
Iterator[Tuple[str, str]]: VmBus and storage devices.
"""
if os.path.exists(STORAGE_DEVICE_PATH):
for vmbus in os.listdir(STORAGE_DEVICE_PATH):
deviceid = fileutil.read_file(os.path.join(STORAGE_DEVICE_PATH, vmbus, "device_id"))
guid = deviceid.strip('{}\n')
yield vmbus, guid
@staticmethod
def search_for_resource_disk(gen1_device_prefix, gen2_device_id):
"""
Search the filesystem for a device by ID or prefix.
Args:
gen1_device_prefix (str): Gen1 resource disk prefix.
gen2_device_id (str): Gen2 resource device ID.
Returns:
str: The found device.
"""
device = None
# We have to try device IDs for both Gen1 and Gen2 VMs.
logger.info('Searching gen1 prefix {0} or gen2 {1}'.format(gen1_device_prefix, gen2_device_id))
try:
for vmbus, guid in DefaultOSUtil._enumerate_device_id():
if guid.startswith(gen1_device_prefix) or guid == gen2_device_id:
for root, dirs, files in os.walk(STORAGE_DEVICE_PATH + vmbus): # pylint: disable=W0612
root_path_parts = root.split('/')
# For Gen1 VMs we only have to check for the block dir in the
# current device. But for Gen2 VMs all of the disks (sda, sdb,
# sr0) are presented in this device on the same SCSI controller.
# Because of that we need to also read the LUN. It will be:
# 0 - OS disk
# 1 - Resource disk
# 2 - CDROM
if root_path_parts[-1] == 'block' and (
guid != gen2_device_id or
root_path_parts[-2].split(':')[-1] == '1'):
device = dirs[0]
return device
else:
# older distros
for d in dirs:
if ':' in d and "block" == d.split(':')[0]:
device = d.split(':')[1]
return device
except (OSError, IOError) as exc:
logger.warn('Error getting device for {0} or {1}: {2}', gen1_device_prefix, gen2_device_id, ustr(exc))
return None
def device_for_ide_port(self, port_id):
"""
Return device name attached to ide port 'n'.
"""
if port_id > 3:
return None
g0 = "00000000"
if port_id > 1:
g0 = "00000001"
port_id = port_id - 2
gen1_device_prefix = '{0}-000{1}'.format(g0, port_id)
device = DefaultOSUtil.search_for_resource_disk(
gen1_device_prefix=gen1_device_prefix,
gen2_device_id=GEN2_DEVICE_ID
)
logger.info('Found device: {0}'.format(device))
return device
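# Worked example (prefix construction only, no filesystem access):
#   device_for_ide_port(1) searches for GUIDs starting with "00000000-0001";
#   device_for_ide_port(2) sets g0 = "00000001", port_id = 0 and searches for
#   "00000001-0000".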
def set_hostname_record(self, hostname):
fileutil.write_file(conf.get_published_hostname(), contents=hostname)
def get_hostname_record(self):
hostname_record = conf.get_published_hostname()
if not os.path.exists(hostname_record):
# older agents (but newer or equal to 2.2.3) create published_hostname during provisioning; when provisioning is done
# by cloud-init the hostname is written to set-hostname
hostname = self._get_cloud_init_hostname()
if hostname is None:
logger.info("Retrieving hostname using socket.gethostname()")
hostname = socket.gethostname()
logger.info('Published hostname record does not exist, creating [{0}] with hostname [{1}]', hostname_record, hostname)
self.set_hostname_record(hostname)
record = fileutil.read_file(hostname_record)
return record
@staticmethod
def _get_cloud_init_hostname():
"""
Retrieves the hostname set by cloud-init; returns None if cloud-init did not set the hostname or if there is an
error retrieving it.
"""
hostname_file = '/var/lib/cloud/data/set-hostname'
try:
if os.path.exists(hostname_file):
#
# The format is similar to
#
# $ cat /var/lib/cloud/data/set-hostname
# {
# "fqdn": "nam-u18",
# "hostname": "nam-u18"
# }
#
logger.info("Retrieving hostname from {0}", hostname_file)
with open(hostname_file, 'r') as file_:
hostname_info = json.load(file_)
if "hostname" in hostname_info:
return hostname_info["hostname"]
except Exception as exception:
logger.warn("Error retrieving hostname: {0}", ustr(exception))
return None
def del_account(self, username):
if self.is_sys_user(username):
logger.error("{0} is a system user. Will not delete it.", username)
return
self._run_command_without_raising(["touch", "/var/run/utmp"])
self._run_command_without_raising(['userdel', '-f', '-r', username])
self.conf_sudoer(username, remove=True)
def decode_customdata(self, data):
return base64.b64decode(data).decode('utf-8')
def get_total_mem(self):
# Get total memory in bytes and divide by 1024**2 to get the value in MB.
return os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024 ** 2)
def get_processor_cores(self):
return multiprocessing.cpu_count()
def check_pid_alive(self, pid):
try:
pid = int(pid)
os.kill(pid, 0)
except (ValueError, TypeError):
return False
except OSError as os_error:
if os_error.errno == errno.EPERM:
return True
return False
return True
@property
def is_64bit(self):
return sys.maxsize > 2 ** 32
@staticmethod
def _get_proc_stat():
"""
Get the contents of /proc/stat.
# cpu 813599 3940 909253 154538746 874851 0 6589 0 0 0
# cpu0 401094 1516 453006 77276738 452939 0 3312 0 0 0
# cpu1 412505 2423 456246 77262007 421912 0 3276 0 0 0
:return: A single string with the contents of /proc/stat
:rtype: str
"""
results = None
try:
results = fileutil.read_file('/proc/stat')
except (OSError, IOError) as ex:
logger.warn("Couldn't read /proc/stat: {0}".format(ex.strerror))
raise
return results
@staticmethod
def get_total_cpu_ticks_since_boot():
"""
Compute the number of USER_HZ units of time that have elapsed in all categories, across all cores, since boot.
:return: int
"""
system_cpu = 0
proc_stat = DefaultOSUtil._get_proc_stat()
if proc_stat is not None:
for line in proc_stat.splitlines():
if ALL_CPUS_REGEX.match(line):
system_cpu = sum(
int(i) for i in line.split()[1:8]) # see "man proc" for a description of these fields
break
return system_cpu
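# Worked example (using the sample /proc/stat lines quoted in _get_proc_stat):
# the aggregate "cpu" line is matched and fields 1..7 are summed, i.e.
#   813599 + 3940 + 909253 + 154538746 + 874851 + 0 + 6589 = 157146978
# USER_HZ ticks across all cores since boot.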
def get_nic_state(self, as_string=False):
"""
Capture NIC state (IPv4 and IPv6 addresses plus link state).
:return: By default returns a dictionary of NIC state objects, with the NIC name as key. If as_string is True
returns the state as a string
:rtype: dict(str,NetworkInformationCard)
"""
state = {}
all_command = ["ip", "-a", "-o", "link"]
inet_command = ["ip", "-4", "-a", "-o", "address"]
inet6_command = ["ip", "-6", "-a", "-o", "address"]
try:
all_output = shellutil.run_command(all_command)
except shellutil.CommandError as command_error:
logger.verbose("Could not fetch NIC link info: {0}", ustr(command_error))
return "" if as_string else {}
if as_string:
def run_command(command):
try:
return shellutil.run_command(command)
except shellutil.CommandError as command_error:
return str(command_error)
inet_output = run_command(inet_command)
inet6_output = run_command(inet6_command)
return "Executing {0}:\n{1}\nExecuting {2}:\n{3}\nExecuting {4}:\n{5}\n".format(all_command, all_output, inet_command, inet_output, inet6_command, inet6_output)
else:
self._update_nic_state_all(state, all_output)
self._update_nic_state(state, inet_command, NetworkInterfaceCard.add_ipv4, "an IPv4 address")
self._update_nic_state(state, inet6_command, NetworkInterfaceCard.add_ipv6, "an IPv6 address")
return state
@staticmethod
def _update_nic_state_all(state, command_output):
for entry in command_output.splitlines():
# Sample output:
# 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0 addrgenmode eui64
# 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000\ link/ether 00:0d:3a:30:c3:5a brd ff:ff:ff:ff:ff:ff promiscuity 0 addrgenmode eui64
# 3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default \ link/ether 02:42:b5:d5:00:1d brd ff:ff:ff:ff:ff:ff promiscuity 0 \ bridge forward_delay 1500 hello_time 200 max_age 2000 ageing_time 30000 stp_state 0 priority 32768 vlan_filtering 0 vlan_protocol 802.1Q addrgenmode eui64
result = IP_COMMAND_OUTPUT.match(entry)
if result:
name = result.group(1)
state[name] = NetworkInterfaceCard(name, result.group(2))
@staticmethod
def _update_nic_state(state, ip_command, handler, description):
"""
Update the state of NICs based on the output of a specified ip subcommand.
:param dict(str, NetworkInterfaceCard) state: Dictionary of NIC state objects
:param str ip_command: The ip command to run
:param handler: A method on the NetworkInterfaceCard class
:param str description: Description of the particular information being added to the state
"""
try:
output = shellutil.run_command(ip_command)
for entry in output.splitlines():
# family inet sample output:
# 1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever
# 2: eth0 inet 10.145.187.220/26 brd 10.145.187.255 scope global eth0\ valid_lft forever preferred_lft forever
# 3: docker0 inet 192.168.43.1/24 brd 192.168.43.255 scope global docker0\ valid_lft forever preferred_lft forever
#
# family inet6 sample output:
# 1: lo inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever
# 2: eth0 inet6 fe80::20d:3aff:fe30:c35a/64 scope link \ valid_lft forever preferred_lft forever
result = IP_COMMAND_OUTPUT.match(entry)
if result:
interface_name = result.group(1)
if interface_name in state:
handler(state[interface_name], result.group(2))
else:
logger.error("Interface {0} has {1} but no link state".format(interface_name, description))
except shellutil.CommandError as command_error:
logger.error("[{0}] failed: {1}", ' '.join(ip_command), str(command_error))
@staticmethod
def _run_command_without_raising(cmd, log_error=True):
try:
shellutil.run_command(cmd, log_error=log_error)
# Original implementation of run() does a blanket catch, so mimicking the behaviour here
except Exception:
pass
@staticmethod
def _run_multiple_commands_without_raising(commands, log_error=True, continue_on_error=False):
for cmd in commands:
try:
shellutil.run_command(cmd, log_error=log_error)
# Original implementation of run() does a blanket catch, so mimicking the behaviour here
except Exception:
if continue_on_error:
continue
break
@staticmethod
def _run_command_raising_OSUtilError(cmd, err_msg, cmd_input=None):
# This method runs shell command using the new secure shellutil.run_command and raises OSUtilErrors on failures.
try:
return shellutil.run_command(cmd, log_error=True, input=cmd_input)
except shellutil.CommandError as e:
raise OSUtilError(
"{0}, Retcode: {1}, Output: {2}, Error: {3}".format(err_msg, e.returncode, e.stdout, e.stderr))
except Exception as e:
raise OSUtilError("{0}, Retcode: {1}, Error: {2}".format(err_msg, -1, ustr(e)))
|
apache-2.0
| -4,743,960,978,200,930,000
| 38.67211
| 356
| 0.563288
| false
| 3.955327
| false
| false
| false
|
dtiarks/ThesisPlot
|
Chap2/MoleculeMemory/memory.py
|
1
|
3776
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 11:29:40 2017
@author: daniel
"""
import Tomography as tom
import quPy as qp
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import json
import io
import scipy.constants as co
#import rydpy
c = 299792458 # m/s, speed of light CODATA 2014
a0 = 0.52917721067e-10 # m, Bohr radius
C6 = 2.3e23 * 4.36e-18 * a0**6 # Jm^6, Van-der-Waals coefficient for the 67s - 69s
hbar = 6.626070040e-34/(2 * np.pi) # Js, Planck constant, CODATA 2014
rho_peak = 2.0e12/1e-6 # peak density in cm^-3/centi^-3
d = 2.534e-29 # Cm, dipole matrix element (D. A. Steck)
Gamma_e = 2*np.pi * 6.065e6 # decay rate (D. A. Steck)
epsilon_0 = 8.854187817e-12 # dielectric constant, CODATA 2014
L = 61e-6 # medium length in m
omega_s = 2*np.pi * 384.23e12 # rad/s, transition frequency
gamma_21 = 0.0577314
chi_0 = 2*rho_peak*d**2 / (epsilon_0*hbar*Gamma_e) # prefactor of the susceptibility for the cycling transition (|R> polarization)
R_b=18e-6
n1=69
n2=100
ec=co.e
m_e=co.electron_mass
#def classicalRadius(n,l=0,j=0.5):
# return (ec**2/(4*np.pi*epsilon_0*co.h*rydpy.IonEnergy(n,l,j,units="Hz")))
#
#def orbitalRadius(n,l=0,j=0.5):
# r,rrRR,_,_=rydpy.Numerov(n,l,j)
# imax=np.argmax(rrRR)
# return r[imax]*a0
def moleculeState(t,pm,omega):
return np.cos(pm)*np.array([1,0])+np.sin(pm)*np.exp(-1j*omega*t)*np.array([0,1])
def nuMolecule(t,pm,omega):
return np.cos(pm)**4 + np.sin(pm)**4 + 2*np.sin(pm)**2*np.cos(pm)**2*np.cos(omega*t)
def nuNumerical(ts,pm,omega):
s=np.array([np.inner(moleculeState(0,pm,omega),moleculeState(tx,pm,omega)) for tx in ts])
return np.abs(s)**2
def phiNumerical(ts,pm,omega):
s=np.array([np.angle(moleculeState(tx,pm,omega)[0]+moleculeState(tx,pm,omega)[1]) for tx in ts])
return s
def alphaMu(rat):
alpha=1/(1+(rat)**2)
beta=1/(1+(1/rat)**2)
return alpha,beta
def etaStephan(eps, alpha, mu):
return alpha**2+mu**2+2*alpha*mu*np.cos(2*eps)
def phiStephan(eps, alpha, mu):
phi=np.angle(alpha*np.exp(-1j*eps)+mu*np.exp(1j*eps))
phi=-(np.arctan(((mu-alpha)/(mu+alpha))*np.tan(eps))-np.pi*np.sign(mu-alpha)*np.mod(eps/np.pi+0.5,1))
return phi
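# Quick check of the definitions above: alphaMu(r) returns weights that sum
# to one, since 1/(1+r**2) + 1/(1+(1/r)**2) = 1, so at eps = 0 the contrast
# etaStephan(0, alpha, mu) = (alpha + mu)**2 = 1 for any ratio r. For r = 0.2
# the weights are roughly alpha ~ 0.962 and mu ~ 0.038.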
pM1=np.pi/12.
pM2=np.pi/6.
omega=2*np.pi*220e3
ts=np.linspace(0,7e-6,1000)
rat1=0.2
rat2=0.9
alpha,mu=alphaMu(rat1)
print etaStephan(1.,alpha,mu)
#nu=np.abs(np.outer(moleculeState(np.zeros_like(ts),pM,0),moleculeState(ts,pM,omega)))**2
plot_dict={}
# plot it
f=plt.figure()
h=pd.DataFrame(index=omega*ts,data=etaStephan(omega*ts,alpha,mu))
h2=pd.DataFrame(index=omega*ts,data=etaStephan(omega*ts,*alphaMu(rat2)))
plot_dict['121']={
'A':{'type':'plot','y':h[0].to_json(),'ylabel':u'$\eta_L(t)$','xlabel':r'$\omega_B t_D$ (rad)','ylim':(0,1.2),'num':'a','label':r'$r=0.2$'},
'B':{'type':'plot','y':h2[0].to_json(),'label':r'$r=0.9$'}
}
plt.subplot(121)
plt.ylabel(u'$\Delta \phi$')
plt.plot(omega*ts,etaStephan(omega*ts,alpha,mu))
plt.plot(omega*ts,etaStephan(omega*ts,*alphaMu(rat2)))
#plt.plot(1e6*ts,nuMolecule(ts,pM,omega))
#plt.axhline(1)
h=pd.DataFrame(index=omega*ts,data=phiStephan(omega*ts,*alphaMu(rat1))+0.5*np.pi)
h2=pd.DataFrame(index=omega*ts,data=phiStephan(omega*ts,*alphaMu(rat2))+0.5*np.pi)
plot_dict['122']={
'A':{'type':'plot','y':h[0].to_json(),'ylabel':r'$\phi_{L}$ (rad)','xlabel':r'$\omega_B t_D$ (rad)','num':'b','ylim':(-1,1.0)},
'B':{'type':'plot','y':h2[0].to_json(),}
}
plt.subplot(122)
plt.ylabel(u'$\Delta \phi$')
plt.plot(omega*ts,phiStephan(omega*ts,*alphaMu(rat1))-0.5*np.pi)
plt.plot(omega*ts,phiStephan(omega*ts,*alphaMu(rat2))-0.5*np.pi)
with io.open('memory.json', 'w+') as f:
f.write(unicode(json.dumps(plot_dict, ensure_ascii=False,indent=4)))
plt.show()
|
mit
| -2,405,916,120,718,962,700
| 30.214876
| 160
| 0.649894
| false
| 2.257023
| false
| false
| false
|
FedoraScientific/salome-smesh
|
src/Tools/blocFissure/gmu/partitionBlocDefaut.py
|
1
|
6971
|
# -*- coding: utf-8 -*-
import logging
from geomsmesh import geompy
import GEOM # provides GEOM.ST_ON, referenced further below (assumed available in the SALOME environment)
# -----------------------------------------------------------------------------
# --- partition of the defect block by the generatrix, the torus and the crack plane
def partitionBlocDefaut(volDefaut, facesDefaut, gener, pipe,
facefis, ellipsoide):
"""
Partition of the defect block by the generatrix of the ellipse,
the elliptical torus and the planar crack face.
@param volDefaut : the defect block
@param gener : the generatrix of the ellipse, positioned in space
@param pipe : the torus, partitioned by the crack plane, positioned
in space
@param facefis : the planar face of the crack, positioned in space
@return (volDefautPart, blocp, tore, faceFissure), the partitioned block and
the sub-shapes produced by the partition
(the block minus the torus, the generatrix, the torus, the crack face)
"""
logging.info("start")
volDefautPart = geompy.MakePartition([volDefaut], [pipe, facefis, ellipsoide], [], [], geompy.ShapeType["SOLID"], 0, [], 1)
blocp = geompy.GetInPlaceByHistory(volDefautPart, volDefaut)
#gencnt = geompy.GetInPlaceByHistory(volDefautPart, gener)
tore = geompy.GetInPlaceByHistory(volDefautPart, pipe)
faceFissure = geompy.GetInPlaceByHistory(volDefautPart, facefis)
#ellipsoidep =geompy.GetInPlaceByHistory(volDefautPart, ellipsoide)
geompy.addToStudy( volDefautPart, 'volDefautPart' )
geompy.addToStudyInFather( volDefautPart, tore, 'tore' )
geompy.addToStudyInFather( volDefautPart, faceFissure, 'FACE1' )
#geompy.addToStudyInFather( volDefautPart, gencnt, 'generatrice' )
solids = geompy.ExtractShapes(blocp, geompy.ShapeType["SOLID"], True)
vols = []
for i in range(len(solids)):
props = geompy.BasicProperties(solids[i])
vols.append(props[2])
maxvol = max(vols)
imaxvol = vols.index(maxvol)
blocp = solids[imaxvol]
vols[imaxvol] = 0
maxvol = max(vols)
imaxvol = vols.index(maxvol)
ellipsoidep = solids[imaxvol]
geompy.addToStudyInFather( volDefautPart, blocp, 'bloc' )
geompy.addToStudyInFather( volDefautPart, ellipsoidep, 'ellipsoide' )
sharedFaces = geompy.GetSharedShapesMulti([blocp, ellipsoidep], geompy.ShapeType["FACE"])
for i in range(len(sharedFaces)):
name = "faceCommuneEllipsoideBloc_%d"%i
geompy.addToStudyInFather(blocp, sharedFaces[i], name)
#sharedEdges = geompy.GetSharedShapesMulti([blocp, ellipsoidep], geompy.ShapeType["EDGE"])
allSharedEdges = geompy.GetSharedShapesMulti([blocp, ellipsoidep], geompy.ShapeType["EDGE"])
sharedEdges = []
for i in range(len(allSharedEdges)):
if geompy.NbShapes(allSharedEdges[i], geompy.ShapeType["VERTEX"]) > 1: # non-degenerate edge
sharedEdges.append(allSharedEdges[i])
for i in range(len(sharedEdges)):
name = "edgeCommuneEllipsoideBloc_%d"%i
geompy.addToStudyInFather(blocp, sharedEdges[i], name)
facesExternes = []
facesExtBloc = []
facesExtElli = []
faces = geompy.ExtractShapes(facesDefaut, geompy.ShapeType["FACE"], True)
if len(faces) == 0:
faces = [facesDefaut]
for i in range(len(faces)):
faceExt = geompy.GetInPlace(ellipsoidep, faces[i])
if faceExt is not None:
name = "faceExterne_e%d"%i
geompy.addToStudyInFather(ellipsoidep, faceExt, name)
facesExternes.append(faceExt)
facesExtElli.append(faceExt)
faceExt = geompy.GetInPlace(blocp, faces[i])
if faceExt is not None:
name = "faceExterne_b%d"%i
geompy.addToStudyInFather(blocp, faceExt, name)
facesExternes.append(faceExt)
facesExtBloc.append(faceExt)
else:
logging.info(" recherche faces externes par GetShapesOnShape")
vertex = geompy.MakeVertexOnSurface(faces[i], 0.5, 0.5)
normal = geompy.GetNormal(faces[i], vertex)
extrusionFace = geompy.MakePrismVecH(faces[i], normal, 1)
#extrusionFace = geompy.MakePrismVecH2Ways(faces[i], normal, 0.1)
#extrusionFace = geompy.MakeScaleTransform(extrusionFace, vertex, 1.01)
name = "extrusionFace_b%d"%i
geompy.addToStudyInFather(blocp, extrusionFace, name)
#facesExt = geompy.GetShapesOnShape(extrusionFace, blocp, geompy.ShapeType["FACE"], GEOM.ST_ONIN)
facesExt = geompy.GetShapesOnShape(extrusionFace, blocp, geompy.ShapeType["FACE"], GEOM.ST_ON)
for j in range(len(facesExt)):
name = "faceExterne_b%d_%d"%(i,j)
geompy.addToStudyInFather(blocp, facesExt[j], name)
facesExternes.append(facesExt[j])
facesExtBloc.append(facesExt[j])
if len(facesExtBloc) < len(faces): # not all external faces of the block were found. TODO: eliminate duplicate detections
logging.info(" recherche faces externes par aretes partagees avec faces externes ellipsoide")
facesBloc = geompy.ExtractShapes(blocp, geompy.ShapeType["FACE"], True)
for i in range(len(facesBloc)):
notOnEllipsoide = True
for j in range(len(sharedFaces)): # skip the faces shared with the ellipsoid
if facesBloc[i].IsSame(sharedFaces[j]):
notOnEllipsoide = False
break
if notOnEllipsoide:
for j in range(len(facesExtElli)): # the faces we are looking for share one or more edges with the external face(s) of the ellipsoid
allSharedEdges = []
try:
allSharedEdges += geompy.GetSharedShapesMulti([facesBloc[i], facesExtElli[j]], geompy.ShapeType["EDGE"])
except:
pass
if len(allSharedEdges) > 0:
name = "faceExterne_b%d_%d"%(i,j)
geompy.addToStudyInFather(blocp, facesBloc[i], name)
facesExternes.append(facesBloc[i])
facesExtBloc.append(facesBloc[i])
aretesInternes = []
for i in range(len(facesExternes)):
for j in range(i+1,len(facesExternes)):
shared = []
try:
shared += geompy.GetSharedShapesMulti([facesExternes[i], facesExternes[j]], geompy.ShapeType["EDGE"])
except:
logging.info("no shared edges in %s,%s",i,j)
else:
aretesInternes += shared
for i in range(len(aretesInternes)):
name = "aretesInternes_%d"%i
geompy.addToStudyInFather(blocp, aretesInternes[i], name)
edgesBords = []
for faceExtB in facesExtBloc:
edges = geompy.ExtractShapes(faceExtB, geompy.ShapeType["EDGE"], True)
for i in range(len(edges)):
isInterne = False
for j in range(len(aretesInternes)):
if edges[i].IsSame(aretesInternes[j]):
isInterne = True
break
if not isInterne:
edgesBords.append(edges[i])
name = "edgeBord%d"%i
geompy.addToStudyInFather(blocp,edges[i] , name)
group = None
if len(edgesBords) > 0:
group = geompy.CreateGroup(blocp, geompy.ShapeType["EDGE"])
geompy.UnionList(group, edgesBords)
edgesBords = group
return volDefautPart, blocp, tore, faceFissure, facesExternes, facesExtBloc, facesExtElli, aretesInternes, ellipsoidep, sharedFaces, sharedEdges, edgesBords
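# Example usage (the shape arguments are hypothetical and must come from a
# live SALOME/GEOM session): callers typically unpack the 12-element result
# in order, e.g.
#   (volDefautPart, blocp, tore, faceFissure, facesExternes, facesExtBloc,
#    facesExtElli, aretesInternes, ellipsoidep, sharedFaces, sharedEdges,
#    edgesBords) = partitionBlocDefaut(volDefaut, facesDefaut, gener, pipe,
#                                      facefis, ellipsoide)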
|
lgpl-2.1
| -5,217,617,912,348,345,000
| 42.51875
| 158
| 0.690507
| false
| 2.960459
| false
| false
| false
|
andreasBihlmaier/arni
|
arni_gui/src/arni_gui/topic_item.py
|
1
|
16458
|
from rospy.rostime import Time
import rospy
from python_qt_binding.QtCore import QTranslator
from abstract_item import AbstractItem
from helper_functions import prepare_number_for_representation, UPDATE_FREQUENCY, TOPIC_AGGREGATION_FREQUENCY, \
ROUND_DIGITS, MAXIMUM_OFFLINE_TIME
from arni_core.helper import SEUID, SEUID_DELIMITER
from node_item import NodeItem
from rospy.timer import Timer
from rospy.impl.tcpros_service import ServiceProxy
from rospy.rostime import Duration
from rospy.rostime import Time
from connection_item import ConnectionItem
import re
class TopicItem(AbstractItem):
"""
A TopicItem represents a specific topic which contains many connections and has attributes like the number of sent messages.
"""
def __init__(self, logger, seuid, first_message, parent=None):
"""Initializes the TopicItem.
:param seuid: the seuid of the item
:type seuid: str
:param logger: a logger where to log when special events occur
:type logger: ModelLogger
:param parent: the parent-item
:type parent: AbstractItem
"""
AbstractItem.__init__(self, logger, seuid, parent)
self.__parent = parent
self._type = "topic"
self.add_keys=["dropped_msgs", "traffic", "bandwidth", "frequency"]
self.avg_keys=["period_mean", "period_stddev", "stamp_age_mean", "stamp_age_stddev"]
self.max_keys=["period_max", "stamp_age_max"]
self._attributes = []
self._attributes.extend(["dropped_msgs", "traffic",
"period_mean", "period_stddev", "period_max", "stamp_age_mean",
"stamp_age_stddev", "stamp_age_max", "bandwidth", "frequency"])
for item in self._attributes:
self._add_data_list(item)
self.__calculated_data = {}
for key in self._attributes:
self.__calculated_data[key] = []
self.__calculated_data["window_stop"] = []
self.__calculated_data["window_start"] = []
for item in self._attributes:
self._rated_attributes.append(item + ".actual_value")
self._rated_attributes.append(item + ".expected_value")
self._rated_attributes.append(item + ".state")
for item in self._rated_attributes:
self._add_rated_data_list(item)
self._logger.log("info", Time.now(), seuid, "Created a new TopicItem")
self.__timer = Timer(Duration(nsecs=TOPIC_AGGREGATION_FREQUENCY), self.__aggregate_topic_data)
self.tree_items = []
self.__aggregation_window = rospy.get_param("~aggregation_window", 5)
# def _updateTimer(self, event):
# """
# Updates the timer to the last changed status. If it
# :return:
# """
# self.alive = False
# # TODO this can be very expensive - is there a better way?
# for item in self.tree_items:
# for child in item.get_childs():
# if child.alive:
# self.alive = True
# break
#
# if not self.alive:
# self.set_state("offline")
def get_child(self, row, parent=None):
"""
Returns the child at the position row.
:param row: the index of the row
:type row: int
:param parent: the model parent at the given index (not global / logical parent)
:type parent: NodeItem
:returns: the child at the position row
:rtype: AbstractItem
"""
if not isinstance(parent, NodeItem):
print(type(parent))
raise UserWarning
return self.__get_local_childs(parent)[row]
def __get_local_childs(self, parent=None):
"""
Returns all childs of the topic item at the given position in the gui.
:param parent: the model parent at the given index (not global / logical parent)
:type parent: NodeItem
:param sub_activated: Defines if subscriber shall be shown too.
:returns: the child at the position row
:rtype: AbstractItem
"""
childs = []
if parent is not None:
# a specific parent has been chosen - we have to use it to display the correct connection items
# use the seuid to determine the node and compare this to the parts in the connections item (child of this
# item.
seuid = parent.get_seuid()
seuid_helper = SEUID()
seuid_helper.identifier = seuid
seuid_helper.set_fields()
node = seuid_helper.node
for child in self.get_childs():
child_seuid = child.get_seuid()
seuid_helper.identifier = child_seuid
seuid_helper.set_fields()
node_comp = seuid_helper.publisher
# do the check on the publisher
if node == node_comp:
# match.
childs.append(child)
continue
return childs
else:
return self._child_items
def row(self, parent=None):
"""
Returns the index of the Item.
:returns: the index of the Item
:rtype: int
"""
if parent:
return parent.get_childs().index(self)
elif self.__parent:
return self.__parent.get_childs().index(self)
def child_count(self, parent=None):
"""
Returns the number of children from the AbstractItem.
:returns: number of childs
:rtype: int
"""
return len(self.__get_local_childs(parent))
def get_childs(self, parent=None):
"""
Returns a list with all children.
WARNING: This method is identical to the one in AbstractItem (superclass); it is repeated here only to warn
against using it in the GUI context. A topic item shows only some of its connections depending on the parent
node, and that filtering is *not* implemented for this function.
:returns: list of children
:rtype: list
"""
if parent is not None:
return self.__get_local_childs(parent)
return self._child_items
def get_items_younger_than(self, time, *args):
"""
Used to overwrite the standard implementation in AbstractItem. This method provides the data from the
calculated data and *not* from the raw input. This is especially wanted when plotting
:param time:
:param args:
:return:
"""
self._data_lock.acquire()
return_values = {}
if args:
for key in args:
return_values[key] = None
if "window_stop" not in args:
return_values["window_stop"] = None
else:
for key in self.__calculated_data:
return_values[key] = None
breakpoint = 0
list_of_time = self.__calculated_data["window_stop"]
length = len(list_of_time)
if length != 0:
if list_of_time[0] >= time:
for key in return_values:
try:
return_values[key] = self.__calculated_data[key][:]
except KeyError:
print("Accessed key was: " + key + ". Available keys are: ")
print(self.__calculated_data)
raise
else:
for i in range(length - 1, -1, -1):
if list_of_time[i] < time:
breakpoint = i + 1
for key in return_values:
if key in self.__calculated_data:
return_values[key] = self.__calculated_data[key][breakpoint:length]
else:
raise IndexError("IndexError! length of the list {0}, accessed index {1}. length of data"
" at given point {2}, key is {3}".format(length, i,
len(self.__calculated_data[key]), key))
break
self._data_lock.release()
return return_values
def get_raw_items_younger_than(self, time, *args):
"""
Returns all entries that are younger than time either in all keys of self._data or if args not empty in
all key corresponding to args.
Warning: Method assumes data is sorted by time if this is not true will return too few or too much data.
:param time: the lower bound in seconds
:type time: rospy.Time
:param args: the keys to the dict
:type args: str
:returns: dict of lists
:rtype: dict
:raises KeyError: if an element in args cannot be found in any of the dictionaries (data vs rated data)
"""
return_values = {}
if args:
for key in args:
return_values[key] = None
if "window_stop" not in args:
return_values["window_stop"] = None
else:
for key in self._data:
return_values[key] = None
breakpoint = 0
list_of_time = self._data["window_stop"]
length = len(list_of_time)
if length != 0:
if list_of_time[0] >= time:
for key in return_values:
try:
return_values[key] = self._data[key][:]
except KeyError:
print("Accessed key was: " + key + ". Available keys are: ")
print(self._data)
raise
else:
for i in range(length - 1, -1, -1):
if list_of_time[i] < time:
breakpoint = i + 1
for key in return_values:
if key in self._data:
return_values[key] = self._data[key][breakpoint:length]
else:
raise IndexError("IndexError! length of the list {0}, accessed index {1}. length of data"
" at given point {2}, key is {3}".format(length, i, len(self._data[key]), key))
break
return return_values
def __aggregate_topic_data(self, event):
"""
Aggregates the topic every TOPIC_AGGREGATION_FREQUENCY nsecs and pushes the updated data to
self.__calculated_data.
:param event: containing information when this method was called - not used but needed for the interface
"""
aggregated_data = {}
for key in self._attributes:
aggregated_data[key] = 0
for key in self.__calculated_data.keys():
self.__calculated_data[key].append(0)
child_count = 0
for connection in self.get_childs(): # !assuming all childs are connection items!
values = connection.aggregate_data(self.__aggregation_window) # average over N seconds
if values:
for key in self.add_keys:
aggregated_data[key] += values[key]
for key in self.max_keys:
if values[key] > aggregated_data[key]:
aggregated_data[key] = values[key]
for key in self.avg_keys:
aggregated_data[key] += values[key]
child_count += 1
for key in self.avg_keys:
if child_count == 0:
aggregated_data[key] = 0
else:
aggregated_data[key] /= child_count
self._data_lock.acquire()
for key in self._attributes:
self.__calculated_data[key][-1] = aggregated_data[key]
self.__calculated_data["window_start"][-1] = Time.now()
self.__calculated_data["window_stop"][-1] = Time.now() - (Duration(secs=1) if int(Duration(secs=1).to_sec()) <= int(Time.now().to_sec()) else Time(0))
self._data_lock.release()
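# Worked example (two hypothetical connection children): if they report
# bandwidth 2.0 and 3.0, period_max 0.1 and 0.3, and period_mean 0.05 and
# 0.15, the aggregate stored above is bandwidth 5.0 (add_keys are summed),
# period_max 0.3 (max_keys keep the maximum) and period_mean 0.10 (avg_keys
# are divided by child_count = 2).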
def execute_action(self, action):
"""
Not senseful, Topics cannot execute actions.
:param action: action to be executed
:type action: RemoteAction
"""
pass
def get_detailed_data(self):
"""
Returns the detailed data of the HostItem.
:returns: detailed data
:rtype: str
"""
data_dict = self.get_latest_data()
for key in self.__calculated_data:
if self.__calculated_data[key]:
data_dict[key] = self.__calculated_data[key][-1]
else:
data_dict[key] = self.tr("Currently no value available")
data_dict["state"] = self.get_state()
content = "<p class=\"detailed_data\">"
content += self.get_erroneous_entries()
content += "Rounded to a second:<br>"
if "frequency" in self._attributes:
content += self.tr("frequency") + ": " + prepare_number_for_representation(data_dict["frequency"]) \
+ " " + self.tr("frequency_unit") + " <br>"
content += self.tr("bandwidth") + ": " + prepare_number_for_representation(data_dict["bandwidth"]) \
+ " " + self.tr("bandwidth_unit") + " <br>"
content += self.tr("dropped_msgs") + ": " + prepare_number_for_representation(data_dict["dropped_msgs"]) \
+ " " + self.tr("dropped_msgs_unit") + " <br>"
content += self.tr("period_max") + ": " + prepare_number_for_representation(data_dict["period_max"]) \
+ " " + self.tr("period_max_unit") + " <br>"
content += self.tr("stamp_age_max") + ": " + prepare_number_for_representation(data_dict["stamp_age_max"]) \
+ " " + self.tr("stamp_age_max_unit") + " <br>"
content += "</p>"
return content
def get_plotable_items(self):
"""
Returns items for the plot.
:returns: str[]
"""
if "frequency" in self.__calculated_data:
return ["dropped_msgs","stamp_age_max", "period_max",
"bandwidth", "frequency"]
else:
return ["dropped_msgs", "traffic", "stamp_age_max", "period_max", "bandwidth"]
def get_short_data(self):
"""
Returns a shortend version of the item data.
:returns: data of the item
:rtype: str
"""
data_dict = {}
for key in self.__calculated_data:
if self.__calculated_data[key]:
data_dict[key] = self.__calculated_data[key][-1]
else:
data_dict[key] = self.tr("Currently no value available")
data_dict["window_stop"] = Time(0)
data_dict["window_start"] = Time(0)
data_dict["state"] = self.get_state()
try:
if data_dict["window_stop"] == Time(0):
return "No data yet"
elif (Time.now() - data_dict["window_stop"]) > Duration(MAXIMUM_OFFLINE_TIME):
# last entry was more than MAXIMUM_OFFLINE_TIME ago, it could be offline!
return "No data since " + prepare_number_for_representation(Time.now() - data_dict["window_stop"]) \
+ " seconds"
except:
print(data_dict["window_stop"])
raise UserWarning
content = ""
if data_dict["state"] == "error":
content += self.get_erroneous_entries_for_log()
else:
content += self.tr("frequency") + ": " + prepare_number_for_representation(
data_dict["frequency"]) + " " \
+ self.tr("frequency_unit") + " - "
content += self.tr("bandwidth") + ": " + prepare_number_for_representation(data_dict["bandwidth"]) \
+ " " + self.tr("bandwidth_unit") + " - "
content += self.tr("dropped_msgs") + ": " + prepare_number_for_representation(data_dict["dropped_msgs"]) \
+ " " + self.tr("dropped_msgs_unit")
return content
def can_execute_actions(self):
"""
This item cannot execute actions, so it returns False
:return: False
"""
return False
def get_list_items(self):
return []
def get_time_items(self):
return ["stamp_age_mean", "stamp_age_max"]
|
bsd-2-clause
| -6,375,115,065,249,970,000
| 35.251101
| 158
| 0.537429
| false
| 4.265941
| false
| false
| false
|
autopulated/yotta
|
yotta/main.py
|
2
|
10966
|
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
from yotta.lib import lazyregex #pylint: disable=unused-import
from yotta.lib import errors #pylint: disable=unused-import
# NOTE: argcomplete must be first!
# argcomplete, pip install argcomplete, tab-completion for argparse, Apache-2
import argcomplete
# standard library modules, , ,
import argparse
import sys
import os
# globalconf, share global arguments between modules, internal
import yotta.lib.globalconf as globalconf
# hook to support coverage information when yotta runs itself during tests:
if 'COVERAGE_PROCESS_START' in os.environ:
import coverage
coverage.process_startup()
# set __version__ using the same file that's read by setup.py when installing:
with open(os.path.join(os.path.dirname(__file__), 'version.txt')) as _version_f:
__version__ = _version_f.read().strip()
def splitList(l, at_value):
r = [[]]
for x in l:
if x == at_value:
r.append(list())
else:
r[-1].append(x)
return r
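# Worked example: splitList(['yotta', 'build', '--', '-v'], '--') returns
# [['yotta', 'build'], ['-v']]; main() below uses this to separate yotta's
# own arguments from raw arguments forwarded to the underlying build tool.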
def _handleUnhandledRequestExceptions(fn):
import functools
@functools.wraps(fn)
def wrapped(*args, **kwargs):
# requests, apache2
import requests
try:
return fn(*args, **kwargs)
except requests.exceptions.RequestException as e:
import logging
if e.request is not None:
logging.critical('%s %s failed with status %s', e.request.method, e.request.url, e.response.status_code)
sys.exit(1)
else:
raise
return wrapped
def _exitSilentlyOnUnhandledPipeError(fn):
import functools
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except IOError as e:
import errno
if e.errno == errno.EPIPE:
# unhandled pipe error -> exit silently, but with an error code
sys.exit(1)
else:
raise
return wrapped
@_exitSilentlyOnUnhandledPipeError
@_handleUnhandledRequestExceptions
def main():
# standard library modules, , ,
import logging
from functools import reduce
# logging setup, , setup the logging system, internal
from yotta.lib import logging_setup
# options, , common argument parser options, internal
import yotta.options as options
logging_setup.init(level=logging.INFO, enable_subsystems=None, plain=False)
# we override many argparse things to make options more re-usable across
# subcommands, and allow lazy loading of subcommand modules:
parser = options.parser.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description='Build software using re-usable components.\n'+
'For more detailed help on each subcommand, run: yotta <subcommand> --help'
)
subparser = parser.add_subparsers(dest='subcommand_name', metavar='<subcommand>')
parser.add_argument('--version', action='version', version=__version__,
help='display the version'
)
# add re-usable top-level options which subcommands may also accept
options.verbosity.addTo(parser)
options.debug.addTo(parser)
options.plain.addTo(parser)
options.noninteractive.addTo(parser)
options.registry.addTo(parser)
options.target.addTo(parser)
options.config.addTo(parser)
def addParser(name, module_name, description, help=None):
if help is None:
help = description
def onParserAdded(parser):
import importlib
module = importlib.import_module('.' + module_name, 'yotta')
module.addOptions(parser)
parser.set_defaults(command=module.execCommand)
subparser.add_parser_async(
name, description=description, help=help,
formatter_class=argparse.RawTextHelpFormatter,
callback=onParserAdded
)
addParser('search', 'search',
'Search for open-source modules and targets that have been published '+
'to the yotta registry (with yotta publish). See help for `yotta '+
'install` for installing modules, and for `yotta target` for '+
'switching targets.',
'Search for published modules and targets'
)
addParser('init', 'init', 'Create a new module.')
addParser('install', 'install',
'Add a specific module as a dependency, and download it, or install all '+
'dependencies for the current module. Use yotta install '+
'modulename@version to install a specific version.'
)
addParser('build', 'build',
'Build the current module. Options can be passed to the underlying '+
'build tool by passing them after --, e.g. to do a verbose build '+
'which will display each command as it is run, use:\n'+
' yotta build -- -v\n\n'+
'The programs or libraries to build can be specified (by default '+
'only the libraries needed by the current module and the current '+
"module's own tests are built). For example, to build the tests of "+
'all dependencies, run:\n yotta build all_tests\n\n',
'Build the current module.'
)
addParser('version', 'version', 'Bump the module version, or (with no arguments) display the current version.')
addParser('link', 'link',
'Symlink a module to be used into another module.\n\n'+
'Use: "yotta link" in a module to link it globally, then use "yotta '+
'link <modulename>" to link it into the module where you want to use '+
'it.\n\n'+
'"yotta link ../path/to/module" is also supported, which will create '+
'the global link and a link into the current module in a single step.',
'Symlink a module'
)
addParser('link-target', 'link_target',
'Symlink a target to be used into another module.\n\n'+
'Use: "yotta link" in a target to link it globally, then use "yotta '+
'link-target <targetname>" to link it into the module where you want to use '+
'it.\n\n'+
'"yotta link ../path/to/target" is also supported, which will create '+
'the global link and a link into the current module in a single step.',
'Symlink a target'
)
addParser('update', 'update', 'Update dependencies for the current module, or a specific module.')
addParser('target', 'target', 'Set or display the target device.')
addParser('debug', 'debug', 'Attach a debugger to the current target. Requires target support.')
addParser('test', 'test_subcommand',
'Run the tests for the current module on the current target. A build '+
'will be run first, and options to the build subcommand are also '+
'accepted by test.\nThis subcommand requires the target to provide a '+
'"test" script that will be used to run each test. Modules may also '+
'define a "testReporter" script, which will be piped the output from '+
'each test, and may produce a summary.',
'Run the tests for the current module on the current target. Requires target support for cross-compiling targets.'
)
addParser('start', 'start',
'Launch the compiled program (available for executable modules only). Requires target support for cross-compiling targets.'
)
addParser('publish', 'publish', 'Publish a module or target to the public registry.')
addParser('unpublish', 'unpublish', 'Un-publish a recently published module or target.')
addParser('login', 'login', 'Authorize for access to private github repositories and publishing to the yotta registry.')
addParser('logout', 'logout', 'Remove saved authorization token for the current user.')
addParser('whoami', 'whoami', 'Display who the currently logged in user is (if any).')
addParser('list', 'list', 'List the dependencies of the current module, or the inherited targets of the current target.')
addParser('outdated', 'outdated', 'Display information about dependencies which have newer versions available.')
addParser('uninstall', 'uninstall', 'Remove a specific dependency of the current module, both from module.json and from disk.')
addParser('remove', 'remove',
'Remove the downloaded version of a dependency module or target, or '+
'un-link a linked module or target (see yotta link --help for details '+
'of linking). This command does not modify your module.json file.',
'Remove or unlink a dependency without removing it from module.json.'
)
addParser('owners', 'owners', 'Add/remove/display the owners of a module or target.')
addParser('licenses', 'licenses', 'List the licenses of the current module and its dependencies.')
addParser('clean', 'clean', 'Remove files created by yotta and the build.')
addParser('config', 'config', 'Display the target configuration info.')
addParser('shrinkwrap', 'shrinkwrap', 'Create a yotta-shrinkwrap.json file to freeze dependency versions.')
# short synonyms, subparser.choices is a dictionary, so use update() to
# merge in the keys from another dictionary
short_commands = {
'up':subparser.choices['update'],
'in':subparser.choices['install'],
'ln':subparser.choices['link'],
'v':subparser.choices['version'],
'ls':subparser.choices['list'],
'rm':subparser.choices['remove'],
'unlink':subparser.choices['remove'],
'unlink-target':subparser.choices['remove'],
'owner':subparser.choices['owners'],
'lics':subparser.choices['licenses'],
'who':subparser.choices['whoami'],
'run':subparser.choices['start']
}
subparser.choices.update(short_commands)
# split the args into those before and after any '--'
# argument - subcommands get raw access to arguments following '--', and
# may pass them on to (for example) the build tool being used
split_args = splitList(sys.argv, '--')
following_args = reduce(lambda x,y: x + ['--'] + y, split_args[1:], [])[1:]
# complete all the things :)
argcomplete.autocomplete(
parser,
exclude = list(short_commands.keys()) + ['-d', '--debug', '-v', '--verbose']
)
# when args are passed directly we need to strip off the program name
# (hence [:1])
args = parser.parse_args(split_args[0][1:])
# set global arguments that are shared everywhere and never change
globalconf.set('interactive', args.interactive)
globalconf.set('plain', args.plain)
# finally, do stuff!
if 'command' not in args:
parser.print_usage()
sys.exit(0)
try:
status = args.command(args, following_args)
except KeyboardInterrupt:
logging.warning('interrupted')
status = -1
sys.exit(status or 0)
|
apache-2.0
| 3,106,736,014,995,484,700
| 43.217742
| 131
| 0.654933
| false
| 4.156937
| true
| false
| false
|
andrewyoung1991/scons
|
src/engine/SCons/Platform/aix.py
|
1
|
3083
|
"""engine.SCons.Platform.aix
Platform-specific initialization for IBM AIX systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import subprocess
import posix
import SCons.Util
import SCons.Action
def get_xlc(env, xlc=None, packages=[]):
# Use the AIX package installer tool lslpp to figure out where a
# given xl* compiler is installed and what version it is.
xlcPath = None
xlcVersion = None
if xlc is None:
xlc = env.get('CC', 'xlc')
if SCons.Util.is_List(xlc):
xlc = xlc[0]
for package in packages:
# find the installed filename, which may be a symlink as well
pipe = SCons.Action._subproc(env, ['lslpp', '-fc', package],
stdin = 'devnull',
stderr = 'devnull',
stdout = subprocess.PIPE)
# output of lslpp is something like this:
# #Path:Fileset:File
# /usr/lib/objrepos:vac.C 6.0.0.0:/usr/vac/exe/xlCcpp
# /usr/lib/objrepos:vac.C 6.0.0.0:/usr/vac/bin/xlc_r -> /usr/vac/bin/xlc
for line in pipe.stdout:
if xlcPath:
continue # read everything to let lslpp terminate
fileset, filename = line.split(':')[1:3]
filename = filename.split()[0]
if ('/' in xlc and filename == xlc) \
or ('/' not in xlc and filename.endswith('/' + xlc)):
xlcVersion = fileset.split()[1]
xlcPath, sep, xlc = filename.rpartition('/')
pass
pass
return (xlcPath, xlc, xlcVersion)
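# Illustrative example (not part of the original module; the package names
# below are typical AIX filesets for the C compiler and are assumptions):
# locating the installed xlc compiler from a construction environment and
# prepending its directory to the executable search path.
#
#     path, name, version = get_xlc(env, 'xlc', ['vac.C', 'vacpp.cmp.core'])
#     if path:
#         env.PrependENVPath('PATH', path)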
def generate(env):
posix.generate(env)
#Based on AIX 5.2: ARG_MAX=24576 - 3000 for environment expansion
env['MAXLINELENGTH'] = 21576
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
| -2,343,016,563,431,805,000
| 35.270588
| 84
| 0.66591
| false
| 3.782822
| false
| false
| false
|
smips/Temporary_Insanity
|
TI/src/libtcodpy.py
|
1
|
62671
|
#
# libtcod 1.5.1 python wrapper
# Copyright (c) 2008,2009,2010 Jice & Mingos
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Jice or Mingos may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY JICE AND MINGOS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JICE OR MINGOS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import ctypes
import struct
from ctypes import *
if not hasattr(ctypes, "c_bool"): # for Python < 2.6
c_bool = c_uint8
try: #import NumPy if available
import numpy
numpy_available = True
except ImportError:
numpy_available = False
LINUX=False
MAC=False
MINGW=False
MSVC=False
HAIKU=False
if sys.platform.find('linux') != -1:
_lib = ctypes.cdll['./libtcod.so']
LINUX=True
elif sys.platform.find('darwin') != -1:
_lib = ctypes.cdll['./libtcod.dylib']
MAC = True
elif sys.platform.find('haiku') != -1:
_lib = ctypes.cdll['./libtcod.so']
HAIKU = True
else:
try:
_lib = ctypes.cdll['./libtcod-mingw.dll']
MINGW=True
except WindowsError:
_lib = ctypes.cdll['./libtcod-VS.dll']
MSVC=True
# On Windows, ctypes doesn't work well with function returning structs,
    # so we have to use the _wrapper functions instead
_lib.TCOD_color_multiply = _lib.TCOD_color_multiply_wrapper
_lib.TCOD_color_add = _lib.TCOD_color_add_wrapper
_lib.TCOD_color_multiply_scalar = _lib.TCOD_color_multiply_scalar_wrapper
_lib.TCOD_color_subtract = _lib.TCOD_color_subtract_wrapper
_lib.TCOD_color_lerp = _lib.TCOD_color_lerp_wrapper
_lib.TCOD_console_get_default_background = _lib.TCOD_console_get_default_background_wrapper
_lib.TCOD_console_get_default_foreground = _lib.TCOD_console_get_default_foreground_wrapper
_lib.TCOD_console_get_char_background = _lib.TCOD_console_get_char_background_wrapper
_lib.TCOD_console_get_char_foreground = _lib.TCOD_console_get_char_foreground_wrapper
_lib.TCOD_console_get_fading_color = _lib.TCOD_console_get_fading_color_wrapper
_lib.TCOD_image_get_pixel = _lib.TCOD_image_get_pixel_wrapper
_lib.TCOD_image_get_mipmap_pixel = _lib.TCOD_image_get_mipmap_pixel_wrapper
_lib.TCOD_parser_get_color_property = _lib.TCOD_parser_get_color_property_wrapper
HEXVERSION = 0x010501
STRVERSION = "1.5.1"
TECHVERSION = 0x01050103
############################
# color module
############################
class Color(Structure):
_fields_ = [('r', c_uint8),
('g', c_uint8),
('b', c_uint8),
]
def __eq__(self, c):
return _lib.TCOD_color_equals(self, c)
def __mul__(self, c):
if isinstance(c,Color):
return _lib.TCOD_color_multiply(self, c)
else:
return _lib.TCOD_color_multiply_scalar(self, c_float(c))
def __add__(self, c):
return _lib.TCOD_color_add(self, c)
def __sub__(self, c):
return _lib.TCOD_color_subtract(self, c)
def __repr__(self):
return "Color(%d,%d,%d)" % (self.r, self.g, self.b)
def __getitem__(self, i):
if type(i) == str:
return getattr(self, i)
else:
return getattr(self, "rgb"[i])
def __setitem__(self, i, c):
if type(i) == str:
setattr(self, i, c)
else:
setattr(self, "rgb"[i], c)
def __iter__(self):
yield self.r
yield self.g
yield self.b
# Should be valid on any platform, check it! Has to be done after Color is defined.
if MAC:
from cprotos import setup_protos
setup_protos(_lib)
_lib.TCOD_color_equals.restype = c_bool
_lib.TCOD_color_multiply.restype = Color
_lib.TCOD_color_multiply_scalar.restype = Color
_lib.TCOD_color_add.restype = Color
_lib.TCOD_color_subtract.restype = Color
# default colors
# grey levels
black=Color(0,0,0)
darkest_grey=Color(31,31,31)
darker_grey=Color(63,63,63)
dark_grey=Color(95,95,95)
grey=Color(127,127,127)
light_grey=Color(159,159,159)
lighter_grey=Color(191,191,191)
lightest_grey=Color(223,223,223)
darkest_gray=Color(31,31,31)
darker_gray=Color(63,63,63)
dark_gray=Color(95,95,95)
gray=Color(127,127,127)
light_gray=Color(159,159,159)
lighter_gray=Color(191,191,191)
lightest_gray=Color(223,223,223)
white=Color(255,255,255)
# sepia
darkest_sepia=Color(31,24,15)
darker_sepia=Color(63,50,31)
dark_sepia=Color(94,75,47)
sepia=Color(127,101,63)
light_sepia=Color(158,134,100)
lighter_sepia=Color(191,171,143)
lightest_sepia=Color(222,211,195)
#standard colors
red=Color(255,0,0)
flame=Color(255,63,0)
orange=Color(255,127,0)
amber=Color(255,191,0)
yellow=Color(255,255,0)
lime=Color(191,255,0)
chartreuse=Color(127,255,0)
green=Color(0,255,0)
sea=Color(0,255,127)
turquoise=Color(0,255,191)
cyan=Color(0,255,255)
sky=Color(0,191,255)
azure=Color(0,127,255)
blue=Color(0,0,255)
han=Color(63,0,255)
violet=Color(127,0,255)
purple=Color(191,0,255)
fuchsia=Color(255,0,255)
magenta=Color(255,0,191)
pink=Color(255,0,127)
crimson=Color(255,0,63)
# dark colors
dark_red=Color(191,0,0)
dark_flame=Color(191,47,0)
dark_orange=Color(191,95,0)
dark_amber=Color(191,143,0)
dark_yellow=Color(191,191,0)
dark_lime=Color(143,191,0)
dark_chartreuse=Color(95,191,0)
dark_green=Color(0,191,0)
dark_sea=Color(0,191,95)
dark_turquoise=Color(0,191,143)
dark_cyan=Color(0,191,191)
dark_sky=Color(0,143,191)
dark_azure=Color(0,95,191)
dark_blue=Color(0,0,191)
dark_han=Color(47,0,191)
dark_violet=Color(95,0,191)
dark_purple=Color(143,0,191)
dark_fuchsia=Color(191,0,191)
dark_magenta=Color(191,0,143)
dark_pink=Color(191,0,95)
dark_crimson=Color(191,0,47)
# darker colors
darker_red=Color(127,0,0)
darker_flame=Color(127,31,0)
darker_orange=Color(127,63,0)
darker_amber=Color(127,95,0)
darker_yellow=Color(127,127,0)
darker_lime=Color(95,127,0)
darker_chartreuse=Color(63,127,0)
darker_green=Color(0,127,0)
darker_sea=Color(0,127,63)
darker_turquoise=Color(0,127,95)
darker_cyan=Color(0,127,127)
darker_sky=Color(0,95,127)
darker_azure=Color(0,63,127)
darker_blue=Color(0,0,127)
darker_han=Color(31,0,127)
darker_violet=Color(63,0,127)
darker_purple=Color(95,0,127)
darker_fuchsia=Color(127,0,127)
darker_magenta=Color(127,0,95)
darker_pink=Color(127,0,63)
darker_crimson=Color(127,0,31)
# darkest colors
darkest_red=Color(63,0,0)
darkest_flame=Color(63,15,0)
darkest_orange=Color(63,31,0)
darkest_amber=Color(63,47,0)
darkest_yellow=Color(63,63,0)
darkest_lime=Color(47,63,0)
darkest_chartreuse=Color(31,63,0)
darkest_green=Color(0,63,0)
darkest_sea=Color(0,63,31)
darkest_turquoise=Color(0,63,47)
darkest_cyan=Color(0,63,63)
darkest_sky=Color(0,47,63)
darkest_azure=Color(0,31,63)
darkest_blue=Color(0,0,63)
darkest_han=Color(15,0,63)
darkest_violet=Color(31,0,63)
darkest_purple=Color(47,0,63)
darkest_fuchsia=Color(63,0,63)
darkest_magenta=Color(63,0,47)
darkest_pink=Color(63,0,31)
darkest_crimson=Color(63,0,15)
# light colors
light_red=Color(255,114,114)
light_flame=Color(255,149,114)
light_orange=Color(255,184,114)
light_amber=Color(255,219,114)
light_yellow=Color(255,255,114)
light_lime=Color(219,255,114)
light_chartreuse=Color(184,255,114)
light_green=Color(114,255,114)
light_sea=Color(114,255,184)
light_turquoise=Color(114,255,219)
light_cyan=Color(114,255,255)
light_sky=Color(114,219,255)
light_azure=Color(114,184,255)
light_blue=Color(114,114,255)
light_han=Color(149,114,255)
light_violet=Color(184,114,255)
light_purple=Color(219,114,255)
light_fuchsia=Color(255,114,255)
light_magenta=Color(255,114,219)
light_pink=Color(255,114,184)
light_crimson=Color(255,114,149)
#lighter colors
lighter_red=Color(255,165,165)
lighter_flame=Color(255,188,165)
lighter_orange=Color(255,210,165)
lighter_amber=Color(255,232,165)
lighter_yellow=Color(255,255,165)
lighter_lime=Color(232,255,165)
lighter_chartreuse=Color(210,255,165)
lighter_green=Color(165,255,165)
lighter_sea=Color(165,255,210)
lighter_turquoise=Color(165,255,232)
lighter_cyan=Color(165,255,255)
lighter_sky=Color(165,232,255)
lighter_azure=Color(165,210,255)
lighter_blue=Color(165,165,255)
lighter_han=Color(188,165,255)
lighter_violet=Color(210,165,255)
lighter_purple=Color(232,165,255)
lighter_fuchsia=Color(255,165,255)
lighter_magenta=Color(255,165,232)
lighter_pink=Color(255,165,210)
lighter_crimson=Color(255,165,188)
# lightest colors
lightest_red=Color(255,191,191)
lightest_flame=Color(255,207,191)
lightest_orange=Color(255,223,191)
lightest_amber=Color(255,239,191)
lightest_yellow=Color(255,255,191)
lightest_lime=Color(239,255,191)
lightest_chartreuse=Color(223,255,191)
lightest_green=Color(191,255,191)
lightest_sea=Color(191,255,223)
lightest_turquoise=Color(191,255,239)
lightest_cyan=Color(191,255,255)
lightest_sky=Color(191,239,255)
lightest_azure=Color(191,223,255)
lightest_blue=Color(191,191,255)
lightest_han=Color(207,191,255)
lightest_violet=Color(223,191,255)
lightest_purple=Color(239,191,255)
lightest_fuchsia=Color(255,191,255)
lightest_magenta=Color(255,191,239)
lightest_pink=Color(255,191,223)
lightest_crimson=Color(255,191,207)
# desaturated colors
desaturated_red=Color(127,63,63)
desaturated_flame=Color(127,79,63)
desaturated_orange=Color(127,95,63)
desaturated_amber=Color(127,111,63)
desaturated_yellow=Color(127,127,63)
desaturated_lime=Color(111,127,63)
desaturated_chartreuse=Color(95,127,63)
desaturated_green=Color(63,127,63)
desaturated_sea=Color(63,127,95)
desaturated_turquoise=Color(63,127,111)
desaturated_cyan=Color(63,127,127)
desaturated_sky=Color(63,111,127)
desaturated_azure=Color(63,95,127)
desaturated_blue=Color(63,63,127)
desaturated_han=Color(79,63,127)
desaturated_violet=Color(95,63,127)
desaturated_purple=Color(111,63,127)
desaturated_fuchsia=Color(127,63,127)
desaturated_magenta=Color(127,63,111)
desaturated_pink=Color(127,63,95)
desaturated_crimson=Color(127,63,79)
# metallic
brass=Color(191,151,96)
copper=Color(197,136,124)
gold=Color(229,191,0)
silver=Color(203,203,203)
# miscellaneous
celadon=Color(172,255,175)
peach=Color(255,159,127)
# color functions
_lib.TCOD_color_lerp.restype = Color
def color_lerp(c1, c2, a):
return _lib.TCOD_color_lerp(c1, c2, c_float(a))
def color_set_hsv(c, h, s, v):
_lib.TCOD_color_set_HSV(byref(c), c_float(h), c_float(s), c_float(v))
def color_get_hsv(c):
h = c_float()
s = c_float()
v = c_float()
_lib.TCOD_color_get_HSV(c, byref(h), byref(s), byref(v))
return h.value, s.value, v.value
def color_scale_HSV(c, scoef, vcoef) :
_lib.TCOD_color_scale_HSV(byref(c),c_float(scoef),c_float(vcoef))
def color_gen_map(colors, indexes):
ccolors = (Color * len(colors))(*colors)
cindexes = (c_int * len(indexes))(*indexes)
cres = (Color * (max(indexes) + 1))()
_lib.TCOD_color_gen_map(cres, len(colors), ccolors, cindexes)
return cres
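# Illustrative sketch (not part of the original wrapper): Color instances
# support arithmetic, indexing and iteration, and the helpers above wrap the
# corresponding TCOD_color_* routines.
def _example_color_usage():
    c = Color(200, 100, 50)
    darker = c * 0.5                          # scalar multiply via TCOD_color_multiply_scalar
    toward_white = color_lerp(c, white, 0.5)  # linear interpolation toward white
    h, s, v = color_get_hsv(c)                # HSV decomposition
    r, g, b = c                               # Color is iterable
    return darker, toward_white, (h, s, v), (r, g, b)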
############################
# console module
############################
class Key(Structure):
_fields_=[('vk', c_int),
('c', c_uint8),
('pressed', c_bool),
('lalt', c_bool),
('lctrl', c_bool),
('ralt', c_bool),
('rctrl', c_bool),
('shift', c_bool),
]
class ConsoleBuffer:
# simple console that allows direct (fast) access to cells. simplifies
# use of the "fill" functions.
def __init__(self, width, height, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
# initialize with given width and height. values to fill the buffer
# are optional, defaults to black with no characters.
n = width * height
self.width = width
self.height = height
self.clear(back_r, back_g, back_b, fore_r, fore_g, fore_b, char)
def clear(self, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
# clears the console. values to fill it with are optional, defaults
# to black with no characters.
n = self.width * self.height
self.back_r = [back_r] * n
self.back_g = [back_g] * n
self.back_b = [back_b] * n
self.fore_r = [fore_r] * n
self.fore_g = [fore_g] * n
self.fore_b = [fore_b] * n
self.char = [ord(char)] * n
def copy(self):
# returns a copy of this ConsoleBuffer.
other = ConsoleBuffer(0, 0)
other.width = self.width
other.height = self.height
other.back_r = list(self.back_r) # make explicit copies of all lists
other.back_g = list(self.back_g)
other.back_b = list(self.back_b)
other.fore_r = list(self.fore_r)
other.fore_g = list(self.fore_g)
other.fore_b = list(self.fore_b)
other.char = list(self.char)
return other
def set_fore(self, x, y, r, g, b, char):
# set the character and foreground color of one cell.
i = self.width * y + x
self.fore_r[i] = r
self.fore_g[i] = g
self.fore_b[i] = b
self.char[i] = ord(char)
def set_back(self, x, y, r, g, b):
# set the background color of one cell.
i = self.width * y + x
self.back_r[i] = r
self.back_g[i] = g
self.back_b[i] = b
def set(self, x, y, back_r, back_g, back_b, fore_r, fore_g, fore_b, char):
# set the background color, foreground color and character of one cell.
i = self.width * y + x
self.back_r[i] = back_r
self.back_g[i] = back_g
self.back_b[i] = back_b
self.fore_r[i] = fore_r
self.fore_g[i] = fore_g
self.fore_b[i] = fore_b
self.char[i] = ord(char)
def blit(self, dest, fill_fore=True, fill_back=True):
# use libtcod's "fill" functions to write the buffer to a console.
if (console_get_width(dest) != self.width or
console_get_height(dest) != self.height):
raise ValueError('ConsoleBuffer.blit: Destination console has an incorrect size.')
s = struct.Struct('%di' % len(self.back_r))
if fill_back:
_lib.TCOD_console_fill_background(dest, (c_int * len(self.back_r))(*self.back_r), (c_int * len(self.back_g))(*self.back_g), (c_int * len(self.back_b))(*self.back_b))
if fill_fore:
_lib.TCOD_console_fill_foreground(dest, (c_int * len(self.fore_r))(*self.fore_r), (c_int * len(self.fore_g))(*self.fore_g), (c_int * len(self.fore_b))(*self.fore_b))
_lib.TCOD_console_fill_char(dest, (c_int * len(self.char))(*self.char))
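# Illustrative sketch (not part of the original wrapper): filling a
# ConsoleBuffer cell by cell and pushing it to an offscreen console with one
# blit call, which is much faster than many console_put_char_ex calls.
def _example_console_buffer_usage():
    offscreen = console_new(16, 16)
    buf = ConsoleBuffer(16, 16, back_r=16, back_g=16, back_b=32)
    buf.set_fore(8, 8, 255, 255, 255, '@')    # white '@' in the middle
    buf.blit(offscreen)                       # one call fills background, foreground and chars
    return offscreen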
_lib.TCOD_console_credits_render.restype = c_bool
_lib.TCOD_console_is_fullscreen.restype = c_bool
_lib.TCOD_console_is_window_closed.restype = c_bool
_lib.TCOD_console_get_default_background.restype = Color
_lib.TCOD_console_get_default_foreground.restype = Color
_lib.TCOD_console_get_char_background.restype = Color
_lib.TCOD_console_get_char_foreground.restype = Color
_lib.TCOD_console_get_fading_color.restype = Color
_lib.TCOD_console_is_key_pressed.restype = c_bool
# background rendering modes
BKGND_NONE = 0
BKGND_SET = 1
BKGND_MULTIPLY = 2
BKGND_LIGHTEN = 3
BKGND_DARKEN = 4
BKGND_SCREEN = 5
BKGND_COLOR_DODGE = 6
BKGND_COLOR_BURN = 7
BKGND_ADD = 8
BKGND_ADDA = 9
BKGND_BURN = 10
BKGND_OVERLAY = 11
BKGND_ALPH = 12
BKGND_DEFAULT=13
def BKGND_ALPHA(a):
return BKGND_ALPH | (int(a * 255) << 8)
def BKGND_ADDALPHA(a):
return BKGND_ADDA | (int(a * 255) << 8)
# non blocking key events types
KEY_PRESSED = 1
KEY_RELEASED = 2
# key codes
KEY_NONE = 0
KEY_ESCAPE = 1
KEY_BACKSPACE = 2
KEY_TAB = 3
KEY_ENTER = 4
KEY_SHIFT = 5
KEY_CONTROL = 6
KEY_ALT = 7
KEY_PAUSE = 8
KEY_CAPSLOCK = 9
KEY_PAGEUP = 10
KEY_PAGEDOWN = 11
KEY_END = 12
KEY_HOME = 13
KEY_UP = 14
KEY_LEFT = 15
KEY_RIGHT = 16
KEY_DOWN = 17
KEY_PRINTSCREEN = 18
KEY_INSERT = 19
KEY_DELETE = 20
KEY_LWIN = 21
KEY_RWIN = 22
KEY_APPS = 23
KEY_0 = 24
KEY_1 = 25
KEY_2 = 26
KEY_3 = 27
KEY_4 = 28
KEY_5 = 29
KEY_6 = 30
KEY_7 = 31
KEY_8 = 32
KEY_9 = 33
KEY_KP0 = 34
KEY_KP1 = 35
KEY_KP2 = 36
KEY_KP3 = 37
KEY_KP4 = 38
KEY_KP5 = 39
KEY_KP6 = 40
KEY_KP7 = 41
KEY_KP8 = 42
KEY_KP9 = 43
KEY_KPADD = 44
KEY_KPSUB = 45
KEY_KPDIV = 46
KEY_KPMUL = 47
KEY_KPDEC = 48
KEY_KPENTER = 49
KEY_F1 = 50
KEY_F2 = 51
KEY_F3 = 52
KEY_F4 = 53
KEY_F5 = 54
KEY_F6 = 55
KEY_F7 = 56
KEY_F8 = 57
KEY_F9 = 58
KEY_F10 = 59
KEY_F11 = 60
KEY_F12 = 61
KEY_NUMLOCK = 62
KEY_SCROLLLOCK = 63
KEY_SPACE = 64
KEY_CHAR = 65
# special chars
# single walls
CHAR_HLINE = 196
CHAR_VLINE = 179
CHAR_NE = 191
CHAR_NW = 218
CHAR_SE = 217
CHAR_SW = 192
CHAR_TEEW = 180
CHAR_TEEE = 195
CHAR_TEEN = 193
CHAR_TEES = 194
CHAR_CROSS = 197
# double walls
CHAR_DHLINE = 205
CHAR_DVLINE = 186
CHAR_DNE = 187
CHAR_DNW = 201
CHAR_DSE = 188
CHAR_DSW = 200
CHAR_DTEEW = 185
CHAR_DTEEE = 204
CHAR_DTEEN = 202
CHAR_DTEES = 203
CHAR_DCROSS = 206
# blocks
CHAR_BLOCK1 = 176
CHAR_BLOCK2 = 177
CHAR_BLOCK3 = 178
# arrows
CHAR_ARROW_N = 24
CHAR_ARROW_S = 25
CHAR_ARROW_E = 26
CHAR_ARROW_W = 27
# arrows without tail
CHAR_ARROW2_N = 30
CHAR_ARROW2_S = 31
CHAR_ARROW2_E = 16
CHAR_ARROW2_W = 17
# double arrows
CHAR_DARROW_H = 29
CHAR_DARROW_V = 18
# GUI stuff
CHAR_CHECKBOX_UNSET = 224
CHAR_CHECKBOX_SET = 225
CHAR_RADIO_UNSET = 9
CHAR_RADIO_SET = 10
# sub-pixel resolution kit
CHAR_SUBP_NW = 226
CHAR_SUBP_NE = 227
CHAR_SUBP_N = 228
CHAR_SUBP_SE = 229
CHAR_SUBP_DIAG = 230
CHAR_SUBP_E = 231
CHAR_SUBP_SW = 232
# misc characters
CHAR_BULLET = 7
CHAR_BULLET_INV = 8
CHAR_BULLET_SQUARE = 254
CHAR_CENT = 189
CHAR_CLUB = 5
CHAR_COPYRIGHT = 184
CHAR_CURRENCY = 207
CHAR_DIAMOND = 4
CHAR_DIVISION = 246
CHAR_EXCLAM_DOUBLE = 19
CHAR_FEMALE = 12
CHAR_FUNCTION = 159
CHAR_GRADE = 248
CHAR_HALF = 171
CHAR_HEART = 3
CHAR_LIGHT = 15
CHAR_MALE = 11
CHAR_MULTIPLICATION = 158
CHAR_NOTE = 13
CHAR_NOTE_DOUBLE = 14
CHAR_ONE_QUARTER = 172
CHAR_PILCROW = 20
CHAR_POUND = 156
CHAR_POW1 = 251
CHAR_POW2 = 253
CHAR_POW3 = 252
CHAR_RESERVED = 169
CHAR_SECTION = 21
CHAR_SMILIE = 1
CHAR_SMILIE_INV = 2
CHAR_SPADE = 6
CHAR_THREE_QUARTERS = 243
CHAR_UMLAUT = 249
CHAR_YEN = 190
# font flags
FONT_LAYOUT_ASCII_INCOL = 1
FONT_LAYOUT_ASCII_INROW = 2
FONT_TYPE_GREYSCALE = 4
FONT_TYPE_GRAYSCALE = 4
FONT_LAYOUT_TCOD = 8
# color control codes
COLCTRL_1=1
COLCTRL_2=2
COLCTRL_3=3
COLCTRL_4=4
COLCTRL_5=5
COLCTRL_NUMBER=5
COLCTRL_FORE_RGB=6
COLCTRL_BACK_RGB=7
COLCTRL_STOP=8
# renderers
RENDERER_GLSL=0
RENDERER_OPENGL=1
RENDERER_SDL=2
NB_RENDERERS=3
# alignment
LEFT=0
RIGHT=1
CENTER=2
# initializing the console
def console_init_root(w, h, title, fullscreen=False, renderer=RENDERER_SDL):
_lib.TCOD_console_init_root(w, h, c_char_p(title), fullscreen, renderer)
def console_get_width(con):
return _lib.TCOD_console_get_width(con)
def console_get_height(con):
return _lib.TCOD_console_get_height(con)
def console_set_custom_font(fontFile, flags=FONT_LAYOUT_ASCII_INCOL, nb_char_horiz=0, nb_char_vertic=0):
_lib.TCOD_console_set_custom_font(c_char_p(fontFile), flags, nb_char_horiz, nb_char_vertic)
def console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY):
if type(asciiCode) == str or type(asciiCode) == bytes:
_lib.TCOD_console_map_ascii_code_to_font(ord(asciiCode), fontCharX,
fontCharY)
else:
_lib.TCOD_console_map_ascii_code_to_font(asciiCode, fontCharX,
fontCharY)
def console_map_ascii_codes_to_font(firstAsciiCode, nbCodes, fontCharX,
fontCharY):
    if type(firstAsciiCode) == str or type(firstAsciiCode) == bytes:
_lib.TCOD_console_map_ascii_codes_to_font(ord(firstAsciiCode), nbCodes,
fontCharX, fontCharY)
else:
_lib.TCOD_console_map_ascii_codes_to_font(firstAsciiCode, nbCodes,
fontCharX, fontCharY)
def console_map_string_to_font(s, fontCharX, fontCharY):
if type(s) == bytes:
_lib.TCOD_console_map_string_to_font(s, fontCharX, fontCharY)
else:
_lib.TCOD_console_map_string_to_font_utf(s, fontCharX, fontCharY)
def console_is_fullscreen():
return _lib.TCOD_console_is_fullscreen()
def console_set_fullscreen(fullscreen):
_lib.TCOD_console_set_fullscreen(c_int(fullscreen))
def console_is_window_closed():
return _lib.TCOD_console_is_window_closed()
def console_set_window_title(title):
_lib.TCOD_console_set_window_title(c_char_p(title))
def console_credits():
_lib.TCOD_console_credits()
def console_credits_reset():
_lib.TCOD_console_credits_reset()
def console_credits_render(x, y, alpha):
return _lib.TCOD_console_credits_render(x, y, c_int(alpha))
def console_flush():
_lib.TCOD_console_flush()
# drawing on a console
def console_set_default_background(con, col):
_lib.TCOD_console_set_default_background(con, col)
def console_set_default_foreground(con, col):
_lib.TCOD_console_set_default_foreground(con, col)
def console_clear(con):
return _lib.TCOD_console_clear(con)
def console_put_char(con, x, y, c, flag=BKGND_DEFAULT):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char(con, x, y, ord(c), flag)
else:
_lib.TCOD_console_put_char(con, x, y, c, flag)
def console_put_char_ex(con, x, y, c, fore, back):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char_ex(con, x, y, ord(c), fore, back)
else:
_lib.TCOD_console_put_char_ex(con, x, y, c, fore, back)
def console_set_char_background(con, x, y, col, flag=BKGND_SET):
_lib.TCOD_console_set_char_background(con, x, y, col, flag)
def console_set_char_foreground(con, x, y, col):
_lib.TCOD_console_set_char_foreground(con, x, y, col)
def console_set_char(con, x, y, c):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_set_char(con, x, y, ord(c))
else:
_lib.TCOD_console_set_char(con, x, y, c)
def console_set_background_flag(con, flag):
_lib.TCOD_console_set_background_flag(con, c_int(flag))
def console_get_background_flag(con):
return _lib.TCOD_console_get_background_flag(con)
def console_set_alignment(con, alignment):
_lib.TCOD_console_set_alignment(con, c_int(alignment))
def console_get_alignment(con):
return _lib.TCOD_console_get_alignment(con)
def console_print(con, x, y, fmt):
if type(fmt) == bytes:
_lib.TCOD_console_print(c_void_p(con), x, y, c_char_p(fmt))
else:
_lib.TCOD_console_print_utf(c_void_p(con), x, y, fmt)
def console_print_ex(con, x, y, flag, alignment, fmt):
if type(fmt) == bytes:
_lib.TCOD_console_print_ex(c_void_p(con), x, y, flag, alignment, c_char_p(fmt))
else:
_lib.TCOD_console_print_ex_utf(c_void_p(con), x, y, flag, alignment, fmt)
def console_print_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_print_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
else:
return _lib.TCOD_console_print_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_print_rect_ex(con, x, y, w, h, flag, alignment, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_print_rect_ex(c_void_p(con), x, y, w, h, flag, alignment, c_char_p(fmt))
else:
return _lib.TCOD_console_print_rect_ex_utf(c_void_p(con), x, y, w, h, flag, alignment, fmt)
def console_get_height_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_get_height_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
else:
return _lib.TCOD_console_get_height_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_rect(con, x, y, w, h, clr, flag=BKGND_DEFAULT):
_lib.TCOD_console_rect(con, x, y, w, h, c_int(clr), flag)
def console_hline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_hline( con, x, y, l, flag)
def console_vline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_vline( con, x, y, l, flag)
def console_print_frame(con, x, y, w, h, clear=True, flag=BKGND_DEFAULT, fmt=0):
_lib.TCOD_console_print_frame(c_void_p(con), x, y, w, h, c_int(clear), flag, c_char_p(fmt))
def console_set_color_control(con,fore,back) :
_lib.TCOD_console_set_color_control(con,fore,back)
def console_get_default_background(con):
return _lib.TCOD_console_get_default_background(con)
def console_get_default_foreground(con):
return _lib.TCOD_console_get_default_foreground(con)
def console_get_char_background(con, x, y):
return _lib.TCOD_console_get_char_background(con, x, y)
def console_get_char_foreground(con, x, y):
return _lib.TCOD_console_get_char_foreground(con, x, y)
def console_get_char(con, x, y):
return _lib.TCOD_console_get_char(con, x, y)
def console_set_fade(fade, fadingColor):
_lib.TCOD_console_set_fade(fade, fadingColor)
##_lib.TCOD_console_set_fade_wrapper(fade, fadingColor)
def console_get_fade():
return _lib.TCOD_console_get_fade().value
def console_get_fading_color():
return _lib.TCOD_console_get_fading_color()
# handling keyboard input
def console_wait_for_keypress(flush):
k=Key()
_lib.TCOD_console_wait_for_keypress_wrapper(byref(k),c_bool(flush))
return k
def console_check_for_keypress(flags=KEY_RELEASED):
k=Key()
_lib.TCOD_console_check_for_keypress_wrapper(byref(k),c_int(flags))
return k
def console_is_key_pressed(key):
return _lib.TCOD_console_is_key_pressed(key)
def console_set_keyboard_repeat(initial_delay, interval):
_lib.TCOD_console_set_keyboard_repeat(initial_delay, interval)
def console_disable_keyboard_repeat():
_lib.TCOD_console_disable_keyboard_repeat()
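# Illustrative sketch (not part of the original wrapper; assumes a
# libtcod-compatible font bitmap named 'terminal.png' sits next to the
# executable): a minimal init/draw/input loop using the root console (0).
def _example_main_loop():
    console_set_custom_font('terminal.png', FONT_LAYOUT_ASCII_INCOL)
    console_init_root(80, 50, 'demo', False)
    while not console_is_window_closed():
        console_clear(0)                      # 0 refers to the root console
        console_print(0, 1, 1, 'Hello @')
        console_flush()
        key = console_wait_for_keypress(True)
        if key.vk == KEY_ESCAPE:
            break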
# using offscreen consoles
def console_new(w, h):
return _lib.TCOD_console_new(w, h)
def console_from_file(filename):
return _lib.TCOD_console_from_file(filename)
def console_get_width(con):
return _lib.TCOD_console_get_width(con)
def console_get_height(con):
return _lib.TCOD_console_get_height(con)
def console_blit(src, x, y, w, h, dst, xdst, ydst, ffade=1.0,bfade=1.0):
_lib.TCOD_console_blit(src, x, y, w, h, dst, xdst, ydst, c_float(ffade), c_float(bfade))
def console_set_key_color(con, col):
_lib.TCOD_console_set_key_color(con, col)
def console_delete(con):
_lib.TCOD_console_delete(con)
# fast color filling
def console_fill_foreground(con,r,g,b) :
if len(r) != len(g) or len(r) != len(b):
raise TypeError('R, G and B must all have the same size.')
if (numpy_available and isinstance(r, numpy.ndarray) and
isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
#numpy arrays, use numpy's ctypes functions
r = numpy.ascontiguousarray(r, dtype=numpy.int_)
g = numpy.ascontiguousarray(g, dtype=numpy.int_)
b = numpy.ascontiguousarray(b, dtype=numpy.int_)
cr = r.ctypes.data_as(POINTER(c_int))
cg = g.ctypes.data_as(POINTER(c_int))
cb = b.ctypes.data_as(POINTER(c_int))
else:
# otherwise convert using ctypes arrays
cr = (c_int * len(r))(*r)
cg = (c_int * len(g))(*g)
cb = (c_int * len(b))(*b)
_lib.TCOD_console_fill_foreground(con, cr, cg, cb)
def console_fill_background(con,r,g,b) :
if len(r) != len(g) or len(r) != len(b):
raise TypeError('R, G and B must all have the same size.')
if (numpy_available and isinstance(r, numpy.ndarray) and
isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
#numpy arrays, use numpy's ctypes functions
r = numpy.ascontiguousarray(r, dtype=numpy.int_)
g = numpy.ascontiguousarray(g, dtype=numpy.int_)
b = numpy.ascontiguousarray(b, dtype=numpy.int_)
cr = r.ctypes.data_as(POINTER(c_int))
cg = g.ctypes.data_as(POINTER(c_int))
cb = b.ctypes.data_as(POINTER(c_int))
else:
# otherwise convert using ctypes arrays
cr = (c_int * len(r))(*r)
cg = (c_int * len(g))(*g)
cb = (c_int * len(b))(*b)
_lib.TCOD_console_fill_background(con, cr, cg, cb)
def console_fill_char(con,arr) :
if (numpy_available and isinstance(arr, numpy.ndarray) ):
#numpy arrays, use numpy's ctypes functions
arr = numpy.ascontiguousarray(arr, dtype=numpy.int_)
carr = arr.ctypes.data_as(POINTER(c_int))
else:
#otherwise convert using the struct module
carr = struct.pack('%di' % len(arr), *arr)
_lib.TCOD_console_fill_char(con, carr)
def console_load_asc(con, filename) :
_lib.TCOD_console_load_asc(con,filename)
def console_save_asc(con, filename) :
_lib.TCOD_console_save_asc(con,filename)
def console_load_apf(con, filename) :
_lib.TCOD_console_load_apf(con,filename)
def console_save_apf(con, filename) :
_lib.TCOD_console_save_apf(con,filename)
############################
# sys module
############################
_lib.TCOD_sys_get_last_frame_length.restype = c_float
_lib.TCOD_sys_elapsed_seconds.restype = c_float
# high precision time functions
def sys_set_fps(fps):
_lib.TCOD_sys_set_fps(fps)
def sys_get_fps():
return _lib.TCOD_sys_get_fps()
def sys_get_last_frame_length():
return _lib.TCOD_sys_get_last_frame_length()
def sys_sleep_milli(val):
_lib.TCOD_sys_sleep_milli(c_uint(val))
def sys_elapsed_milli():
return _lib.TCOD_sys_elapsed_milli()
def sys_elapsed_seconds():
return _lib.TCOD_sys_elapsed_seconds()
def sys_set_renderer(renderer):
_lib.TCOD_sys_set_renderer(renderer)
def sys_get_renderer():
return _lib.TCOD_sys_get_renderer()
# easy screenshots
def sys_save_screenshot(name=0):
_lib.TCOD_sys_save_screenshot(c_char_p(name))
# custom fullscreen resolution
def sys_force_fullscreen_resolution(width, height):
_lib.TCOD_sys_force_fullscreen_resolution(width, height)
def sys_get_current_resolution():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_current_resolution(byref(w), byref(h))
return w.value, h.value
def sys_get_char_size():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_char_size(byref(w), byref(h))
return w.value, h.value
# update font bitmap
def sys_update_char(asciiCode, fontx, fonty, img, x, y) :
_lib.TCOD_sys_update_char(c_int(asciiCode),c_int(fontx),c_int(fonty),img,c_int(x),c_int(y))
# custom SDL post renderer
SDL_RENDERER_FUNC = CFUNCTYPE(None, c_void_p)
def sys_register_SDL_renderer(callback):
global sdl_renderer_func
sdl_renderer_func = SDL_RENDERER_FUNC(callback)
_lib.TCOD_sys_register_SDL_renderer(sdl_renderer_func)
# events
EVENT_KEY_PRESS=1
EVENT_KEY_RELEASE=2
EVENT_KEY=EVENT_KEY_PRESS|EVENT_KEY_RELEASE
EVENT_MOUSE_MOVE=4
EVENT_MOUSE_PRESS=8
EVENT_MOUSE_RELEASE=16
EVENT_MOUSE=EVENT_MOUSE_MOVE|EVENT_MOUSE_PRESS|EVENT_MOUSE_RELEASE
EVENT_ANY=EVENT_KEY|EVENT_MOUSE
def sys_check_for_event(mask,k,m) :
return _lib.TCOD_sys_check_for_event(c_int(mask),byref(k),byref(m))
def sys_wait_for_event(mask,k,m,flush) :
return _lib.TCOD_sys_wait_for_event(c_int(mask),byref(k),byref(m),c_bool(flush))
############################
# line module
############################
_lib.TCOD_line_step.restype = c_bool
_lib.TCOD_line.restype=c_bool
_lib.TCOD_line_step_mt.restype = c_bool
def line_init(xo, yo, xd, yd):
_lib.TCOD_line_init(xo, yo, xd, yd)
def line_step():
x = c_int()
y = c_int()
ret = _lib.TCOD_line_step(byref(x), byref(y))
if not ret:
return x.value, y.value
return None,None
def line(xo,yo,xd,yd,py_callback) :
LINE_CBK_FUNC=CFUNCTYPE(c_bool,c_int,c_int)
c_callback=LINE_CBK_FUNC(py_callback)
return _lib.TCOD_line(xo,yo,xd,yd,c_callback)
def line_iter(xo, yo, xd, yd):
data = (c_int * 9)() # struct TCOD_bresenham_data_t
_lib.TCOD_line_init_mt(xo, yo, xd, yd, data)
x = c_int(xo)
y = c_int(yo)
done = False
while not done:
yield x.value, y.value
done = _lib.TCOD_line_step_mt(byref(x), byref(y), data)
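# Illustrative example (not part of the original wrapper): line_iter walks a
# Bresenham line without touching the global line_init/line_step state.
def _example_line_iter():
    # cells from (0, 0) to (5, 3), origin included
    return list(line_iter(0, 0, 5, 3))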
############################
# image module
############################
_lib.TCOD_image_is_pixel_transparent.restype = c_bool
_lib.TCOD_image_get_pixel.restype = Color
_lib.TCOD_image_get_mipmap_pixel.restype = Color
def image_new(width, height):
return _lib.TCOD_image_new(width, height)
def image_clear(image,col) :
_lib.TCOD_image_clear(image,col)
def image_invert(image) :
_lib.TCOD_image_invert(image)
def image_hflip(image) :
_lib.TCOD_image_hflip(image)
def image_rotate90(image, num=1) :
_lib.TCOD_image_rotate90(image,num)
def image_vflip(image) :
_lib.TCOD_image_vflip(image)
def image_scale(image, neww, newh) :
_lib.TCOD_image_scale(image,c_int(neww),c_int(newh))
def image_set_key_color(image,col) :
_lib.TCOD_image_set_key_color(image,col)
def image_get_alpha(image,x,y) :
return _lib.TCOD_image_get_alpha(image,c_int(x),c_int(y))
def image_is_pixel_transparent(image,x,y) :
return _lib.TCOD_image_is_pixel_transparent(image,c_int(x),c_int(y))
def image_load(filename):
return _lib.TCOD_image_load(c_char_p(filename))
def image_from_console(console):
return _lib.TCOD_image_from_console(console)
def image_refresh_console(image, console):
_lib.TCOD_image_refresh_console(image, console)
def image_get_size(image):
w=c_int()
h=c_int()
_lib.TCOD_image_get_size(image, byref(w), byref(h))
return w.value, h.value
def image_get_pixel(image, x, y):
return _lib.TCOD_image_get_pixel(image, x, y)
def image_get_mipmap_pixel(image, x0, y0, x1, y1):
return _lib.TCOD_image_get_mipmap_pixel(image, c_float(x0), c_float(y0),
c_float(x1), c_float(y1))
def image_put_pixel(image, x, y, col):
_lib.TCOD_image_put_pixel(image, x, y, col)
##_lib.TCOD_image_put_pixel_wrapper(image, x, y, col)
def image_blit(image, console, x, y, bkgnd_flag, scalex, scaley, angle):
_lib.TCOD_image_blit(image, console, c_float(x), c_float(y), bkgnd_flag,
c_float(scalex), c_float(scaley), c_float(angle))
def image_blit_rect(image, console, x, y, w, h, bkgnd_flag):
_lib.TCOD_image_blit_rect(image, console, x, y, w, h, bkgnd_flag)
def image_blit_2x(image, console, dx, dy, sx=0, sy=0, w=-1, h=-1):
_lib.TCOD_image_blit_2x(image, console, dx,dy,sx,sy,w,h)
def image_save(image, filename):
_lib.TCOD_image_save(image, c_char_p(filename))
def image_delete(image):
_lib.TCOD_image_delete(image)
############################
# mouse module
############################
class Mouse(Structure):
_fields_=[('x', c_int),
('y', c_int),
('dx', c_int),
('dy', c_int),
('cx', c_int),
('cy', c_int),
('dcx', c_int),
('dcy', c_int),
('lbutton', c_bool),
('rbutton', c_bool),
('mbutton', c_bool),
('lbutton_pressed', c_bool),
('rbutton_pressed', c_bool),
('mbutton_pressed', c_bool),
('wheel_up', c_bool),
('wheel_down', c_bool),
]
_lib.TCOD_mouse_is_cursor_visible.restype = c_bool
def mouse_show_cursor(visible):
_lib.TCOD_mouse_show_cursor(c_int(visible))
def mouse_is_cursor_visible():
return _lib.TCOD_mouse_is_cursor_visible()
def mouse_move(x, y):
_lib.TCOD_mouse_move(x, y)
def mouse_get_status():
mouse=Mouse()
_lib.TCOD_mouse_get_status_wrapper(byref(mouse))
return mouse
############################
# parser module
############################
_lib.TCOD_struct_get_name.restype = c_char_p
_lib.TCOD_struct_is_mandatory.restype = c_bool
_lib.TCOD_parser_get_bool_property.restype = c_bool
_lib.TCOD_parser_get_float_property.restype = c_float
_lib.TCOD_parser_get_string_property.restype = c_char_p
_lib.TCOD_parser_get_color_property.restype = Color
class Dice(Structure):
_fields_=[('nb_dices', c_int),
('nb_faces', c_int),
('multiplier', c_float),
('addsub', c_float),
]
def __repr__(self):
return "Dice(%d, %d, %s, %s)" % (self.nb_dices, self.nb_faces,
self.multiplier, self.addsub)
class _CValue(Union):
_fields_=[('c',c_uint8),
('i',c_int),
('f',c_float),
('s',c_char_p),
# JBR03192012 See http://bugs.python.org/issue14354 for why these are not defined as their actual types
('col',c_uint8 * 3),
('dice',c_int * 4),
('custom',c_void_p),
]
_CFUNC_NEW_STRUCT = CFUNCTYPE(c_uint, c_void_p, c_char_p)
_CFUNC_NEW_FLAG = CFUNCTYPE(c_uint, c_char_p)
_CFUNC_NEW_PROPERTY = CFUNCTYPE(c_uint, c_char_p, c_int, _CValue)
class _CParserListener(Structure):
_fields_=[('new_struct', _CFUNC_NEW_STRUCT),
('new_flag',_CFUNC_NEW_FLAG),
('new_property',_CFUNC_NEW_PROPERTY),
('end_struct',_CFUNC_NEW_STRUCT),
('error',_CFUNC_NEW_FLAG),
]
# property types
TYPE_NONE = 0
TYPE_BOOL = 1
TYPE_CHAR = 2
TYPE_INT = 3
TYPE_FLOAT = 4
TYPE_STRING = 5
TYPE_COLOR = 6
TYPE_DICE = 7
TYPE_VALUELIST00 = 8
TYPE_VALUELIST01 = 9
TYPE_VALUELIST02 = 10
TYPE_VALUELIST03 = 11
TYPE_VALUELIST04 = 12
TYPE_VALUELIST05 = 13
TYPE_VALUELIST06 = 14
TYPE_VALUELIST07 = 15
TYPE_VALUELIST08 = 16
TYPE_VALUELIST09 = 17
TYPE_VALUELIST10 = 18
TYPE_VALUELIST11 = 19
TYPE_VALUELIST12 = 20
TYPE_VALUELIST13 = 21
TYPE_VALUELIST14 = 22
TYPE_VALUELIST15 = 23
TYPE_LIST = 1024
def _convert_TCODList(clist, typ):
res = list()
for i in range(_lib.TCOD_list_size(clist)):
elt = _lib.TCOD_list_get(clist, i)
elt = cast(elt, c_void_p)
if typ == TYPE_BOOL:
elt = c_bool.from_buffer(elt).value
elif typ == TYPE_CHAR:
elt = c_char.from_buffer(elt).value
elif typ == TYPE_INT:
elt = c_int.from_buffer(elt).value
elif typ == TYPE_FLOAT:
elt = c_float.from_buffer(elt).value
elif typ == TYPE_STRING or TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
elt = cast(elt, c_char_p).value
elif typ == TYPE_COLOR:
elt = Color.from_buffer_copy(elt)
elif typ == TYPE_DICE:
# doesn't work
elt = Dice.from_buffer_copy(elt)
res.append(elt)
return res
def parser_new():
return _lib.TCOD_parser_new()
def parser_new_struct(parser, name):
return _lib.TCOD_parser_new_struct(parser, name)
def struct_add_flag(struct, name):
_lib.TCOD_struct_add_flag(struct, name)
def struct_add_property(struct, name, typ, mandatory):
_lib.TCOD_struct_add_property(struct, name, typ, c_bool(mandatory))
def struct_add_value_list(struct, name, value_list, mandatory):
CARRAY = c_char_p * (len(value_list) + 1)
cvalue_list = CARRAY()
for i in range(len(value_list)):
cvalue_list[i] = cast(value_list[i], c_char_p)
cvalue_list[len(value_list)] = 0
_lib.TCOD_struct_add_value_list(struct, name, cvalue_list, c_bool(mandatory))
def struct_add_list_property(struct, name, typ, mandatory):
_lib.TCOD_struct_add_list_property(struct, name, typ, c_bool(mandatory))
def struct_add_structure(struct, sub_struct):
_lib.TCOD_struct_add_structure(struct, sub_struct)
def struct_get_name(struct):
return _lib.TCOD_struct_get_name(struct)
def struct_is_mandatory(struct, name):
return _lib.TCOD_struct_is_mandatory(struct, name)
def struct_get_type(struct, name):
return _lib.TCOD_struct_get_type(struct, name)
def parser_run(parser, filename, listener=0):
if listener != 0:
clistener=_CParserListener()
def value_converter(name, typ, value):
if typ == TYPE_BOOL:
return listener.new_property(name, typ, value.c == 1)
elif typ == TYPE_CHAR:
return listener.new_property(name, typ, '%c' % (value.c & 0xFF))
elif typ == TYPE_INT:
return listener.new_property(name, typ, value.i)
elif typ == TYPE_FLOAT:
return listener.new_property(name, typ, value.f)
elif typ == TYPE_STRING or \
TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
return listener.new_property(name, typ, value.s)
elif typ == TYPE_COLOR:
col = cast(value.col, POINTER(Color)).contents
return listener.new_property(name, typ, col)
elif typ == TYPE_DICE:
dice = cast(value.dice, POINTER(Dice)).contents
return listener.new_property(name, typ, dice)
elif typ & TYPE_LIST:
return listener.new_property(name, typ,
_convert_TCODList(value.custom, typ & 0xFF))
return True
clistener.new_struct = _CFUNC_NEW_STRUCT(listener.new_struct)
clistener.new_flag = _CFUNC_NEW_FLAG(listener.new_flag)
clistener.new_property = _CFUNC_NEW_PROPERTY(value_converter)
clistener.end_struct = _CFUNC_NEW_STRUCT(listener.end_struct)
clistener.error = _CFUNC_NEW_FLAG(listener.error)
_lib.TCOD_parser_run(parser, c_char_p(filename), byref(clistener))
else:
_lib.TCOD_parser_run(parser, c_char_p(filename), 0)
def parser_delete(parser):
_lib.TCOD_parser_delete(parser)
def parser_get_bool_property(parser, name):
return _lib.TCOD_parser_get_bool_property(parser, c_char_p(name))
def parser_get_int_property(parser, name):
return _lib.TCOD_parser_get_int_property(parser, c_char_p(name))
def parser_get_char_property(parser, name):
return '%c' % _lib.TCOD_parser_get_char_property(parser, c_char_p(name))
def parser_get_float_property(parser, name):
return _lib.TCOD_parser_get_float_property(parser, c_char_p(name))
def parser_get_string_property(parser, name):
return _lib.TCOD_parser_get_string_property(parser, c_char_p(name))
def parser_get_color_property(parser, name):
return _lib.TCOD_parser_get_color_property(parser, c_char_p(name))
def parser_get_dice_property(parser, name):
d = Dice()
_lib.TCOD_parser_get_dice_property_py(c_void_p(parser), c_char_p(name), byref(d))
return d
def parser_get_list_property(parser, name, typ):
clist = _lib.TCOD_parser_get_list_property(parser, c_char_p(name), c_int(typ))
return _convert_TCODList(clist, typ)
############################
# random module
############################
_lib.TCOD_random_get_float.restype = c_float
_lib.TCOD_random_get_double.restype = c_double
RNG_MT = 0
RNG_CMWC = 1
DISTRIBUTION_LINEAR = 0
DISTRIBUTION_GAUSSIAN = 1
DISTRIBUTION_GAUSSIAN_RANGE = 2
DISTRIBUTION_GAUSSIAN_INVERSE = 3
DISTRIBUTION_GAUSSIAN_RANGE_INVERSE = 4
def random_get_instance():
return _lib.TCOD_random_get_instance()
def random_new(algo=RNG_CMWC):
return _lib.TCOD_random_new(algo)
def random_new_from_seed(seed, algo=RNG_CMWC):
return _lib.TCOD_random_new_from_seed(algo,c_uint(seed))
def random_set_distribution(rnd, dist) :
_lib.TCOD_random_set_distribution(rnd, dist)
def random_get_int(rnd, mi, ma):
return _lib.TCOD_random_get_int(rnd, mi, ma)
def random_get_float(rnd, mi, ma):
return _lib.TCOD_random_get_float(rnd, c_float(mi), c_float(ma))
def random_get_double(rnd, mi, ma):
return _lib.TCOD_random_get_double(rnd, c_double(mi), c_double(ma))
def random_get_int_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_int_mean(rnd, mi, ma, mean)
def random_get_float_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_float_mean(rnd, c_float(mi), c_float(ma), c_float(mean))
def random_get_double_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_double_mean(rnd, c_double(mi), c_double(ma), c_double(mean))
def random_save(rnd):
return _lib.TCOD_random_save(rnd)
def random_restore(rnd, backup):
_lib.TCOD_random_restore(rnd, backup)
def random_delete(rnd):
_lib.TCOD_random_delete(rnd)
############################
# noise module
############################
_lib.TCOD_noise_get.restype = c_float
_lib.TCOD_noise_get_ex.restype = c_float
_lib.TCOD_noise_get_fbm.restype = c_float
_lib.TCOD_noise_get_fbm_ex.restype = c_float
_lib.TCOD_noise_get_turbulence.restype = c_float
_lib.TCOD_noise_get_turbulence_ex.restype = c_float
NOISE_DEFAULT_HURST = 0.5
NOISE_DEFAULT_LACUNARITY = 2.0
NOISE_DEFAULT = 0
NOISE_PERLIN = 1
NOISE_SIMPLEX = 2
NOISE_WAVELET = 4
_NOISE_PACKER_FUNC = (None,
(c_float * 1),
(c_float * 2),
(c_float * 3),
(c_float * 4),
)
def noise_new(dim, h=NOISE_DEFAULT_HURST, l=NOISE_DEFAULT_LACUNARITY, random=0):
return _lib.TCOD_noise_new(dim, c_float(h), c_float(l), random)
def noise_set_type(n, typ) :
_lib.TCOD_noise_set_type(n,typ)
def noise_get(n, f, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), typ)
def noise_get_fbm(n, f, oc, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_fbm_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_get_turbulence(n, f, oc, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_turbulence_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_delete(n):
_lib.TCOD_noise_delete(n)
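# Illustrative sketch (not part of the original wrapper): sampling 2D simplex
# noise and its fractal Brownian motion variant at a single point.
def _example_noise():
    n = noise_new(2)
    noise_set_type(n, NOISE_SIMPLEX)
    flat = noise_get(n, [0.5, 0.25])
    fbm = noise_get_fbm(n, [0.5, 0.25], 4.0)  # 4 octaves
    noise_delete(n)
    return flat, fbm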
############################
# fov module
############################
_lib.TCOD_map_is_in_fov.restype = c_bool
_lib.TCOD_map_is_transparent.restype = c_bool
_lib.TCOD_map_is_walkable.restype = c_bool
FOV_BASIC = 0
FOV_DIAMOND = 1
FOV_SHADOW = 2
FOV_PERMISSIVE_0 = 3
FOV_PERMISSIVE_1 = 4
FOV_PERMISSIVE_2 = 5
FOV_PERMISSIVE_3 = 6
FOV_PERMISSIVE_4 = 7
FOV_PERMISSIVE_5 = 8
FOV_PERMISSIVE_6 = 9
FOV_PERMISSIVE_7 = 10
FOV_PERMISSIVE_8 = 11
FOV_RESTRICTIVE = 12
NB_FOV_ALGORITHMS = 13
def FOV_PERMISSIVE(p) :
return FOV_PERMISSIVE_0+p
def map_new(w, h):
return _lib.TCOD_map_new(w, h)
def map_copy(source, dest):
return _lib.TCOD_map_copy(source, dest)
def map_set_properties(m, x, y, isTrans, isWalk):
_lib.TCOD_map_set_properties(m, x, y, c_int(isTrans), c_int(isWalk))
def map_clear(m,walkable=False,transparent=False):
_lib.TCOD_map_clear(m,c_int(walkable),c_int(transparent))
def map_compute_fov(m, x, y, radius=0, light_walls=True, algo=FOV_RESTRICTIVE ):
_lib.TCOD_map_compute_fov(m, x, y, c_int(radius), c_bool(light_walls), c_int(algo))
def map_is_in_fov(m, x, y):
return _lib.TCOD_map_is_in_fov(m, x, y)
def map_is_transparent(m, x, y):
return _lib.TCOD_map_is_transparent(m, x, y)
def map_is_walkable(m, x, y):
return _lib.TCOD_map_is_walkable(m, x, y)
def map_delete(m):
return _lib.TCOD_map_delete(m)
def map_get_width(map):
return _lib.TCOD_map_get_width(map)
def map_get_height(map):
return _lib.TCOD_map_get_height(map)
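# Illustrative sketch (not part of the original wrapper): computing field of
# view on a small, fully open map and collecting the visible cells.
def _example_fov():
    m = map_new(10, 10)
    map_clear(m, walkable=True, transparent=True)
    map_compute_fov(m, 5, 5, radius=4, light_walls=True, algo=FOV_SHADOW)
    visible = [(x, y) for x in range(10) for y in range(10) if map_is_in_fov(m, x, y)]
    map_delete(m)
    return visible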
############################
# pathfinding module
############################
_lib.TCOD_path_compute.restype = c_bool
_lib.TCOD_path_is_empty.restype = c_bool
_lib.TCOD_path_walk.restype = c_bool
PATH_CBK_FUNC = CFUNCTYPE(c_float, c_int, c_int, c_int, c_int, py_object)
def path_new_using_map(m, dcost=1.41):
return (_lib.TCOD_path_new_using_map(c_void_p(m), c_float(dcost)), None)
def path_new_using_function(w, h, func, userdata=0, dcost=1.41):
cbk_func = PATH_CBK_FUNC(func)
return (_lib.TCOD_path_new_using_function(w, h, cbk_func,
py_object(userdata), c_float(dcost)), cbk_func)
def path_compute(p, ox, oy, dx, dy):
return _lib.TCOD_path_compute(p[0], ox, oy, dx, dy)
def path_get_origin(p):
x = c_int()
y = c_int()
_lib.TCOD_path_get_origin(p[0], byref(x), byref(y))
return x.value, y.value
def path_get_destination(p):
x = c_int()
y = c_int()
_lib.TCOD_path_get_destination(p[0], byref(x), byref(y))
return x.value, y.value
def path_size(p):
return _lib.TCOD_path_size(p[0])
def path_reverse(p):
_lib.TCOD_path_reverse(p[0])
def path_get(p, idx):
x = c_int()
y = c_int()
_lib.TCOD_path_get(p[0], idx, byref(x), byref(y))
return x.value, y.value
def path_is_empty(p):
return _lib.TCOD_path_is_empty(p[0])
def path_walk(p, recompute):
x = c_int()
y = c_int()
if _lib.TCOD_path_walk(p[0], byref(x), byref(y), c_int(recompute)):
return x.value, y.value
return None,None
def path_delete(p):
_lib.TCOD_path_delete(p[0])
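# Illustrative sketch (not part of the original wrapper): A* pathfinding over
# a map prepared as in the field-of-view example above.
def _example_path(m):
    p = path_new_using_map(m)
    steps = []
    if path_compute(p, 0, 0, 9, 9):
        steps = [path_get(p, i) for i in range(path_size(p))]
    path_delete(p)
    return steps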
_lib.TCOD_dijkstra_path_set.restype = c_bool
_lib.TCOD_dijkstra_is_empty.restype = c_bool
_lib.TCOD_dijkstra_path_walk.restype = c_bool
_lib.TCOD_dijkstra_get_distance.restype = c_float
def dijkstra_new(m, dcost=1.41):
return (_lib.TCOD_dijkstra_new(c_void_p(m), c_float(dcost)), None)
def dijkstra_new_using_function(w, h, func, userdata=0, dcost=1.41):
cbk_func = PATH_CBK_FUNC(func)
    return (_lib.TCOD_dijkstra_new_using_function(w, h, cbk_func,
py_object(userdata), c_float(dcost)), cbk_func)
def dijkstra_compute(p, ox, oy):
_lib.TCOD_dijkstra_compute(p[0], c_int(ox), c_int(oy))
def dijkstra_path_set(p, x, y):
return _lib.TCOD_dijkstra_path_set(p[0], c_int(x), c_int(y))
def dijkstra_get_distance(p, x, y):
return _lib.TCOD_dijkstra_get_distance(p[0], c_int(x), c_int(y))
def dijkstra_size(p):
return _lib.TCOD_dijkstra_size(p[0])
def dijkstra_reverse(p):
_lib.TCOD_dijkstra_reverse(p[0])
def dijkstra_get(p, idx):
x = c_int()
y = c_int()
_lib.TCOD_dijkstra_get(p[0], c_int(idx), byref(x), byref(y))
return x.value, y.value
def dijkstra_is_empty(p):
return _lib.TCOD_dijkstra_is_empty(p[0])
def dijkstra_path_walk(p):
x = c_int()
y = c_int()
if _lib.TCOD_dijkstra_path_walk(p[0], byref(x), byref(y)):
return x.value, y.value
return None,None
def dijkstra_delete(p):
_lib.TCOD_dijkstra_delete(p[0])
############################
# bsp module
############################
class _CBsp(Structure):
_fields_ = [('next', c_void_p),
('father', c_void_p),
('son', c_void_p),
('x', c_int),
('y', c_int),
('w', c_int),
('h', c_int),
('position', c_int),
('level', c_uint8),
('horizontal', c_bool),
]
_lib.TCOD_bsp_new_with_size.restype = POINTER(_CBsp)
_lib.TCOD_bsp_left.restype = POINTER(_CBsp)
_lib.TCOD_bsp_right.restype = POINTER(_CBsp)
_lib.TCOD_bsp_father.restype = POINTER(_CBsp)
_lib.TCOD_bsp_is_leaf.restype = c_bool
_lib.TCOD_bsp_contains.restype = c_bool
_lib.TCOD_bsp_find_node.restype = POINTER(_CBsp)
BSP_CBK_FUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
# python class encapsulating the _CBsp pointer
class Bsp(object):
def __init__(self, cnode):
pcbsp = cast(cnode, POINTER(_CBsp))
self.p = pcbsp
def getx(self):
return self.p.contents.x
def setx(self, value):
self.p.contents.x = value
x = property(getx, setx)
def gety(self):
return self.p.contents.y
def sety(self, value):
self.p.contents.y = value
y = property(gety, sety)
def getw(self):
return self.p.contents.w
def setw(self, value):
self.p.contents.w = value
w = property(getw, setw)
def geth(self):
return self.p.contents.h
def seth(self, value):
self.p.contents.h = value
h = property(geth, seth)
def getpos(self):
return self.p.contents.position
def setpos(self, value):
self.p.contents.position = value
position = property(getpos, setpos)
def gethor(self):
return self.p.contents.horizontal
def sethor(self,value):
self.p.contents.horizontal = value
horizontal = property(gethor, sethor)
def getlev(self):
return self.p.contents.level
def setlev(self,value):
self.p.contents.level = value
level = property(getlev, setlev)
def bsp_new_with_size(x, y, w, h):
return Bsp(_lib.TCOD_bsp_new_with_size(x, y, w, h))
def bsp_split_once(node, horizontal, position):
_lib.TCOD_bsp_split_once(node.p, c_int(horizontal), position)
def bsp_split_recursive(node, randomizer, nb, minHSize, minVSize, maxHRatio,
maxVRatio):
_lib.TCOD_bsp_split_recursive(node.p, randomizer, nb, minHSize, minVSize,
c_float(maxHRatio), c_float(maxVRatio))
def bsp_resize(node, x, y, w, h):
_lib.TCOD_bsp_resize(node.p, x, y, w, h)
def bsp_left(node):
return Bsp(_lib.TCOD_bsp_left(node.p))
def bsp_right(node):
return Bsp(_lib.TCOD_bsp_right(node.p))
def bsp_father(node):
return Bsp(_lib.TCOD_bsp_father(node.p))
def bsp_is_leaf(node):
return _lib.TCOD_bsp_is_leaf(node.p)
def bsp_contains(node, cx, cy):
return _lib.TCOD_bsp_contains(node.p, cx, cy)
def bsp_find_node(node, cx, cy):
return Bsp(_lib.TCOD_bsp_find_node(node.p, cx, cy))
def _bsp_traverse(node, callback, userData, func):
# convert the c node into a python node
#before passing it to the actual callback
def node_converter(cnode, data):
node = Bsp(cnode)
return callback(node, data)
cbk_func = BSP_CBK_FUNC(node_converter)
func(node.p, cbk_func, userData)
def bsp_traverse_pre_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_pre_order)
def bsp_traverse_in_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_in_order)
def bsp_traverse_post_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_post_order)
def bsp_traverse_level_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_level_order)
def bsp_traverse_inverted_level_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData,
_lib.TCOD_bsp_traverse_inverted_level_order)
def bsp_remove_sons(node):
_lib.TCOD_bsp_remove_sons(node.p)
def bsp_delete(node):
_lib.TCOD_bsp_delete(node.p)
############################
# heightmap module
############################
class _CHeightMap(Structure):
_fields_=[('w', c_int),
('h', c_int),
('values', POINTER(c_float)),
]
_lib.TCOD_heightmap_new.restype = POINTER(_CHeightMap)
_lib.TCOD_heightmap_get_value.restype = c_float
_lib.TCOD_heightmap_has_land_on_border.restype = c_bool
class HeightMap(object):
def __init__(self, chm):
pchm = cast(chm, POINTER(_CHeightMap))
self.p = pchm
def getw(self):
return self.p.contents.w
def setw(self, value):
self.p.contents.w = value
w = property(getw, setw)
def geth(self):
return self.p.contents.h
def seth(self, value):
self.p.contents.h = value
h = property(geth, seth)
def heightmap_new(w, h):
phm = _lib.TCOD_heightmap_new(w, h)
return HeightMap(phm)
def heightmap_set_value(hm, x, y, value):
_lib.TCOD_heightmap_set_value(hm.p, x, y, c_float(value))
def heightmap_add(hm, value):
_lib.TCOD_heightmap_add(hm.p, c_float(value))
def heightmap_scale(hm, value):
_lib.TCOD_heightmap_scale(hm.p, c_float(value))
def heightmap_clear(hm):
_lib.TCOD_heightmap_clear(hm.p)
def heightmap_clamp(hm, mi, ma):
_lib.TCOD_heightmap_clamp(hm.p, c_float(mi),c_float(ma))
def heightmap_copy(hm1, hm2):
_lib.TCOD_heightmap_copy(hm1.p, hm2.p)
def heightmap_normalize(hm, mi=0.0, ma=1.0):
_lib.TCOD_heightmap_normalize(hm.p, c_float(mi), c_float(ma))
def heightmap_lerp_hm(hm1, hm2, hm3, coef):
_lib.TCOD_heightmap_lerp_hm(hm1.p, hm2.p, hm3.p, c_float(coef))
def heightmap_add_hm(hm1, hm2, hm3):
_lib.TCOD_heightmap_add_hm(hm1.p, hm2.p, hm3.p)
def heightmap_multiply_hm(hm1, hm2, hm3):
_lib.TCOD_heightmap_multiply_hm(hm1.p, hm2.p, hm3.p)
def heightmap_add_hill(hm, x, y, radius, height):
_lib.TCOD_heightmap_add_hill(hm.p, c_float( x), c_float( y),
c_float( radius), c_float( height))
def heightmap_dig_hill(hm, x, y, radius, height):
_lib.TCOD_heightmap_dig_hill(hm.p, c_float( x), c_float( y),
c_float( radius), c_float( height))
def heightmap_rain_erosion(hm, nbDrops, erosionCoef, sedimentationCoef, rnd=0):
_lib.TCOD_heightmap_rain_erosion(hm.p, nbDrops, c_float( erosionCoef),
c_float( sedimentationCoef), rnd)
def heightmap_kernel_transform(hm, kernelsize, dx, dy, weight, minLevel,
maxLevel):
FARRAY = c_float * kernelsize
IARRAY = c_int * kernelsize
cdx = IARRAY(*dx)
cdy = IARRAY(*dy)
cweight = FARRAY(*weight)
_lib.TCOD_heightmap_kernel_transform(hm.p, kernelsize, cdx, cdy, cweight,
c_float(minLevel), c_float(maxLevel))
def heightmap_add_voronoi(hm, nbPoints, nbCoef, coef, rnd=0):
FARRAY = c_float * nbCoef
ccoef = FARRAY(*coef)
_lib.TCOD_heightmap_add_voronoi(hm.p, nbPoints, nbCoef, ccoef, rnd)
def heightmap_add_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta, scale):
_lib.TCOD_heightmap_add_fbm(hm.p, noise, c_float(mulx), c_float(muly),
c_float(addx), c_float(addy),
c_float(octaves), c_float(delta),
c_float(scale))
def heightmap_scale_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta,
scale):
_lib.TCOD_heightmap_scale_fbm(hm.p, noise, c_float(mulx), c_float(muly),
c_float(addx), c_float(addy),
c_float(octaves), c_float(delta),
c_float(scale))
def heightmap_dig_bezier(hm, px, py, startRadius, startDepth, endRadius,
endDepth):
IARRAY = c_int * 4
cpx = IARRAY(*px)
cpy = IARRAY(*py)
_lib.TCOD_heightmap_dig_bezier(hm.p, cpx, cpy, c_float(startRadius),
c_float(startDepth), c_float(endRadius),
c_float(endDepth))
def heightmap_get_value(hm, x, y):
return _lib.TCOD_heightmap_get_value(hm.p, x, y)
def heightmap_get_interpolated_value(hm, x, y):
return _lib.TCOD_heightmap_get_interpolated_value(hm.p, c_float(x),
c_float(y))
def heightmap_get_slope(hm, x, y):
return _lib.TCOD_heightmap_get_slope(hm.p, x, y)
def heightmap_get_normal(hm, x, y, waterLevel):
FARRAY = c_float * 3
cn = FARRAY()
_lib.TCOD_heightmap_get_normal(hm.p, c_float(x), c_float(y), cn,
c_float(waterLevel))
return cn[0], cn[1], cn[2]
def heightmap_count_cells(hm, mi, ma):
return _lib.TCOD_heightmap_count_cells(hm.p, c_float(mi), c_float(ma))
def heightmap_has_land_on_border(hm, waterlevel):
return _lib.TCOD_heightmap_has_land_on_border(hm.p, c_float(waterlevel))
def heightmap_get_minmax(hm):
mi = c_float()
ma = c_float()
_lib.TCOD_heightmap_get_minmax(hm.p, byref(mi), byref(ma))
return mi.value, ma.value
def heightmap_delete(hm):
_lib.TCOD_heightmap_delete(hm.p)
############################
# name generator module
############################
_lib.TCOD_namegen_generate.restype = c_char_p
_lib.TCOD_namegen_generate_custom.restype = c_char_p
def namegen_parse(filename,random=0) :
_lib.TCOD_namegen_parse(filename,random)
def namegen_generate(name) :
return _lib.TCOD_namegen_generate(name, 0)
def namegen_generate_custom(name, rule) :
    return _lib.TCOD_namegen_generate_custom(name, rule, 0)
def namegen_get_sets():
nb=_lib.TCOD_namegen_get_nb_sets_wrapper()
SARRAY = c_char_p * nb;
setsa = SARRAY()
_lib.TCOD_namegen_get_sets_wrapper(setsa)
return list(setsa)
def namegen_destroy() :
_lib.TCOD_namegen_destroy()
|
mit
| 6,940,255,849,136,559,000
| 30.024016
| 177
| 0.621707
| false
| 2.699475
| false
| false
| false
|
solvo/derb
|
report_builder/forms.py
|
1
|
2294
|
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from report_builder.models import Question, Answer, Report
from report_builder.report_shortcuts import get_question_permission
class QuestionForm(forms.ModelForm):
    children = forms.CharField()
class Meta:
model = Question
fields = ('text', 'help', 'required', 'id')
widgets = {
'text': forms.Textarea(attrs={
'rows': 6,
'placeholder': 'Write your question here',
'class': 'form-control'
}),
'help': forms.Textarea(attrs={
'cols': 80,
'rows': 5,
'placeholder': 'A little help never hurts',
'class': 'form-control'
})
}
exclude = ('order',)
class AnswerForm(forms.ModelForm):
"""
TODO: docstring
"""
def clean_text(self):
text = self.cleaned_data['text']
required = get_question_permission(self.instance.question)
if required == 1 and not text:
raise ValidationError(_('This field is required'), code='required')
return text
class Meta:
model = Answer
fields = ('annotation', 'text')
widgets = {
'annotation': forms.Textarea(attrs={
'rows': 9,
'placeholder': 'Annotations',
'class': 'form-control'
}),
'text': forms.Textarea(attrs={
'rows': 6,
                'placeholder': 'Write your answer here',
'class': 'form-control'
})
}
def save(self, db_use):
instance = super(AnswerForm, self).save(db_use)
instance.display_text = instance.text
return instance
class AdminReportForm(forms.ModelForm):
'''
Form for creating and updating a Report object
    This implementation is meant to be used in the admin report view
'''
template = forms.CharField(widget=forms.HiddenInput, max_length=1024**3, initial=' ')
order = forms.CharField(widget=forms.HiddenInput, max_length=10, initial='-1')
class Meta:
model = Report
exclude = ('type', 'questions')
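# Hedged usage sketch (illustrative only, not part of the original module).
# `request` and `answer` are hypothetical objects supplied by a view.
def _save_answer_example(request, answer):
    form = AnswerForm(request.POST, instance=answer)
    if form.is_valid():
        # save(db_use) mirrors the overridden signature above; True commits to the database
        return form.save(True)
    return None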
|
gpl-3.0
| 230,806,146,539,116,030
| 29.586667
| 89
| 0.559285
| false
| 4.542574
| false
| false
| false
|
ddiazpinto/python-redsys
|
setup.py
|
1
|
1065
|
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='redsys',
version='0.2.6',
packages=find_packages(),
include_package_data=True,
license='MIT License',
    description='A simple, clean and less dependent client for handling payments through RedSys.',
long_description=README,
url='https://github.com/ddiazpinto/python-redsys',
author='David Díaz',
author_email='d.diazp@gmail.com',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
install_requires=['pycrypto>=2.6,<2.7']
)
|
mit
| -1,102,163,221,218,876,400
| 33.354839
| 96
| 0.646948
| false
| 3.622449
| false
| false
| false
|
eleme/archer
|
archer/cli.py
|
1
|
5921
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append('.')
import importlib
import traceback
import click
import re
from ._compat import iteritems
from .helper import make_client
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def locate_app(app_id):
"""Attempts to locate the application."""
if app_id is None:
return find_app_in_cwd()
if ':' in app_id:
module, app_obj = app_id.split(':', 1)
else:
module = app_id
app_obj = None
__import__(module)
mod = sys.modules[module]
if app_obj is None:
app = find_best_app(mod)
else:
app = getattr(mod, app_obj, None)
if app is None:
raise RuntimeError('Failed to find application in module "%s"'
% module)
return app
def find_best_app(module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from .app import Archer
for attr_name in 'app', 'application':
app = getattr(module, attr_name, None)
if app is not None and isinstance(app, Archer):
return app
# Otherwise find the only object that is a Archer instance.
matches = [v for k, v in iteritems(module.__dict__)
if isinstance(v, Archer)]
if len(matches) == 1:
return matches[0]
raise NoAppException('Failed to find application in module "%s". Are '
'you sure it contains a Archer application? '
% module.__name__)
def find_app_in_cwd():
# from examples import app
# return app
trial_modules = []
for f in os.listdir(os.getcwd()):
if f.endswith('.py') and f not in ('setup.py',):
trial_modules.append(importlib.import_module(f[:-3]))
if os.path.isdir(f):
# import pdb
# pdb.set_trace()
fs = os.listdir(f)
if '__init__.py' in fs:
trial_modules.append(importlib.import_module(f))
for module in trial_modules:
try:
return find_best_app(module)
except NoAppException:
continue
raise NoAppException
class Config(object):
def __init__(self):
self.app = None
pass_config = click.make_pass_decorator(Config, ensure=True)
@click.group()
@click.option('--app', default=None)
@pass_config
def main(config, app):
config.app = app
@main.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=6000,
help='The port to bind to.')
@click.option('--reload/--no-reload', default=True,
              help='Enable or disable the reloader. By default the reloader '
                   'is enabled.')
@pass_config
def run(config, host, port, reload):
app = locate_app(config.app)
app.run(host, port, use_reloader=reload)
@main.command('shell', short_help='Runs a shell in the app context.')
@pass_config
def shell(config):
app = locate_app(config.app)
banner = 'Python %s on %s\nApp: %s%s\n' % (
sys.version,
sys.platform,
app.name,
app.debug and ' [debug]' or '',
)
ctx = {'a': 123}
ctx.update(app.make_shell_context())
sys.path.append('.')
try:
import IPython
IPython.embed(user_ns=ctx, banner1=banner)
except ImportError:
import code
code.interact(banner=banner, local=ctx)
@main.command('call', short_help='Runs a client')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=6000,
help='The port to bind to.')
@click.argument('api')
@click.argument('arguments', required=False, nargs=-1)
@pass_config
def call(config, host, port, api, arguments):
"""
call an api with given arguments, this is a command for quickly
testing if a api is working, it's better to write test case
warning: arguments of customized thrift type not supported yet
"""
arguments = ' '.join(arguments)
    if ',' in arguments:
        sep = r'\s*,\s*'
    else:
        sep = r'\s+'
args = re.split(sep, arguments.strip())
params = []
for arg in args:
if ':' in arg:
value, type_ = arg.split(':')
type_ = getattr(sys.modules['__builtin__'], type_)
value = type_(value)
params.append(value)
else:
try:
params.append(int(arg))
except ValueError:
params.append(arg)
app = locate_app(config.app)
client = make_client(app.service, host, port)
try:
result = getattr(client, api)(*params)
if result is not None:
click.echo(result)
else:
click.echo('OK')
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        click.echo(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)))
@main.command('client', short_help='Runs a client shell')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=6000,
help='The port to bind to.')
@pass_config
def client(config, host, port):
from .helper import make_client
app = locate_app(config.app)
client = make_client(app.service, host, port)
banner = 'Python %s on %s\nApp: %s%s\n' % (
sys.version,
sys.platform,
app.name,
app.debug and ' [debug]' or '',
)
ctx = {'client': client}
ctx.update(app.make_shell_context())
sys.path.append('.')
try:
import IPython
IPython.embed(user_ns=ctx, banner1=banner)
except ImportError:
import code
code.interact(banner=banner, local=ctx)
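# Hedged usage sketch (illustrative only; "myapp:app" and the "ping" api are
# hypothetical). It drives the `call` command above, including the typed
# argument format described in its docstring, through click's test runner.
def _cli_example():
    from click.testing import CliRunner
    runner = CliRunner()
    # equivalent to: <prog> --app myapp:app call ping 1:int,hello
    result = runner.invoke(main, ['--app', 'myapp:app', 'call', 'ping', '1:int,hello'])
    return result.output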
|
mit
| -3,892,443,103,610,141,000
| 26.539535
| 78
| 0.582334
| false
| 3.728589
| true
| false
| false
|
saurabh6790/frappe
|
frappe/model/document.py
|
1
|
43369
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
import time
from frappe import _, msgprint, is_whitelisted
from frappe.utils import flt, cstr, now, get_datetime_str, file_lock, date_diff
from frappe.model.base_document import BaseDocument, get_controller
from frappe.model.naming import set_new_name
from six import iteritems, string_types
from werkzeug.exceptions import NotFound, Forbidden
import hashlib, json
from frappe.model import optional_fields, table_fields
from frappe.model.workflow import validate_workflow
from frappe.model.workflow import set_workflow_state_on_action
from frappe.utils.global_search import update_global_search
from frappe.integrations.doctype.webhook import run_webhooks
from frappe.desk.form.document_follow import follow_document
from frappe.desk.utils import slug
from frappe.core.doctype.server_script.server_script_utils import run_server_script_for_doc_event
# once_only validation
# methods
def get_doc(*args, **kwargs):
"""returns a frappe.model.Document object.
:param arg1: Document dict or DocType name.
:param arg2: [optional] document name.
:param for_update: [optional] select document for update.
There are multiple ways to call `get_doc`
# will fetch the latest user object (with child table) from the database
user = get_doc("User", "test@example.com")
# create a new object
	user = get_doc({
		"doctype": "User",
		"email_id": "test@example.com",
		"roles": [
			{"role": "System Manager"}
		]
	})
# create new object with keyword arguments
user = get_doc(doctype='User', email_id='test@example.com')
# select a document for update
user = get_doc("User", "test@example.com", for_update=True)
"""
if args:
if isinstance(args[0], BaseDocument):
# already a document
return args[0]
elif isinstance(args[0], string_types):
doctype = args[0]
elif isinstance(args[0], dict):
# passed a dict
kwargs = args[0]
else:
raise ValueError('First non keyword argument must be a string or dict')
if len(args) < 2 and kwargs:
if 'doctype' in kwargs:
doctype = kwargs['doctype']
else:
raise ValueError('"doctype" is a required key')
controller = get_controller(doctype)
if controller:
return controller(*args, **kwargs)
raise ImportError(doctype)
class Document(BaseDocument):
"""All controllers inherit from `Document`."""
def __init__(self, *args, **kwargs):
"""Constructor.
:param arg1: DocType name as string or document **dict**
:param arg2: Document name, if `arg1` is DocType name.
If DocType name and document name are passed, the object will load
all values (including child documents) from the database.
"""
self.doctype = self.name = None
self._default_new_docs = {}
self.flags = frappe._dict()
if args and args[0] and isinstance(args[0], string_types):
			# first argument is doctype
if len(args)==1:
# single
self.doctype = self.name = args[0]
else:
self.doctype = args[0]
if isinstance(args[1], dict):
# filter
self.name = frappe.db.get_value(args[0], args[1], "name")
if self.name is None:
frappe.throw(_("{0} {1} not found").format(_(args[0]), args[1]),
frappe.DoesNotExistError)
else:
self.name = args[1]
if 'for_update' in kwargs:
self.flags.for_update = kwargs.get('for_update')
self.load_from_db()
return
if args and args[0] and isinstance(args[0], dict):
# first argument is a dict
kwargs = args[0]
if kwargs:
# init base document
super(Document, self).__init__(kwargs)
self.init_valid_columns()
else:
# incorrect arguments. let's not proceed.
raise ValueError('Illegal arguments')
@staticmethod
def whitelist(fn):
"""Decorator: Whitelist method to be called remotely via REST API."""
frappe.whitelist()(fn)
return fn
def reload(self):
"""Reload document from database"""
self.load_from_db()
def load_from_db(self):
"""Load document and children from database and create properties
from fields"""
if not getattr(self, "_metaclass", False) and self.meta.issingle:
single_doc = frappe.db.get_singles_dict(self.doctype)
if not single_doc:
single_doc = frappe.new_doc(self.doctype).as_dict()
single_doc["name"] = self.doctype
del single_doc["__islocal"]
super(Document, self).__init__(single_doc)
self.init_valid_columns()
self._fix_numeric_types()
else:
d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1, for_update=self.flags.for_update)
if not d:
frappe.throw(_("{0} {1} not found").format(_(self.doctype), self.name), frappe.DoesNotExistError)
super(Document, self).__init__(d)
if self.name=="DocType" and self.doctype=="DocType":
from frappe.model.meta import DOCTYPE_TABLE_FIELDS
table_fields = DOCTYPE_TABLE_FIELDS
else:
table_fields = self.meta.get_table_fields()
for df in table_fields:
children = frappe.db.get_values(df.options,
{"parent": self.name, "parenttype": self.doctype, "parentfield": df.fieldname},
"*", as_dict=True, order_by="idx asc")
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
# sometimes __setup__ can depend on child values, hence calling again at the end
if hasattr(self, "__setup__"):
self.__setup__()
def get_latest(self):
if not getattr(self, "latest", None):
self.latest = frappe.get_doc(self.doctype, self.name)
return self.latest
def check_permission(self, permtype='read', permlevel=None):
"""Raise `frappe.PermissionError` if not permitted"""
if not self.has_permission(permtype):
self.raise_no_permission_to(permlevel or permtype)
def has_permission(self, permtype="read", verbose=False):
"""Call `frappe.has_permission` if `self.flags.ignore_permissions`
is not set.
:param permtype: one of `read`, `write`, `submit`, `cancel`, `delete`"""
if self.flags.ignore_permissions:
return True
return frappe.has_permission(self.doctype, permtype, self, verbose=verbose)
def raise_no_permission_to(self, perm_type):
"""Raise `frappe.PermissionError`."""
frappe.flags.error_message = _('Insufficient Permission for {0}').format(self.doctype)
raise frappe.PermissionError
def insert(self, ignore_permissions=None, ignore_links=None, ignore_if_duplicate=False,
ignore_mandatory=None, set_name=None, set_child_names=True):
"""Insert the document in the database (as a new document).
This will check for user permissions and execute `before_insert`,
`validate`, `on_update`, `after_insert` methods if they are written.
:param ignore_permissions: Do not check permissions if True."""
if self.flags.in_print:
return
self.flags.notifications_executed = []
if ignore_permissions!=None:
self.flags.ignore_permissions = ignore_permissions
if ignore_links!=None:
self.flags.ignore_links = ignore_links
if ignore_mandatory!=None:
self.flags.ignore_mandatory = ignore_mandatory
self.set("__islocal", True)
self.check_permission("create")
self._set_defaults()
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.run_method("before_insert")
self._validate_links()
self.set_new_name(set_name=set_name, set_child_names=set_child_names)
self.set_parent_in_children()
self.validate_higher_perm_levels()
self.flags.in_insert = True
self.run_before_save_methods()
self._validate()
self.set_docstatus()
self.flags.in_insert = False
# run validate, on update etc.
# parent
if getattr(self.meta, "issingle", 0):
self.update_single(self.get_valid_dict())
else:
try:
self.db_insert()
except frappe.DuplicateEntryError as e:
if not ignore_if_duplicate:
raise e
# children
for d in self.get_all_children():
d.db_insert()
self.run_method("after_insert")
self.flags.in_insert = True
if self.get("amended_from"):
self.copy_attachments_from_amended_from()
# flag to prevent creation of event update log for create and update both
# during document creation
self.flags.update_log_for_doc_creation = True
self.run_post_save_methods()
self.flags.in_insert = False
# delete __islocal
if hasattr(self, "__islocal"):
delattr(self, "__islocal")
# clear unsaved flag
if hasattr(self, "__unsaved"):
delattr(self, "__unsaved")
if not (frappe.flags.in_migrate or frappe.local.flags.in_install or frappe.flags.in_setup_wizard):
follow_document(self.doctype, self.name, frappe.session.user)
return self
def save(self, *args, **kwargs):
"""Wrapper for _save"""
return self._save(*args, **kwargs)
def _save(self, ignore_permissions=None, ignore_version=None):
"""Save the current document in the database in the **DocType**'s table or
`tabSingles` (for single types).
This will check for user permissions and execute
`validate` before updating, `on_update` after updating triggers.
:param ignore_permissions: Do not check permissions if True.
:param ignore_version: Do not save version if True."""
if self.flags.in_print:
return
self.flags.notifications_executed = []
if ignore_permissions!=None:
self.flags.ignore_permissions = ignore_permissions
self.flags.ignore_version = frappe.flags.in_test if ignore_version is None else ignore_version
if self.get("__islocal") or not self.get("name"):
self.insert()
return
self.check_permission("write", "save")
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.set_parent_in_children()
self.set_name_in_children()
self.validate_higher_perm_levels()
self._validate_links()
self.run_before_save_methods()
if self._action != "cancel":
self._validate()
if self._action == "update_after_submit":
self.validate_update_after_submit()
self.set_docstatus()
# parent
if self.meta.issingle:
self.update_single(self.get_valid_dict())
else:
self.db_update()
self.update_children()
self.run_post_save_methods()
# clear unsaved flag
if hasattr(self, "__unsaved"):
delattr(self, "__unsaved")
return self
def copy_attachments_from_amended_from(self):
"""Copy attachments from `amended_from`"""
from frappe.desk.form.load import get_attachments
#loop through attachments
for attach_item in get_attachments(self.doctype, self.amended_from):
#save attachments to new doc
_file = frappe.get_doc({
"doctype": "File",
"file_url": attach_item.file_url,
"file_name": attach_item.file_name,
"attached_to_name": self.name,
"attached_to_doctype": self.doctype,
"folder": "Home/Attachments"})
_file.save()
def update_children(self):
"""update child tables"""
for df in self.meta.get_table_fields():
self.update_child_table(df.fieldname, df)
def update_child_table(self, fieldname, df=None):
"""sync child table for given fieldname"""
rows = []
if not df:
df = self.meta.get_field(fieldname)
for d in self.get(df.fieldname):
d.db_update()
rows.append(d.name)
if df.options in (self.flags.ignore_children_type or []):
# do not delete rows for this because of flags
# hack for docperm :(
return
if rows:
# select rows that do not match the ones in the document
deleted_rows = frappe.db.sql("""select name from `tab{0}` where parent=%s
and parenttype=%s and parentfield=%s
and name not in ({1})""".format(df.options, ','.join(['%s'] * len(rows))),
[self.name, self.doctype, fieldname] + rows)
if len(deleted_rows) > 0:
# delete rows that do not match the ones in the document
frappe.db.sql("""delete from `tab{0}` where name in ({1})""".format(df.options,
','.join(['%s'] * len(deleted_rows))), tuple(row[0] for row in deleted_rows))
else:
# no rows found, delete all rows
frappe.db.sql("""delete from `tab{0}` where parent=%s
and parenttype=%s and parentfield=%s""".format(df.options),
(self.name, self.doctype, fieldname))
def get_doc_before_save(self):
return getattr(self, '_doc_before_save', None)
def has_value_changed(self, fieldname):
'''Returns true if value is changed before and after saving'''
previous = self.get_doc_before_save()
return previous.get(fieldname)!=self.get(fieldname) if previous else True
def set_new_name(self, force=False, set_name=None, set_child_names=True):
"""Calls `frappe.naming.set_new_name` for parent and child docs."""
if self.flags.name_set and not force:
return
		# If autoname is set as Prompt (name)
if self.get("__newname"):
self.name = self.get("__newname")
self.flags.name_set = True
return
if set_name:
self.name = set_name
else:
set_new_name(self)
if set_child_names:
# set name for children
for d in self.get_all_children():
set_new_name(d)
self.flags.name_set = True
def get_title(self):
"""Get the document title based on title_field or `title` or `name`"""
return self.get(self.meta.get_title_field())
def set_title_field(self):
"""Set title field based on template"""
def get_values():
values = self.as_dict()
# format values
for key, value in iteritems(values):
if value==None:
values[key] = ""
return values
if self.meta.get("title_field")=="title":
df = self.meta.get_field(self.meta.title_field)
if df.options:
self.set(df.fieldname, df.options.format(**get_values()))
elif self.is_new() and not self.get(df.fieldname) and df.default:
# set default title for new transactions (if default)
self.set(df.fieldname, df.default.format(**get_values()))
def update_single(self, d):
"""Updates values for Single type Document in `tabSingles`."""
frappe.db.sql("""delete from `tabSingles` where doctype=%s""", self.doctype)
for field, value in iteritems(d):
if field != "doctype":
frappe.db.sql("""insert into `tabSingles` (doctype, field, value)
values (%s, %s, %s)""", (self.doctype, field, value))
if self.doctype in frappe.db.value_cache:
del frappe.db.value_cache[self.doctype]
def set_user_and_timestamp(self):
self._original_modified = self.modified
self.modified = now()
self.modified_by = frappe.session.user
if not self.creation:
self.creation = self.modified
if not self.owner:
self.owner = self.modified_by
for d in self.get_all_children():
d.modified = self.modified
d.modified_by = self.modified_by
if not d.owner:
d.owner = self.owner
if not d.creation:
d.creation = self.creation
frappe.flags.currently_saving.append((self.doctype, self.name))
def set_docstatus(self):
if self.docstatus==None:
self.docstatus=0
for d in self.get_all_children():
d.docstatus = self.docstatus
def _validate(self):
self._validate_mandatory()
self._validate_data_fields()
self._validate_selects()
self._validate_non_negative()
self._validate_length()
self._extract_images_from_text_editor()
self._sanitize_content()
self._save_passwords()
self.validate_workflow()
children = self.get_all_children()
for d in children:
d._validate_data_fields()
d._validate_selects()
d._validate_non_negative()
d._validate_length()
d._extract_images_from_text_editor()
d._sanitize_content()
d._save_passwords()
if self.is_new():
# don't set fields like _assign, _comments for new doc
for fieldname in optional_fields:
self.set(fieldname, None)
else:
self.validate_set_only_once()
def _validate_non_negative(self):
def get_msg(df):
if self.parentfield:
return "{} {} #{}: {} {}".format(frappe.bold(_(self.doctype)),
_("Row"), self.idx, _("Value cannot be negative for"), frappe.bold(_(df.label)))
else:
return _("Value cannot be negative for {0}: {1}").format(_(df.parent), frappe.bold(_(df.label)))
for df in self.meta.get('fields', {'non_negative': ('=', 1),
'fieldtype': ('in', ['Int', 'Float', 'Currency'])}):
if flt(self.get(df.fieldname)) < 0:
msg = get_msg(df)
frappe.throw(msg, frappe.NonNegativeError, title=_("Negative Value"))
def validate_workflow(self):
"""Validate if the workflow transition is valid"""
if frappe.flags.in_install == 'frappe': return
workflow = self.meta.get_workflow()
if workflow:
validate_workflow(self)
if not self._action == 'save':
set_workflow_state_on_action(self, workflow, self._action)
def validate_set_only_once(self):
"""Validate that fields are not changed if not in insert"""
set_only_once_fields = self.meta.get_set_only_once_fields()
if set_only_once_fields and self._doc_before_save:
# document exists before saving
for field in set_only_once_fields:
fail = False
value = self.get(field.fieldname)
original_value = self._doc_before_save.get(field.fieldname)
if field.fieldtype in table_fields:
fail = not self.is_child_table_same(field.fieldname)
elif field.fieldtype in ('Date', 'Datetime', 'Time'):
fail = str(value) != str(original_value)
else:
fail = value != original_value
if fail:
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(field.fieldname)),
frappe.CannotChangeConstantError)
return False
def is_child_table_same(self, fieldname):
"""Validate child table is same as original table before saving"""
value = self.get(fieldname)
original_value = self._doc_before_save.get(fieldname)
same = True
if len(original_value) != len(value):
same = False
else:
# check all child entries
for i, d in enumerate(original_value):
new_child = value[i].as_dict(convert_dates_to_str = True)
original_child = d.as_dict(convert_dates_to_str = True)
# all fields must be same other than modified and modified_by
for key in ('modified', 'modified_by', 'creation'):
del new_child[key]
del original_child[key]
if original_child != new_child:
same = False
break
return same
def apply_fieldlevel_read_permissions(self):
"""Remove values the user is not allowed to read (called when loading in desk)"""
if frappe.session.user == "Administrator":
return
has_higher_permlevel = False
all_fields = self.meta.fields.copy()
for table_field in self.meta.get_table_fields():
all_fields += frappe.get_meta(table_field.options).fields or []
for df in all_fields:
if df.permlevel > 0:
has_higher_permlevel = True
break
if not has_higher_permlevel:
return
has_access_to = self.get_permlevel_access('read')
for df in self.meta.fields:
if df.permlevel and not df.permlevel in has_access_to:
self.set(df.fieldname, None)
for table_field in self.meta.get_table_fields():
for df in frappe.get_meta(table_field.options).fields or []:
if df.permlevel and not df.permlevel in has_access_to:
for child in self.get(table_field.fieldname) or []:
child.set(df.fieldname, None)
def validate_higher_perm_levels(self):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
if self.flags.ignore_permissions or frappe.flags.in_install:
return
if frappe.session.user == "Administrator":
return
has_access_to = self.get_permlevel_access()
high_permlevel_fields = self.meta.get_high_permlevel_fields()
if high_permlevel_fields:
self.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
# If new record then don't reset the values for child table
if self.is_new(): return
# check for child tables
for df in self.meta.get_table_fields():
high_permlevel_fields = frappe.get_meta(df.options).get_high_permlevel_fields()
if high_permlevel_fields:
for d in self.get(df.fieldname):
d.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
def get_permlevel_access(self, permission_type='write'):
if not hasattr(self, "_has_access_to"):
self._has_access_to = {}
self._has_access_to[permission_type] = []
roles = frappe.get_roles()
for perm in self.get_permissions():
if perm.role in roles and perm.get(permission_type):
if perm.permlevel not in self._has_access_to[permission_type]:
self._has_access_to[permission_type].append(perm.permlevel)
return self._has_access_to[permission_type]
def has_permlevel_access_to(self, fieldname, df=None, permission_type='read'):
if not df:
df = self.meta.get_field(fieldname)
return df.permlevel in self.get_permlevel_access(permission_type)
def get_permissions(self):
if self.meta.istable:
# use parent permissions
permissions = frappe.get_meta(self.parenttype).permissions
else:
permissions = self.meta.permissions
return permissions
def _set_defaults(self):
if frappe.flags.in_import:
return
new_doc = frappe.new_doc(self.doctype, as_dict=True)
self.update_if_missing(new_doc)
# children
for df in self.meta.get_table_fields():
new_doc = frappe.new_doc(df.options, as_dict=True)
value = self.get(df.fieldname)
if isinstance(value, list):
for d in value:
d.update_if_missing(new_doc)
def check_if_latest(self):
"""Checks if `modified` timestamp provided by document being updated is same as the
		`modified` timestamp in the database. If there is a difference, the document has been
updated in the database after the current copy was read. Will throw an error if
timestamps don't match.
Will also validate document transitions (Save > Submit > Cancel) calling
`self.check_docstatus_transition`."""
conflict = False
self._action = "save"
if not self.get('__islocal') and not self.meta.get('is_virtual'):
if self.meta.issingle:
modified = frappe.db.sql("""select value from tabSingles
where doctype=%s and field='modified' for update""", self.doctype)
modified = modified and modified[0][0]
if modified and modified != cstr(self._original_modified):
conflict = True
else:
tmp = frappe.db.sql("""select modified, docstatus from `tab{0}`
where name = %s for update""".format(self.doctype), self.name, as_dict=True)
if not tmp:
frappe.throw(_("Record does not exist"))
else:
tmp = tmp[0]
modified = cstr(tmp.modified)
if modified and modified != cstr(self._original_modified):
conflict = True
self.check_docstatus_transition(tmp.docstatus)
if conflict:
frappe.msgprint(_("Error: Document has been modified after you have opened it") \
+ (" (%s, %s). " % (modified, self.modified)) \
+ _("Please refresh to get the latest document."),
raise_exception=frappe.TimestampMismatchError)
else:
self.check_docstatus_transition(0)
def check_docstatus_transition(self, docstatus):
"""Ensures valid `docstatus` transition.
Valid transitions are (number in brackets is `docstatus`):
- Save (0) > Save (0)
- Save (0) > Submit (1)
- Submit (1) > Submit (1)
- Submit (1) > Cancel (2)
"""
if not self.docstatus:
self.docstatus = 0
if docstatus==0:
if self.docstatus==0:
self._action = "save"
elif self.docstatus==1:
self._action = "submit"
self.check_permission("submit")
else:
raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 0 to 2"))
elif docstatus==1:
if self.docstatus==1:
self._action = "update_after_submit"
self.check_permission("submit")
elif self.docstatus==2:
self._action = "cancel"
self.check_permission("cancel")
else:
raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 1 to 0"))
elif docstatus==2:
raise frappe.ValidationError(_("Cannot edit cancelled document"))
def set_parent_in_children(self):
"""Updates `parent` and `parenttype` property in all children."""
for d in self.get_all_children():
d.parent = self.name
d.parenttype = self.doctype
def set_name_in_children(self):
# Set name for any new children
for d in self.get_all_children():
if not d.name:
set_new_name(d)
def validate_update_after_submit(self):
if self.flags.ignore_validate_update_after_submit:
return
self._validate_update_after_submit()
for d in self.get_all_children():
if d.is_new() and self.meta.get_field(d.parentfield).allow_on_submit:
# in case of a new row, don't validate allow on submit, if table is allow on submit
continue
d._validate_update_after_submit()
# TODO check only allowed values are updated
def _validate_mandatory(self):
if self.flags.ignore_mandatory:
return
missing = self._get_missing_mandatory_fields()
for d in self.get_all_children():
missing.extend(d._get_missing_mandatory_fields())
if not missing:
return
for fieldname, msg in missing:
msgprint(msg)
if frappe.flags.print_messages:
print(self.as_json().encode("utf-8"))
raise frappe.MandatoryError('[{doctype}, {name}]: {fields}'.format(
fields=", ".join((each[0] for each in missing)),
doctype=self.doctype,
name=self.name))
def _validate_links(self):
if self.flags.ignore_links or self._action == "cancel":
return
invalid_links, cancelled_links = self.get_invalid_links()
for d in self.get_all_children():
result = d.get_invalid_links(is_submittable=self.meta.is_submittable)
invalid_links.extend(result[0])
cancelled_links.extend(result[1])
if invalid_links:
msg = ", ".join((each[2] for each in invalid_links))
frappe.throw(_("Could not find {0}").format(msg),
frappe.LinkValidationError)
if cancelled_links:
msg = ", ".join((each[2] for each in cancelled_links))
frappe.throw(_("Cannot link cancelled document: {0}").format(msg),
frappe.CancelledLinkError)
def get_all_children(self, parenttype=None):
"""Returns all children documents from **Table** type field in a list."""
ret = []
for df in self.meta.get("fields", {"fieldtype": ['in', table_fields]}):
if parenttype:
if df.options==parenttype:
return self.get(df.fieldname)
value = self.get(df.fieldname)
if isinstance(value, list):
ret.extend(value)
return ret
def run_method(self, method, *args, **kwargs):
"""run standard triggers, plus those in hooks"""
if "flags" in kwargs:
del kwargs["flags"]
if hasattr(self, method) and hasattr(getattr(self, method), "__call__"):
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
else:
# hack! to run hooks even if method does not exist
fn = lambda self, *args, **kwargs: None
fn.__name__ = str(method)
out = Document.hook(fn)(self, *args, **kwargs)
self.run_notifications(method)
run_webhooks(self, method)
run_server_script_for_doc_event(self, method)
return out
def run_trigger(self, method, *args, **kwargs):
return self.run_method(method, *args, **kwargs)
def run_notifications(self, method):
"""Run notifications for this method"""
if (frappe.flags.in_import and frappe.flags.mute_emails) or frappe.flags.in_patch or frappe.flags.in_install:
return
if self.flags.notifications_executed==None:
self.flags.notifications_executed = []
from frappe.email.doctype.notification.notification import evaluate_alert
if self.flags.notifications == None:
alerts = frappe.cache().hget('notifications', self.doctype)
if alerts==None:
alerts = frappe.get_all('Notification', fields=['name', 'event', 'method'],
filters={'enabled': 1, 'document_type': self.doctype})
frappe.cache().hset('notifications', self.doctype, alerts)
self.flags.notifications = alerts
if not self.flags.notifications:
return
def _evaluate_alert(alert):
if not alert.name in self.flags.notifications_executed:
evaluate_alert(self, alert.name, alert.event)
self.flags.notifications_executed.append(alert.name)
event_map = {
"on_update": "Save",
"after_insert": "New",
"on_submit": "Submit",
"on_cancel": "Cancel"
}
if not self.flags.in_insert:
# value change is not applicable in insert
event_map['on_change'] = 'Value Change'
for alert in self.flags.notifications:
event = event_map.get(method, None)
if event and alert.event == event:
_evaluate_alert(alert)
elif alert.event=='Method' and method == alert.method:
_evaluate_alert(alert)
@whitelist.__func__
def _submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
self.docstatus = 1
self.save()
@whitelist.__func__
def _cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves."""
self.docstatus = 2
self.save()
@whitelist.__func__
def submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
self._submit()
@whitelist.__func__
def cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves."""
self._cancel()
def delete(self, ignore_permissions=False):
"""Delete document."""
frappe.delete_doc(self.doctype, self.name, ignore_permissions = ignore_permissions, flags=self.flags)
def run_before_save_methods(self):
"""Run standard methods before `INSERT` or `UPDATE`. Standard Methods are:
- `validate`, `before_save` for **Save**.
- `validate`, `before_submit` for **Submit**.
- `before_cancel` for **Cancel**
- `before_update_after_submit` for **Update after Submit**
Will also update title_field if set"""
self.load_doc_before_save()
self.reset_seen()
# before_validate method should be executed before ignoring validations
if self._action in ("save", "submit"):
self.run_method("before_validate")
if self.flags.ignore_validate:
return
if self._action=="save":
self.run_method("validate")
self.run_method("before_save")
elif self._action=="submit":
self.run_method("validate")
self.run_method("before_submit")
elif self._action=="cancel":
self.run_method("before_cancel")
elif self._action=="update_after_submit":
self.run_method("before_update_after_submit")
self.set_title_field()
def load_doc_before_save(self):
"""Save load document from db before saving"""
self._doc_before_save = None
if not self.is_new():
try:
self._doc_before_save = frappe.get_doc(self.doctype, self.name)
except frappe.DoesNotExistError:
self._doc_before_save = None
frappe.clear_last_message()
def run_post_save_methods(self):
"""Run standard methods after `INSERT` or `UPDATE`. Standard Methods are:
- `on_update` for **Save**.
- `on_update`, `on_submit` for **Submit**.
- `on_cancel` for **Cancel**
- `update_after_submit` for **Update after Submit**"""
doc_before_save = self.get_doc_before_save()
if self._action=="save":
self.run_method("on_update")
elif self._action=="submit":
self.run_method("on_update")
self.run_method("on_submit")
elif self._action=="cancel":
self.run_method("on_cancel")
self.check_no_back_links_exist()
elif self._action=="update_after_submit":
self.run_method("on_update_after_submit")
self.clear_cache()
self.notify_update()
update_global_search(self)
self.save_version()
self.run_method('on_change')
if (self.doctype, self.name) in frappe.flags.currently_saving:
frappe.flags.currently_saving.remove((self.doctype, self.name))
self.latest = None
def clear_cache(self):
frappe.clear_document_cache(self.doctype, self.name)
def reset_seen(self):
"""Clear _seen property and set current user as seen"""
if getattr(self.meta, 'track_seen', False):
frappe.db.set_value(self.doctype, self.name, "_seen", json.dumps([frappe.session.user]), update_modified=False)
def notify_update(self):
"""Publish realtime that the current document is modified"""
if frappe.flags.in_patch: return
frappe.publish_realtime("doc_update", {"modified": self.modified, "doctype": self.doctype, "name": self.name},
doctype=self.doctype, docname=self.name, after_commit=True)
if not self.meta.get("read_only") and not self.meta.get("issingle") and \
not self.meta.get("istable"):
data = {
"doctype": self.doctype,
"name": self.name,
"user": frappe.session.user
}
frappe.publish_realtime("list_update", data, after_commit=True)
def db_set(self, fieldname, value=None, update_modified=True, notify=False, commit=False):
"""Set a value in the document object, update the timestamp and update the database.
WARNING: This method does not trigger controller validations and should
be used very carefully.
:param fieldname: fieldname of the property to be updated, or a {"field":"value"} dictionary
:param value: value of the property to be updated
:param update_modified: default True. updates the `modified` and `modified_by` properties
		:param notify: default False. run doc.notify_update() to send updates via socketio
:param commit: default False. run frappe.db.commit()
"""
if isinstance(fieldname, dict):
self.update(fieldname)
else:
self.set(fieldname, value)
if update_modified and (self.doctype, self.name) not in frappe.flags.currently_saving:
# don't update modified timestamp if called from post save methods
# like on_update or on_submit
self.set("modified", now())
self.set("modified_by", frappe.session.user)
self.load_doc_before_save()
# to trigger notification on value change
self.run_method('before_change')
frappe.db.set_value(self.doctype, self.name, fieldname, value,
self.modified, self.modified_by, update_modified=update_modified)
self.run_method('on_change')
if notify:
self.notify_update()
self.clear_cache()
if commit:
frappe.db.commit()
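	# Hedged usage sketch (illustrative; the DocType and fields are hypothetical):
	#   doc = frappe.get_doc("ToDo", name)
	#   doc.db_set("status", "Closed")                       # single field
	#   doc.db_set({"status": "Closed", "priority": "Low"})  # dict form, per the docstring above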
def db_get(self, fieldname):
"""get database value for this fieldname"""
return frappe.db.get_value(self.doctype, self.name, fieldname)
def check_no_back_links_exist(self):
"""Check if document links to any active document before Cancel."""
from frappe.model.delete_doc import check_if_doc_is_linked, check_if_doc_is_dynamically_linked
if not self.flags.ignore_links:
check_if_doc_is_linked(self, method="Cancel")
check_if_doc_is_dynamically_linked(self, method="Cancel")
def save_version(self):
"""Save version info"""
# don't track version under following conditions
if (not getattr(self.meta, 'track_changes', False)
or self.doctype == 'Version'
or self.flags.ignore_version
or frappe.flags.in_install
or (not self._doc_before_save and frappe.flags.in_patch)):
return
version = frappe.new_doc('Version')
if not self._doc_before_save:
version.for_insert(self)
version.insert(ignore_permissions=True)
elif version.set_diff(self._doc_before_save, self):
version.insert(ignore_permissions=True)
if not frappe.flags.in_migrate:
# follow since you made a change?
follow_document(self.doctype, self.name, frappe.session.user)
@staticmethod
def hook(f):
"""Decorator: Make method `hookable` (i.e. extensible by another app).
Note: If each hooked method returns a value (dict), then all returns are
collated in one dict and returned. Ideally, don't return values in hookable
methods, set properties in the document."""
def add_to_return_value(self, new_return_value):
if isinstance(new_return_value, dict):
if not self.get("_return_value"):
self._return_value = {}
self._return_value.update(new_return_value)
else:
self._return_value = new_return_value or self.get("_return_value")
def compose(fn, *hooks):
def runner(self, method, *args, **kwargs):
add_to_return_value(self, fn(self, *args, **kwargs))
for f in hooks:
add_to_return_value(self, f(self, method, *args, **kwargs))
return self._return_value
return runner
def composer(self, *args, **kwargs):
hooks = []
method = f.__name__
doc_events = frappe.get_doc_hooks()
for handler in doc_events.get(self.doctype, {}).get(method, []) \
+ doc_events.get("*", {}).get(method, []):
hooks.append(frappe.get_attr(handler))
composed = compose(f, *hooks)
return composed(self, method, *args, **kwargs)
return composer
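	# Hedged illustration (module and handler names are hypothetical): hooked
	# methods are registered in an app's hooks.py, e.g.
	#   doc_events = {
	#       "ToDo": {"on_update": "myapp.events.todo_on_update"},
	#       "*":    {"on_change": "myapp.events.any_doc_changed"},
	#   }
	# and each handler is called as handler(doc, method, *args, **kwargs),
	# as composed by `hook` above.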
def is_whitelisted(self, method_name):
method = getattr(self, method_name, None)
if not method:
raise NotFound("Method {0} not found".format(method_name))
is_whitelisted(getattr(method, '__func__', method))
def validate_value(self, fieldname, condition, val2, doc=None, raise_exception=None):
"""Check that value of fieldname should be 'condition' val2
else throw Exception."""
error_condition_map = {
"in": _("one of"),
"not in": _("none of"),
"^": _("beginning with"),
}
if not doc:
doc = self
val1 = doc.get_value(fieldname)
df = doc.meta.get_field(fieldname)
val2 = doc.cast(val2, df)
if not frappe.compare(val1, condition, val2):
label = doc.meta.get_label(fieldname)
condition_str = error_condition_map.get(condition, condition)
if doc.parentfield:
msg = _("Incorrect value in row {0}: {1} must be {2} {3}").format(doc.idx, label, condition_str, val2)
else:
msg = _("Incorrect value: {0} must be {1} {2}").format(label, condition_str, val2)
# raise passed exception or True
msgprint(msg, raise_exception=raise_exception or True)
def validate_table_has_rows(self, parentfield, raise_exception=None):
"""Raise exception if Table field is empty."""
if not (isinstance(self.get(parentfield), list) and len(self.get(parentfield)) > 0):
label = self.meta.get_label(parentfield)
frappe.throw(_("Table {0} cannot be empty").format(label), raise_exception or frappe.EmptyTableError)
def round_floats_in(self, doc, fieldnames=None):
"""Round floats for all `Currency`, `Float`, `Percent` fields for the given doc.
:param doc: Document whose numeric properties are to be rounded.
:param fieldnames: [Optional] List of fields to be rounded."""
if not fieldnames:
fieldnames = (df.fieldname for df in
doc.meta.get("fields", {"fieldtype": ["in", ["Currency", "Float", "Percent"]]}))
for fieldname in fieldnames:
doc.set(fieldname, flt(doc.get(fieldname), self.precision(fieldname, doc.parentfield)))
def get_url(self):
"""Returns Desk URL for this document. `/app/{doctype}/{name}`"""
return f"/app/{slug(self.doctype)}/{self.name}"
def add_comment(self, comment_type='Comment', text=None, comment_email=None, link_doctype=None, link_name=None, comment_by=None):
"""Add a comment to this document.
:param comment_type: e.g. `Comment`. See Communication for more info."""
out = frappe.get_doc({
"doctype":"Comment",
'comment_type': comment_type,
"comment_email": comment_email or frappe.session.user,
"comment_by": comment_by,
"reference_doctype": self.doctype,
"reference_name": self.name,
"content": text or comment_type,
"link_doctype": link_doctype,
"link_name": link_name
}).insert(ignore_permissions=True)
return out
def add_seen(self, user=None):
"""add the given/current user to list of users who have seen this document (_seen)"""
if not user:
user = frappe.session.user
if self.meta.track_seen:
_seen = self.get('_seen') or []
_seen = frappe.parse_json(_seen)
if user not in _seen:
_seen.append(user)
frappe.db.set_value(self.doctype, self.name, '_seen', json.dumps(_seen), update_modified=False)
frappe.local.flags.commit = True
def add_viewed(self, user=None):
"""add log to communication when a user views a document"""
if not user:
user = frappe.session.user
if hasattr(self.meta, 'track_views') and self.meta.track_views:
frappe.get_doc({
"doctype": "View Log",
"viewed_by": frappe.session.user,
"reference_doctype": self.doctype,
"reference_name": self.name,
}).insert(ignore_permissions=True)
frappe.local.flags.commit = True
def get_signature(self):
"""Returns signature (hash) for private URL."""
return hashlib.sha224(get_datetime_str(self.creation).encode()).hexdigest()
def get_liked_by(self):
liked_by = getattr(self, "_liked_by", None)
if liked_by:
return json.loads(liked_by)
else:
return []
def set_onload(self, key, value):
if not self.get("__onload"):
self.set("__onload", frappe._dict())
self.get("__onload")[key] = value
def get_onload(self, key=None):
if not key:
return self.get("__onload", frappe._dict())
return self.get('__onload')[key]
def queue_action(self, action, **kwargs):
"""Run an action in background. If the action has an inner function,
like _submit for submit, it will call that instead"""
# call _submit instead of submit, so you can override submit to call
# run_delayed based on some action
# See: Stock Reconciliation
from frappe.utils.background_jobs import enqueue
if hasattr(self, '_' + action):
action = '_' + action
if file_lock.lock_exists(self.get_signature()):
frappe.throw(_('This document is currently queued for execution. Please try again'),
title=_('Document Queued'))
self.lock()
enqueue('frappe.model.document.execute_action', doctype=self.doctype, name=self.name,
action=action, **kwargs)
def lock(self, timeout=None):
"""Creates a lock file for the given document. If timeout is set,
it will retry every 1 second for acquiring the lock again
:param timeout: Timeout in seconds, default 0"""
signature = self.get_signature()
if file_lock.lock_exists(signature):
lock_exists = True
if timeout:
for i in range(timeout):
time.sleep(1)
if not file_lock.lock_exists(signature):
lock_exists = False
break
if lock_exists:
raise frappe.DocumentLockedError
file_lock.create_lock(signature)
def unlock(self):
"""Delete the lock file for this document"""
file_lock.delete_lock(self.get_signature())
# validation helpers
def validate_from_to_dates(self, from_date_field, to_date_field):
"""
Generic validation to verify date sequence
"""
if date_diff(self.get(to_date_field), self.get(from_date_field)) < 0:
frappe.throw(_('{0} must be after {1}').format(
frappe.bold(self.meta.get_label(to_date_field)),
frappe.bold(self.meta.get_label(from_date_field)),
), frappe.exceptions.InvalidDates)
def get_assigned_users(self):
assignments = frappe.get_all('ToDo',
fields=['owner'],
filters={
'reference_type': self.doctype,
'reference_name': self.name,
'status': ('!=', 'Cancelled'),
})
users = set([assignment.owner for assignment in assignments])
return users
def add_tag(self, tag):
"""Add a Tag to this document"""
from frappe.desk.doctype.tag.tag import DocTags
DocTags(self.doctype).add(self.name, tag)
def get_tags(self):
"""Return a list of Tags attached to this document"""
from frappe.desk.doctype.tag.tag import DocTags
return DocTags(self.doctype).get_tags(self.name).split(",")[1:]
def __repr__(self):
name = self.name or "unsaved"
doctype = self.__class__.__name__
docstatus = f" docstatus={self.docstatus}" if self.docstatus else ""
parent = f" parent={self.parent}" if self.parent else ""
return f"<{doctype}: {name}{docstatus}{parent}>"
def __str__(self):
name = self.name or "unsaved"
doctype = self.__class__.__name__
return f"{doctype}({name})"
def execute_action(doctype, name, action, **kwargs):
"""Execute an action on a document (called by background worker)"""
doc = frappe.get_doc(doctype, name)
doc.unlock()
try:
getattr(doc, action)(**kwargs)
except Exception:
frappe.db.rollback()
# add a comment (?)
if frappe.local.message_log:
msg = json.loads(frappe.local.message_log[-1]).get('message')
else:
			msg = '<pre><code>' + frappe.get_traceback() + '</code></pre>'
doc.add_comment('Comment', _('Action Failed') + '<br><br>' + msg)
doc.notify_update()
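# Hedged sketch (illustrative only): a minimal controller subclass showing where
# the standard lifecycle methods run, per `run_before_save_methods` and
# `run_post_save_methods` above. The DocType and field names are hypothetical.
class _ExampleTaskController(Document):
	def validate(self):
		# runs before Save and before Submit
		if not self.get("title"):
			frappe.throw(_("Title is required"))
	def on_update(self):
		# runs after the document has been written to the database
		pass
	def on_submit(self):
		# runs after on_update when docstatus moves from 0 to 1
		pass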
|
mit
| -3,114,780,777,890,336,000
| 30.290765
| 130
| 0.687219
| false
| 3.211567
| false
| false
| false
|
aknuds1/srl-python-lib
|
Tests/testqtgui/testwidgets.py
|
1
|
7362
|
from _common import *
if has_qt4:
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
import srllib.qtgui.widgets
if has_qt4:
class _LineEdit(srllib.qtgui.widgets._LineEditHelper,
guimocks.QLineEditMock):
_qbase = __qbase = guimocks.QLineEditMock
def __init__(self, contents="", undo_stack=None, undo_text=None, pos=None):
self.__qbase.__init__(self, returnValues={"text": contents})
srllib.qtgui.widgets._LineEditHelper.__init__(self, undo_stack,
undo_text, self.__qbase)
if pos is None:
pos = len(contents) + 1
self.setCursorPosition(pos)
@only_qt4
class LineEditTest(QtTestCase):
def test_construct_with_undo(self):
""" Test constructing with undo. """
# Test default label for undo operation
edit, stack = self.__construct("Test", undo=True)
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New")
self.assertEqual(stack.undoText(), "edit text")
# Test label for undo operation
edit, stack = self.__construct("Test", undo=True, undo_text=
"editing test")
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New")
self.assertEqual(stack.undoText(), "editing test")
def test_undo(self):
""" Test undo functionality. """
edit, stack = self.__construct("Initial", undo=True)
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New")
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New0")
edit.emit(QtCore.SIGNAL("editingFinished()"))
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New1")
stack.undo()
edit.mockCheckNamedCall(self, "setText", -1, "New0")
stack.undo()
edit.mockCheckNamedCall(self, "setText", -1, "Initial")
stack.redo()
edit.mockCheckNamedCall(self, "setText", -1, "New0")
stack.redo()
edit.mockCheckNamedCall(self, "setText", -1, "New1")
stack.undo()
edit.mockCheckNamedCall(self, "setText", -1, "New0")
def test_undo_setText(self):
""" Test undo in conjunction with setText. """
edit, stack = self.__construct(undo=True)
edit.setText("Test")
self.assertNot(stack.canUndo())
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "New")
stack.undo()
edit.mockCheckNamedCall(self, "setText", -1, "Test")
def test_undo_setText_undoable(self):
""" Test undo in conjunction with setText, with undoable=True. """
edit, stack = self.__construct("Old", undo=True)
edit.setText("New", undoable=True)
stack.undo()
edit.mockCheckNamedCall(self, "setText", -1, "Old")
def test_editText_cursor(self):
"""Verify that the cursor position is kept."""
edit, stack = self.__construct("Txt", undo=True, pos=1)
edit.emit(QtCore.SIGNAL("textEdited(const QString&)"), "Text")
self.assertEqual(edit.cursorPosition(), 1)
def __construct(self, contents=None, undo=False,
undo_text=None, pos=None):
if contents is None:
contents = QtCore.QString()
if undo:
undo_stack = QtGui.QUndoStack()
edit = _LineEdit(contents, undo_stack=undo_stack, undo_text=undo_text,
pos=pos)
if not undo:
return edit
return edit, undo_stack
if has_qt4:
class _NumericalLineEdit(srllib.qtgui.widgets._NumericalLineEditHelper,
_LineEdit):
_qbase = _LineEdit
def __init__(self, floating_point, contents, minimum, maximum):
self._qbase.__init__(self, contents=contents)
srllib.qtgui.widgets._NumericalLineEditHelper.__init__(self,
floating_point, minimum, maximum)
@only_qt4
class NumericalLineEditTest(QtTestCase):
def test_construct(self):
edit = self.__construct(True)
call = edit.mockGetNamedCall("setValidator", 0)
self.assert_(isinstance(call.args[0], QtGui.QDoubleValidator))
edit = self.__construct(True, minimum=0, maximum=1)
call = edit.mockGetNamedCall("setValidator", 0)
vtor = call.args[0]
self.assert_(isinstance(vtor, QtGui.QDoubleValidator))
self.assertEqual(vtor.bottom(), 0)
self.assertEqual(vtor.top(), 1)
edit = self.__construct(False)
call = edit.mockGetNamedCall("setValidator", 0)
self.assert_(isinstance(call.args[0], QtGui.QIntValidator))
edit = self.__construct(False, minimum=0, maximum=1)
call = edit.mockGetNamedCall("setValidator", 0)
vtor = call.args[0]
self.assert_(isinstance(vtor, QtGui.QIntValidator))
self.assertEqual(vtor.bottom(), 0)
self.assertEqual(vtor.top(), 1)
self.assertRaises(ValueError, self.__construct, False, minimum=0.1)
self.assertRaises(ValueError, self.__construct, False, maximum=0.1)
def __construct(self, floating_point, contents=None,
minimum=None, maximum=None):
if contents is None:
contents = QtCore.QString()
edit = _NumericalLineEdit(floating_point=
floating_point, contents=contents, minimum=minimum, maximum=
maximum)
return edit
if has_qt4:
# Note that the helper must be inherited first, to override methods in the
# Qt base
class _CheckBox(srllib.qtgui.widgets._CheckBoxHelper, guimocks.QCheckBoxMock):
_qbase = guimocks.QCheckBoxMock
def __init__(self, undo_stack=None, undo_text=None):
guimocks.QCheckBoxMock.__init__(self)
srllib.qtgui.widgets._CheckBoxHelper.__init__(self, undo_stack=
undo_stack, undo_text=undo_text)
@only_qt4
class CheckBoxHelperTest(QtTestCase):
def test_construct_with_undo(self):
""" Test constructing with undo. """
# Test default label for undo operation
checkbox, stack = self.__construct(undo=True)
self.__change_state(checkbox, True)
self.assertEqual(stack.undoText(), "")
# Test label for undo operation
checkbox, stack = self.__construct(undo=True, undo_text=
"check test")
self.__change_state(checkbox, True)
self.assertEqual(stack.undoText(), "check test")
def test_undo(self):
""" Test undo functionality. """
checkbox, stack = self.__construct(undo=True)
self.__change_state(checkbox, True)
self.__change_state(checkbox, False)
stack.undo()
checkbox.mockCheckNamedCall(self, "setCheckState", -1, Qt.Checked)
stack.undo()
checkbox.mockCheckNamedCall(self, "setCheckState", -1, Qt.Unchecked)
stack.redo()
checkbox.mockCheckNamedCall(self, "setCheckState", -1, Qt.Checked)
def __change_state(self, checkbox, checked):
if checked:
state = int(Qt.Checked)
else:
state = int(Qt.Unchecked)
checkbox.emit(QtCore.SIGNAL("stateChanged(int)"), state)
def __construct(self, checked=False, undo=False, undo_text=None):
if undo:
undo_stack = QtGui.QUndoStack()
checkbox = _CheckBox(undo_stack=undo_stack, undo_text=undo_text)
if not undo:
return checkbox
return checkbox, undo_stack
|
mit
| 4,963,683,699,837,994,000
| 39.01087
| 83
| 0.617359
| false
| 3.814508
| true
| false
| false
|
akhilaananthram/nupic.research
|
encoder_quality/rdse_check_demo.py
|
1
|
2680
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This script demonstrates how encoder_check.encoderCheck is used.
The example shows that a RandomDistributedScalarEncoder with higher
resolution will more tightly preserve the distance metric of the scalar
input space.
For three scalar values x, y, z, and their encodings Sx, Sy, and Sz, if
the overlap of Sx and Sy is greater than the overlap of Sx and Sz, we would
hope that the distance between x and y is less than the distance between x and
z. This is the logic that the encoderCheck employs. If it finds values that
violate this property, it reports it with a warning.
"""
import encoder_check
import numpy as np
from nupic.encoders.random_distributed_scalar import (
RandomDistributedScalarEncoder
)
if __name__ == "__main__":
print "Testing RSDE Quality"
maxval = 100.0
minval = -100.0
Nsamples = 1000
encoder1 = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
w=23, n=500, offset=0.0)
encoder2 = RandomDistributedScalarEncoder(name="encoder", resolution=10.0,
w=23, n=500, offset=0.0)
distance_function = lambda x,y : abs(x-y)
sample_generator = lambda : np.random.uniform(minval, maxval)
input_pairs_source = encoder_check.InputTripleCreator(sample_generator)
err1 = encoder_check.encoderCheck(encoder1, distance_function,
input_pairs_source)
err2 = encoder_check.encoderCheck(encoder2, distance_function,
input_pairs_source)
print
print "Warning rate for encoder w/ resolution 1.0: ",
print err1
print "Warning rate for encoder w/ resolution 10.0: ",
print err2
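# Hedged note (illustrative, not part of the demo): the "overlap" that
# encoderCheck relies on can be read as the number of shared active bits
# between two encodings, e.g. for the encoders above:
#   sx = encoder1.encode(10.0)
#   sy = encoder1.encode(12.0)
#   overlap = np.sum(np.logical_and(sx, sy))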
|
gpl-3.0
| -8,209,893,378,988,622,000
| 35.712329
| 78
| 0.672388
| false
| 4.207221
| false
| false
| false
|
GeoscienceAustralia/PyRate
|
tests/test_merge.py
|
1
|
2815
|
# coding: utf-8
# This Python module is part of the PyRate software package.
#
# Copyright 2020 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains tests for the Merge step of PyRate.
"""
import os
from subprocess import check_call
import itertools
import pytest
from pathlib import Path
from pyrate.merge import create_png_and_kml_from_tif
from pyrate.core import config as cf
from pyrate.merge import _merge_stack, _merge_linrate
from pyrate.configuration import Configuration, write_config_file
from tests.common import manipulate_test_conf
@pytest.fixture
def create_merge_output(tempdir, gamma_conf):
tdir = Path(tempdir())
params = manipulate_test_conf(gamma_conf, tdir)
output_conf_file = tdir.joinpath('conf.cfg')
output_conf = tdir.joinpath(output_conf_file)
write_config_file(params=params, output_conf_file=output_conf)
check_call(f"pyrate conv2tif -f {output_conf}", shell=True)
check_call(f"pyrate prepifg -f {output_conf}", shell=True)
check_call(f"pyrate correct -f {output_conf}", shell=True)
check_call(f"pyrate timeseries -f {output_conf}", shell=True)
check_call(f"pyrate stack -f {output_conf}", shell=True)
params = Configuration(output_conf).__dict__
_merge_stack(params)
_merge_linrate(params)
return params
@pytest.mark.slow
def test_file_creation(create_merge_output):
params = create_merge_output
# check if color map is created
for ot in ['stack_rate', 'stack_error', 'linear_rate', 'linear_error', 'linear_rsquared']:
create_png_and_kml_from_tif(params[cf.OUT_DIR], output_type=ot)
output_color_map_path = os.path.join(params[cf.OUT_DIR], f"colourmap_{ot}.txt")
assert Path(output_color_map_path).exists(), "Output color map file not found at: " + output_color_map_path
# check if merged files are created
for _type, ot in itertools.product(['stack_rate', 'stack_error', 'linear_rate',
'linear_error', 'linear_rsquared'], ['.tif', '.png', '.kml']):
output_image_path = os.path.join(params[cf.OUT_DIR], _type + ot)
print(f"checking {output_image_path}")
assert Path(output_image_path).exists(), f"Output {ot} file not found at {output_image_path}"
|
apache-2.0
| 5,893,206,925,292,384,000
| 41.651515
| 115
| 0.701954
| false
| 3.492556
| true
| false
| false
|
aileisun/bubbleimg
|
bubbleimg/imgmeasure/iso/isomeasurer.py
|
1
|
9351
|
# isomeasurer.py
# ALS 2017/06/01
import os
import astropy.units as u
from astropy.io import fits
import numpy as np
import astropy.table as at
import pickle
import scipy.ndimage as simg
from ..measurer import Measurer
from ... import tabtools
from . import polytools
from . import plottools
class isoMeasurer(Measurer):
def __init__(self, **kwargs):
"""
child of Measurer
do isophotal measurements
"""
super(isoMeasurer, self).__init__(**kwargs)
self.msrtype = 'iso'
def get_fp_contours(self, imgtag='OIII5008_I', onlycenter=False, suffix=''):
""" e.g., msr_iso-OIII5008_I{suffix}_contours.pkl
		or msr_iso-OIII5008_I{suffix}_contours-ctr.pkl
"""
if onlycenter:
ctrtag = '-ctr'
else:
ctrtag = ''
fp_root = self.get_fp_msrtagroot(imgtag=imgtag, suffix=suffix)
return fp_root+'_contours{ctrtag}.pkl'.format(ctrtag=ctrtag)
def make_measurements(self, imgtag='OIII5008_I', isocut=3.e-15*u.Unit('erg / (arcsec2 cm2 s)'), minarea=5, onlycenter=True, centerradius=5.*u.arcsec, plotsuffix='', savecontours=False, plotmsr=False, msrsuffix='', overwrite=False, append=False):
"""
make measurements on a map and write to msr_iso.csv.
if imgtag='OIII5008_I' then measure 'stamp-OIII5008_I.fits'
Params
------
self
imgtag='OIII5008_I'
overwrite = False (bool)
isocut=1.e-15*u.Unit('erg / (arcsec2 cm2 s)'):
isophote cut
minarea=0:
connected contour area (# pix) above the area is counted as part of the isophote measurement
onlycenter=False:
whether to consider only the center contours
centerradius=2.*u.arcsec
plotsuffix = '':
			plotsuffix label to be attached to the end of the plot or contour file names.
savecontours=False
plotmsr=False
msrsuffix=''
			suffix attached to the end of the measurement csv file name: msr_iso_{msrsuffix}.csv.
overwrite=False
append=False
Return
------
status (bool)
Write Output
------------
e.g., msr_iso.csv
"""
fn = self.get_fp_msr(msrsuffix=msrsuffix)
condi = {'imgtag': imgtag, 'isocut': isocut, 'minarea': minarea, 'onlycenter': onlycenter, 'centerradius': centerradius}
if append or overwrite or (not tabtools.fn_has_row(fn, condi)):
print("[isomeasurer] making measurement")
img = self.get_stamp_img(imgtag=imgtag, wunit=True)
xc, yc = self._get_xc_yc(img)
# calc
if np.all(~np.isnan(img)):
contours = self._get_contours_from_img(img=img, isocut=isocut, xc=xc, yc=yc, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius)
tab_msr = self._get_tab_measurements_from_contours(contours=contours, xc=xc, yc=yc)
else:
contours = []
tab_msr = self._get_tab_measurements_nan()
tab_params = self._get_tab_params(imgtag=imgtag, isocut=isocut, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius)
tabout = at.hstack([tab_params, tab_msr])
# output
tabtools.write_row(fn=fn, row=tabout, condi=condi, overwrite=overwrite, append=append)
# optional output
if savecontours:
fn_contours = self.get_fp_contours(imgtag=imgtag, onlycenter=onlycenter, suffix=plotsuffix)
write_pickle(contours, fn_contours, overwrite=overwrite)
if plotmsr:
fn_plot = self.get_fp_msrplot(imgtag=imgtag, suffix=plotsuffix)
plottools.make_plot_img_w_contours(fn_plot=fn_plot, img=img, contours=contours)
else:
print("[isomeasurer] skip making measurement as files exist")
return os.path.isfile(fn)
def make_visualpanel(self, fn=None, compo_bands ='gri', imgtag='OIII5008_I', onlycenter=True, minarea=5, centerradius=5.*u.arcsec, tocolorbar=True, totitle=True, fontsize=12, overwrite=False):
"""
make panel figure to visualize the composit and the iso measurements
saved to e.g., 'msr_iso-OIII5008_I_panel.pdf'
Params
------
fn = None: default: msr_iso_{imgtag}_panel.pdf
compo_bands ='gri', imgtag='OIII5008_I', overwrite=False
Return
------
status
"""
if fn is None:
fn = self.get_fp_msrplot(imgtag=imgtag, suffix='_panel')
else:
fn = self.dir_obj+fn
if not os.path.isfile(fn) or overwrite:
print("[isomeasurer] making visual panel")
# get files ready
self.make_colorimg(bands=compo_bands, img_type='stamp', overwrite=False)
# access data
img_compo = simg.imread(self.dir_obj+'color_stamp-{}.png'.format(compo_bands))
img_map = self.get_stamp_img(imgtag=imgtag, wunit=False)
suffix = '_3e-15'
isocut = 3.e-15*u.Unit('erg / (arcsec2 cm2 s)')
fn_contours3 = self.get_fp_contours(imgtag=imgtag, onlycenter=onlycenter, suffix=suffix)
if not os.path.isfile(fn_contours3):
print("[isomeasurer] re-doing measurements to make contours required for visual panel plots")
self.make_measurements(imgtag=imgtag, isocut=isocut, plotsuffix=suffix, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius, overwrite=True, savecontours=True, plotmsr=False),
contours3 = read_pickle(fn_contours3)
suffix = '_1e-15'
isocut = 1.e-15*u.Unit('erg / (arcsec2 cm2 s)')
fn_contours1 = self.get_fp_contours(imgtag=imgtag, onlycenter=onlycenter, suffix=suffix)
if not os.path.isfile(fn_contours1):
print("[isomeasurer] re-doing measurements to make contours required for visual panel plots")
self.make_measurements(imgtag=imgtag, isocut=isocut, plotsuffix=suffix, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius, overwrite=True, savecontours=True, plotmsr=False),
contours1 = read_pickle(fn_contours1)
z = self.z
pixsize = self.pixsize.to_value(u.arcsec)
legend_suffix = ' at 3'
name = self.obj.name[4:]
title_compo = '${}~{}~{}~$'.format(compo_bands[0], compo_bands[1], compo_bands[2])+'$\mathrm{Composite}$'
title_map = '$\mathrm{[OIII]\lambda 5007~Intensity}$'
label_cbar = '$I~[10^{-15}~\mathrm{erg~s^{-1}~cm^{-2}~arcsec^{-2}}]$'
plottools.make_iso_visual_panel(fn, img_compo, img_map, contours1, contours3, z, pixsize, legend_suffix, name, title_compo, title_map, label_cbar, tocolorbar=tocolorbar, totitle=totitle, fontsize=fontsize)
else:
print("[isomeasurer] skip making visual panel as files exist")
return os.path.isfile(fn)
def _get_tab_params(self, imgtag, isocut, minarea, onlycenter, centerradius):
"""
return a one row table of the measurement params
"""
tab = at.Table([[imgtag], [str(isocut)], [minarea], [onlycenter], [str(centerradius)], ], names=['imgtag', 'isocut', 'minarea', 'onlycenter', 'centerradius', ])
return tab
def _get_tab_measurements_from_contours(self, contours, xc, yc):
"""
calculate iso measurements from contours, return a table like:
"""
tab = polytools.ShapeParamsTab_from_contours(contours, xc, yc)
# unit conversion
area_ars = tab['area_pix'][0]*(self.pixsize/u.arcsec)**2
dmax_ars = self._pix_to_theta(tab['dmax_pix'][0], wunit=False)
rmax_ars = self._pix_to_theta(tab['rmax_pix'][0], wunit=False)
dper_ars = self._pix_to_theta(tab['dper_pix'][0], wunit=False)
kpc_per_arcsec = np.array(self._get_kpc_proper_per_arcsec())
area_kpc = area_ars * kpc_per_arcsec**2
dmax_kpc = dmax_ars * kpc_per_arcsec
rmax_kpc = rmax_ars * kpc_per_arcsec
dper_kpc = dper_ars * kpc_per_arcsec
tab_converted = at.Table(names=['area_kpc', 'dmax_kpc', 'rmax_kpc', 'dper_kpc', 'area_ars', 'dmax_ars', 'rmax_ars', 'dper_ars', ])
tab_converted.add_row([area_kpc, dmax_kpc, rmax_kpc, dper_kpc, area_ars, dmax_ars, rmax_ars, dper_ars, ])
tabout = at.hstack([tab_converted, tab])
return tabout
def _get_tab_measurements_nan(self):
"""
return a tab measurement just like _get_tab_measurements_from_contours() but with entries all nan.
"""
names = ['area_kpc', 'dmax_kpc', 'rmax_kpc', 'dper_kpc', 'area_ars', 'dmax_ars', 'rmax_ars', 'dper_ars', 'area_pix', 'dmax_pix', 'rmax_pix', 'dper_pix', 'theta_dmax', 'theta_rmax', 'theta_dper', 'aspectr']
tabout = at.Table(names=names)
tabout.add_row([np.nan for i in range(len(names))])
return tabout
def _get_contours_from_img(self, img, isocut, xc, yc, minarea=0., onlycenter=False, centerradius=2.*u.arcsec):
"""
make contour at isocut of image as python pickle file (fn_contours)
always overwrite
Params
------
self
img (array)
isocut (float or quantity):
has to be of the same type of unit as image
minarea (float):
minimum area (pix) to be considered as contour patch
onlycenter (bool):
whether to take only center patches as patches (they all have to pass minarea test as well)
centerradius (angular quantity):
if onlycenter = True, then it sets the radius of the center area. only patches overlapping with that area will be considered.
"""
# prep
try:
img.unit
		except AttributeError:  # plain array without an attached unit
img_nparr = img/isocut
else:
img_nparr = np.array((img/isocut).to(u.dimensionless_unscaled))
# find contours -- satisfy minarea
contours = polytools.find_largecontours(img=img_nparr, threshold=1., minarea=minarea)
if onlycenter: # select only those at the center
centerradius_pix = self._theta_to_pix(centerradius)
contours = polytools.select_center_contours(contours, xc, yc, radius=centerradius_pix)
return contours
def read_pickle(fn):
with open(fn, 'rb') as handle:
result = pickle.load(handle)
return result
def write_pickle(result, fn, overwrite=False):
if not os.path.isfile(fn) or overwrite:
with open(fn, 'wb') as handle:
pickle.dump(result, handle)
|
mit
| 7,320,847,590,150,667,000
| 32.636691
| 246
| 0.694364
| false
| 2.782208
| false
| false
| false
|
ShaguptaS/faker
|
faker/providers/user_agent.py
|
1
|
5274
|
from __future__ import unicode_literals
from . import BaseProvider
from . import date_time
from datetime import datetime
import random
class Provider(BaseProvider):
user_agents = ('firefox', 'chrome', 'internet_explorer', 'opera', 'safari')
windows_platform_tokens = (
'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.2', 'Windows NT 5.1',
'Windows NT 5.01', 'Windows NT 5.0', 'Windows NT 4.0', 'Windows 98; Win 9x 4.90',
'Windows 98', 'Windows 95', 'Windows CE'
)
linux_processors = ('i686', 'x86_64',)
mac_processors = ('Intel', 'PPC', 'U; Intel', 'U; PPC')
langs = ('en-US', 'sl-SI', 'it-IT')
@classmethod
def mac_processor(cls):
return cls.random_element(cls.mac_processors)
@classmethod
def linux_processor(cls):
return cls.random_element(cls.linux_processors)
@classmethod
def user_agent(cls):
name = cls.random_element(cls.user_agents)
return getattr(cls, name)()
@classmethod
def chrome(cls):
saf = str(random.randint(531, 536)) + str(random.randint(0, 2))
platforms = (
"({0}) AppleWebKit/{1} (KHTML, like Gecko) Chrome/{2}.0.{3}.0 Safari/{4}".format(
cls.linux_platform_token(), saf, random.randint(13, 15), random.randint(800, 899), saf),
"({0}) AppleWebKit/{1} (KHTML, like Gecko) Chrome/{2}.0.{3}.0 Safari/{4}".format(
cls.windows_platform_token(), saf, random.randint(13, 15), random.randint(800, 899), saf),
"({0}) AppleWebKit/{1} (KHTML, like Gecko) Chrome/{2}.0.{3}.0 Safari/{4}".format(
cls.mac_platform_token(), saf, random.randint(13, 15), random.randint(800, 899), saf),
)
return 'Mozilla/5.0 ' + cls.random_element(platforms)
@classmethod
def firefox(cls):
ver = (
'Gecko/{0} Firefox/{1}.0'.format(
date_time.Provider.date_time_between(datetime(2011, 1, 1)), random.randint(4, 15)),
'Gecko/{0} Firefox/3.6.{1}'.format(
date_time.Provider.date_time_between(datetime(2010, 1, 1)), random.randint(1, 20)),
'Gecko/{0} Firefox/3.8'.format(date_time.Provider.date_time_between(datetime(2010, 1, 1)), ),
)
platforms = (
"({0}; {1}; rv:1.9.{2}.20) {3}".format(
cls.windows_platform_token(), cls.random_element(cls.langs), random.randint(0, 2), random.choice(ver)),
"({0}; rv:1.9.{1}.20) {2}".format(cls.linux_platform_token(), random.randint(5, 7), random.choice(ver)),
"({0}; rv:1.9.{1}.20) {2}".format(cls.mac_platform_token(), random.randint(2, 6), random.choice(ver)),
)
return 'Mozilla/5.0 ' + cls.random_element(platforms)
@classmethod
def safari(cls):
saf = "{0}.{1}.{2}".format(random.randint(531, 535), random.randint(1, 50), random.randint(1, 7))
if random.randint(0, 1) == 0:
ver = "{0}.{1}".format(random.randint(4, 5), random.randint(0, 1))
else:
ver = "{0}.0.{1}".format(random.randint(4, 5), random.randint(1, 5))
platforms = (
'(Windows; U; {0}) AppleWebKit/{1} (KHTML, like Gecko) Version/{2} Safari/{3}'.format(
cls.windows_platform_token(), saf, ver, saf),
'({0} rv:{1}.0; {2}) AppleWebKit/{3} (KHTML, like Gecko) Version/{4} Safari/{5}'.format(
cls.mac_platform_token(), random.randint(2, 6), cls.random_element(cls.langs), saf, ver, saf),
'(iPod; U; CPU iPhone OS {0}_{1} like Mac OS X; {2}) AppleWebKit/{3} (KHTML, like Gecko) Version/{4}.0.5 Mobile/8B{5} Safari/6{6}'.format(
random.randint(3, 4), random.randint(0, 3), cls.random_element(cls.langs), saf, random.randint(3, 4),
random.randint(111, 119), saf
)
)
return 'Mozilla/5.0 ' + cls.random_element(platforms)
@classmethod
def opera(cls):
platforms = (
'({0}; {1}) Presto/2.9.{2} Version/{3}.00'.format(
cls.linux_platform_token(), cls.random_element(cls.langs), random.randint(160, 190),
random.randint(10, 12)),
'({0}; {1}) Presto/2.9.{2} Version/{3}.00'.format(
cls.windows_platform_token(), cls.random_element(cls.langs), random.randint(160, 190),
random.randint(10, 12)),
)
return 'Opera/{0}.{1}.{2}'.format(random.randint(8, 9), random.randint(10, 99), cls.random_element(platforms))
@classmethod
def internet_explorer(cls):
return 'Mozilla/5.0 (compatible; MSIE {0}.0; {1}; Trident/{2}.{3})'.format(
random.randint(5, 9),
cls.windows_platform_token(),
random.randint(3, 5),
random.randint(0, 1)
)
@classmethod
def windows_platform_token(cls):
return cls.random_element(cls.windows_platform_tokens)
@classmethod
def linux_platform_token(cls):
return 'X11; Linux {0}'.format(cls.random_element(cls.linux_processors))
@classmethod
def mac_platform_token(cls):
return 'Macintosh; {0} Mac OS X 10_{1}_{2}'.format(
cls.random_element(cls.mac_processors), random.randint(5, 8), random.randint(0, 9))
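# --- Hedged usage sketch (appended for illustration; not part of the original provider) ---
# Every generator on this provider is a classmethod, so a few user-agent strings
# can be previewed without wiring up a full Faker generator. This assumes the
# faker package's BaseProvider (imported above) supplies random_element().
if __name__ == '__main__':
    for _ in range(3):
        print(Provider.user_agent())
    print(Provider.internet_explorer())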
|
mit
| -4,744,924,618,467,953,000
| 40.857143
| 150
| 0.571483
| false
| 3.273743
| false
| false
| false
|
jeremiedecock/snippets
|
python/tkinter/python3/keyboard_events.py
|
1
|
5625
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See: https://github.com/jeremiedecock/pyarm/blob/master/pyarm/gui/tkinter_gui.py
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/key-names.html
# http://www.tcl.tk/man/tcl8.4/TkCmd/keysyms.htm
import tkinter as tk
root = tk.Tk()
label = tk.Label(root, text="Press some keys", width=50, height=10)
label.pack()
# SETUP KEYBOARD EVENT CALLBACKS
def keypress_callback(event):
if event.keysym == "Up":
print("keypress: <Up>")
elif event.keysym == "Down":
print("keypress: <Down>")
elif event.keysym == "Left":
print("keypress: <Left>")
elif event.keysym == "Right":
print("keypress: <Right>")
elif event.keysym == "Return":
print("keypress: <Return>")
elif event.keysym == "Escape":
print("keypress: <Escape>")
elif event.keysym == "space":
print("keypress: <space>")
elif event.keysym == "Control_R":
print("keypress: <Control_R>")
elif event.keysym == "Control_L":
print("keypress: <Control_L>")
elif event.keysym == "Shift_R":
print("keypress: <Shift_R>")
elif event.keysym == "Shift_L":
print("keypress: <Shift_L>")
elif event.keysym == "Tab":
print("keypress: <Tab>")
elif event.keysym == "Super_R":
print("keypress: <Super_R>")
elif event.keysym == "Super_L":
print("keypress: <Super_L>")
elif event.keysym == "BackSpace":
print("keypress: <BackSpace>")
elif event.keysym == "Prior": # PgUp
print("keypress: <Prior>")
elif event.keysym == "Next": # PgDown
print("keypress: <Next>")
elif event.char == "a":
print("keypress: <a>")
elif event.char == "b":
print("keypress: <b>")
elif event.char == "c":
print("keypress: <c>")
elif event.char == "d":
print("keypress: <d>")
elif event.char == "A":
print("keypress: <A>")
elif event.char == "B":
print("keypress: <B>")
elif event.char == "C":
print("keypress: <C>")
elif event.char == "D":
print("keypress: <D>")
elif event.char == "1":
print("keypress: <1>")
elif event.char == "2":
print("keypress: <2>")
elif event.char == "3":
print("keypress: <3>")
else:
print("keypress:", event.char, event.keysym)
def keyrelease_callback(event):
if event.keysym == "Up":
print("keyrelease: <Up>")
elif event.keysym == "Down":
print("keyrelease: <Down>")
elif event.keysym == "Left":
print("keyrelease: <Left>")
elif event.keysym == "Right":
print("keyrelease: <Right>")
elif event.keysym == "Return":
print("keyrelease: <Return>")
elif event.keysym == "Escape":
print("keyrelease: <Escape>")
elif event.keysym == "space":
print("keyrelease: <space>")
elif event.keysym == "Control_R":
print("keyrelease: <Control_R>")
elif event.keysym == "Control_L":
print("keyrelease: <Control_L>")
elif event.keysym == "Shift_R":
print("keyrelease: <Shift_R>")
elif event.keysym == "Shift_L":
print("keyrelease: <Shift_L>")
elif event.keysym == "Tab":
print("keyrelease: <Tab>")
elif event.keysym == "Super_R":
print("keyrelease: <Super_R>")
elif event.keysym == "Super_L":
print("keyrelease: <Super_L>")
elif event.keysym == "BackSpace":
print("keyrelease: <BackSpace>")
elif event.keysym == "Prior": # PgUp
print("keyrelease: <Prior>")
elif event.keysym == "Next": # PgDown
print("keyrelease: <Next>")
elif event.char == "a":
print("keyrelease: <a>")
elif event.char == "b":
print("keyrelease: <b>")
elif event.char == "c":
print("keyrelease: <c>")
elif event.char == "d":
print("keyrelease: <d>")
elif event.char == "A":
print("keyrelease: <A>")
elif event.char == "B":
print("keyrelease: <B>")
elif event.char == "C":
print("keyrelease: <C>")
elif event.char == "D":
print("keyrelease: <D>")
elif event.char == "1":
print("keyrelease: <1>")
elif event.char == "2":
print("keyrelease: <2>")
elif event.char == "3":
print("keyrelease: <3>")
else:
print("keyrelease:", event.char, event.keysym)
root.bind("<KeyPress>", keypress_callback)
root.bind("<KeyRelease>", keyrelease_callback)
root.mainloop()
|
mit
| 4,922,969,895,132,361,000
| 34.14375
| 82
| 0.599502
| false
| 3.462438
| false
| false
| false
|
jessepeterson/commandment
|
commandment/alembic/versions/8c866896f76e_create_dep_join_tables.py
|
1
|
1729
|
"""empty message
Revision ID: 8c866896f76e
Revises: 0e5babc5b9ee
Create Date: 2017-07-19 12:57:58.086196
"""
from alembic import op
import sqlalchemy as sa
import commandment.dbtypes
from alembic import context
# revision identifiers, used by Alembic.
revision = '8c866896f76e'
down_revision = '0e5babc5b9ee'
branch_labels = None
depends_on = None
def upgrade():
schema_upgrades()
# if context.get_x_argument(as_dictionary=True).get('data', None):
# data_upgrades()
def downgrade():
# if context.get_x_argument(as_dictionary=True).get('data', None):
# data_downgrades()
schema_downgrades()
def schema_upgrades():
"""schema upgrade migrations go here."""
op.create_table('dep_profile_anchor_certificates',
sa.Column('dep_profile_id', sa.Integer(), nullable=True),
sa.Column('certificate_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['certificate_id'], ['certificates.id'], ),
sa.ForeignKeyConstraint(['dep_profile_id'], ['dep_profiles.id'], )
)
op.create_table('dep_profile_supervision_certificates',
sa.Column('dep_profile_id', sa.Integer(), nullable=True),
sa.Column('certificate_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['certificate_id'], ['certificates.id'], ),
sa.ForeignKeyConstraint(['dep_profile_id'], ['dep_profiles.id'], )
)
def schema_downgrades():
"""schema downgrade migrations go here."""
op.drop_table('dep_profile_supervision_certificates')
op.drop_table('dep_profile_anchor_certificates')
def data_upgrades():
"""Add any optional data upgrade migrations here!"""
pass
def data_downgrades():
"""Add any optional data downgrade migrations here!"""
pass
|
mit
| -6,987,280,669,994,090,000
| 26.444444
| 71
| 0.689994
| false
| 3.5
| false
| false
| false
|
lgbouma/astrobase
|
astrobase/services/gaia.py
|
1
|
44599
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# gaia - Waqas Bhatti (wbhatti@astro.princeton.edu) - Dec 2017
# License: MIT. See the LICENSE file for more details.
'''
This queries the GAIA catalog for object lists in specified areas of the
sky. The main use of this module is to generate realistic spatial distributions
of stars for variability recovery simulations in combination with colors and
luminosities from the TRILEGAL galaxy model.
If you use this module, please cite the GAIA papers as outlined at:
https://gaia.esac.esa.int/documentation//GDR1/Miscellaneous/sec_credit_and_citation_instructions.html
Much of this module is derived from the example given at:
http://gea.esac.esa.int/archive-help/commandline/index.html
For a more general and useful interface to the GAIA catalog, see the astroquery
package by A. Ginsburg, B. Sipocz, et al.:
http://astroquery.readthedocs.io/en/latest/gaia/gaia.html
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import os
import os.path
import gzip
import hashlib
import time
import pickle
import random
# to do the queries
import requests
import requests.exceptions
# to read the XML returned by the TAP service
from xml.dom.minidom import parseString
###################
## FORM SETTINGS ##
###################
GAIA_URLS = {
'gaia':{'url':"https://gea.esac.esa.int/tap-server/tap/async",
'table':'gaiadr2.gaia_source',
'phasekeyword':'uws:phase',
'resultkeyword':'uws:result'},
'heidelberg':{'url':"http://gaia.ari.uni-heidelberg.de/tap/async",
'table':'gaiadr2.gaia_source',
'phasekeyword':'phase',
'resultkeyword':'result'},
'vizier':{'url':"http://tapvizier.u-strasbg.fr/TAPVizieR/tap/async",
'table':'"I/345/gaia2"',
'phasekeyword':'phase',
'resultkeyword':'result'},
}
# default TAP query params, will be copied and overridden
TAP_PARAMS = {
'REQUEST':'doQuery',
'LANG':'ADQL',
'FORMAT':'json',
'PHASE':'RUN',
'JOBNAME':'',
'JOBDESCRIPTION':'',
'QUERY':''
}
# valid return formats
RETURN_FORMATS = {
'json':'json.gz',
'csv':'csv.gz',
'votable':'vot',
}
#####################
## QUERY FUNCTIONS ##
#####################
def tap_query(querystr,
gaia_mirror=None,
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/gaia-cache',
verbose=True,
timeout=15.0,
refresh=2.0,
maxtimeout=300.0,
maxtries=3,
complete_query_later=False):
'''This queries the GAIA TAP service using an ADQL query string.
Parameters
----------
querystr : str
This is the ADQL query string. See:
http://www.ivoa.net/documents/ADQL/2.0 for the specification and
http://gea.esac.esa.int/archive-help/adql/index.html for GAIA-specific
additions.
gaia_mirror : {'gaia','heidelberg','vizier'} or None
This is the key used to select a GAIA catalog mirror from the
`GAIA_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
    complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# get the default params
inputparams = TAP_PARAMS.copy()
# update them with our input params
inputparams['QUERY'] = querystr[::]
if returnformat in RETURN_FORMATS:
inputparams['FORMAT'] = returnformat
else:
LOGWARNING('unknown result format: %s requested, using CSV' %
returnformat)
inputparams['FORMAT'] = 'csv'
# see if the cachedir exists
if '~' in cachedir:
cachedir = os.path.expanduser(cachedir)
if not os.path.exists(cachedir):
os.makedirs(cachedir)
# generate the cachefname and look for it
xcachekey = '-'.join([repr(inputparams[x])
for x in sorted(inputparams.keys())])
cachekey = hashlib.sha256(xcachekey.encode()).hexdigest()
cachefname = os.path.join(
cachedir,
'%s.%s' % (cachekey, RETURN_FORMATS[returnformat])
)
provenance = 'cache'
incomplete_qpklf = os.path.join(
cachedir,
'incomplete-query-%s' % cachekey
)
##########################################
## COMPLETE A QUERY THAT MAY BE RUNNING ##
##########################################
# first, check if this query can be resurrected
if (not forcefetch and
complete_query_later and
os.path.exists(incomplete_qpklf)):
with open(incomplete_qpklf, 'rb') as infd:
incomplete_qinfo = pickle.load(infd)
LOGWARNING('complete_query_later = True, and '
'this query was not completed on a '
'previous run, will check if it is done now...')
# get the status URL and go into a loop to see if the query completed
waitdone = False
timeelapsed = 0.0
gaia_mirror = incomplete_qinfo['gaia_mirror']
status_url = incomplete_qinfo['status_url']
phasekeyword = incomplete_qinfo['phase_keyword']
resultkeyword = incomplete_qinfo['result_keyword']
while not waitdone:
if timeelapsed > maxtimeout:
                LOGERROR('GAIA TAP query still not done '
                         'after waiting %s seconds for results.\n'
                         'request was: %s\n'
                         'status URL is: %s' %
                         (maxtimeout,
                          repr(inputparams),
                          status_url))
return None
try:
resreq = requests.get(status_url,
timeout=timeout)
resreq.raise_for_status()
# parse the response XML and get the job status
resxml = parseString(resreq.text)
jobstatuselem = (
resxml.getElementsByTagName(phasekeyword)[0]
)
jobstatus = jobstatuselem.firstChild.toxml()
if jobstatus == 'COMPLETED':
if verbose:
LOGINFO('GAIA query completed, '
'retrieving results...')
waitdone = True
# if we're not done yet, then wait some more
elif jobstatus != 'ERROR':
if verbose:
LOGINFO('elapsed time: %.1f, '
'current status: %s, '
'status URL: %s, waiting...'
% (timeelapsed, jobstatus, status_url))
time.sleep(refresh)
timeelapsed = timeelapsed + refresh
# if the JOB failed, then bail out immediately
else:
LOGERROR('GAIA TAP query failed due to a server error.\n'
'status URL: %s\n'
'status contents: %s' %
(status_url,
resreq.text))
# since this job failed, remove the incomplete query pickle
# so we can try this from scratch
os.remove(incomplete_qpklf)
return None
except requests.exceptions.Timeout:
LOGEXCEPTION(
'GAIA query timed out while waiting for status '
'download results.\n'
'query: %s\n'
'status URL: %s' %
(repr(inputparams), status_url)
)
return None
except Exception:
LOGEXCEPTION(
'GAIA query failed while waiting for status\n'
'query: %s\n'
'status URL: %s\n'
'status contents: %s' %
(repr(inputparams),
status_url,
resreq.text)
)
# if the query fails completely, then either the status URL
# doesn't exist any more or something else went wrong. we'll
# remove the incomplete query pickle so we can try this from
# scratch
os.remove(incomplete_qpklf)
return None
#
# at this point, we should be ready to get the query results
#
LOGINFO('query completed, retrieving results...')
result_url_elem = resxml.getElementsByTagName(resultkeyword)[0]
result_url = result_url_elem.getAttribute('xlink:href')
result_nrows = result_url_elem.getAttribute('rows')
try:
resreq = requests.get(result_url, timeout=timeout)
resreq.raise_for_status()
if cachefname.endswith('.gz'):
with gzip.open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
else:
with open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
if verbose:
LOGINFO('done. rows in result: %s' % result_nrows)
tablefname = cachefname
provenance = 'cache'
# return a dict pointing to the result file
# we'll parse this later
resdict = {'params':inputparams,
'provenance':provenance,
'result':tablefname}
# all went well, so we'll remove the incomplete query pickle
os.remove(incomplete_qpklf)
return resdict
except requests.exceptions.Timeout:
LOGEXCEPTION(
'GAIA query timed out while trying to '
'download results.\n'
'query: %s\n'
'result URL: %s' %
(repr(inputparams), result_url)
)
return None
except Exception:
LOGEXCEPTION(
'GAIA query failed because of an error '
'while trying to download results.\n'
'query: %s\n'
'result URL: %s\n'
'response status code: %s' %
(repr(inputparams),
result_url,
resreq.status_code)
)
# if the result download fails, then either the result URL doesn't
# exist any more or something else went wrong. we'll remove the
# incomplete query pickle so we can try this from scratch
os.remove(incomplete_qpklf)
return None
#####################
## RUN A NEW QUERY ##
#####################
# otherwise, we check the cache if it's done already, or run it again if not
if forcefetch or (not os.path.exists(cachefname)):
provenance = 'new download'
# generate a jobid here and update the input params
jobid = 'ab-gaia-%i' % time.time()
inputparams['JOBNAME'] = jobid
inputparams['JOBDESCRIPTION'] = 'astrobase-gaia-tap-ADQL-query'
try:
waitdone = False
timeelapsed = 0.0
# set the gaia mirror to use
if gaia_mirror is not None and gaia_mirror in GAIA_URLS:
tapurl = GAIA_URLS[gaia_mirror]['url']
resultkeyword = GAIA_URLS[gaia_mirror]['resultkeyword']
phasekeyword = GAIA_URLS[gaia_mirror]['phasekeyword']
randkey = gaia_mirror
# sub in a table name if this is left unresolved in the input
# query
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=GAIA_URLS[gaia_mirror]['table']
)
)
else:
randkey = random.choice(list(GAIA_URLS.keys()))
tapurl = GAIA_URLS[randkey]['url']
resultkeyword = GAIA_URLS[randkey]['resultkeyword']
phasekeyword = GAIA_URLS[randkey]['phasekeyword']
# sub in a table name if this is left unresolved in the input
# query
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=GAIA_URLS[randkey]['table']
)
)
if verbose:
LOGINFO('using GAIA mirror TAP URL: %s' % tapurl)
# send the query and get status
if verbose:
LOGINFO('submitting GAIA TAP query request for input params: %s'
% repr(inputparams))
# here, we'll make sure the GAIA mirror works before doing anything
# else
mirrorok = False
ntries = 1
while not mirrorok:
if ntries > maxtries:
LOGERROR('maximum number of allowed GAIA query '
'submission tries (%s) reached, bailing out...' %
maxtries)
return None
try:
req = requests.post(tapurl,
data=inputparams,
timeout=timeout)
resp_status = req.status_code
req.raise_for_status()
mirrorok = True
# this handles immediate 503s
except requests.exceptions.HTTPError:
LOGWARNING(
'GAIA TAP server: %s not responding, '
'trying another mirror...'
% tapurl
)
mirrorok = False
# make sure not to hit current mirror again if it's down
remainingmirrors = list(GAIA_URLS.keys())
remainingmirrors.remove(randkey)
randkey = random.choice(remainingmirrors)
tapurl = GAIA_URLS[randkey]['url']
resultkeyword = GAIA_URLS[randkey]['resultkeyword']
phasekeyword = GAIA_URLS[randkey]['phasekeyword']
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=GAIA_URLS[randkey]['table']
)
)
# this handles initial query submission timeouts
except requests.exceptions.Timeout:
LOGWARNING(
'GAIA TAP query submission timed out, '
'mirror is probably down. Trying another mirror...'
)
mirrorok = False
# make sure not to hit current mirror again if it's down
remainingmirrors = list(GAIA_URLS.keys())
remainingmirrors.remove(randkey)
randkey = random.choice(remainingmirrors)
tapurl = GAIA_URLS[randkey]['url']
resultkeyword = GAIA_URLS[randkey]['resultkeyword']
phasekeyword = GAIA_URLS[randkey]['phasekeyword']
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=GAIA_URLS[randkey]['table']
)
)
# update the number of submission tries
ntries = ntries + 1
# NOTE: python-requests follows the "303 See Other" redirect
# automatically, so we get the XML status doc immediately. We don't
# need to look up the location of it in the initial response's
# header as in the GAIA example.
status_url = req.url
# parse the response XML and get the job status
resxml = parseString(req.text)
jobstatuselem = resxml.getElementsByTagName(phasekeyword)
if jobstatuselem:
jobstatuselem = jobstatuselem[0]
else:
LOGERROR('could not parse job phase using '
'keyword %s in result XML' % phasekeyword)
                LOGERROR('%s' % req.text)
req.close()
return None
jobstatus = jobstatuselem.firstChild.toxml()
# if the job completed already, jump down to retrieving results
if jobstatus == 'COMPLETED':
if verbose:
LOGINFO('GAIA query completed, '
'retrieving results...')
waitdone = True
elif jobstatus == 'ERROR':
if verbose:
LOGERROR(
'GAIA query failed immediately '
'(probably an ADQL error): %s, '
'status URL: %s, status contents: %s' %
(repr(inputparams),
status_url,
req.text)
)
return None
# we wait for the job to complete if it's not done already
else:
if verbose:
LOGINFO(
'request submitted successfully, '
'current status is: %s. '
'waiting for results...' % jobstatus
)
while not waitdone:
if timeelapsed > maxtimeout:
LOGERROR('GAIA TAP query timed out '
'after waiting %s seconds for results.\n'
'request was: %s\n'
'status URL is: %s\n'
'last status was: %s' %
(maxtimeout,
repr(inputparams),
status_url,
jobstatus))
# here, we'll check if we're allowed to sleep on a query
# for a bit and return to it later if the last status
# was QUEUED or EXECUTING
if complete_query_later and jobstatus in ('EXECUTING',
'QUEUED'):
# write a pickle with the query params that we can
# pick up later to finish this query
incomplete_qpklf = os.path.join(
cachedir,
'incomplete-query-%s' % cachekey
)
with open(incomplete_qpklf, 'wb') as outfd:
savedict = inputparams.copy()
savedict['status_url'] = status_url
savedict['last_status'] = jobstatus
savedict['gaia_mirror'] = gaia_mirror
savedict['phase_keyword'] = phasekeyword
savedict['result_keyword'] = resultkeyword
pickle.dump(savedict,
outfd,
pickle.HIGHEST_PROTOCOL)
LOGINFO('complete_query_later = True, '
'last state of query was: %s, '
'will resume later if this function '
'is called again with the same query' %
jobstatus)
return None
time.sleep(refresh)
timeelapsed = timeelapsed + refresh
try:
resreq = requests.get(status_url, timeout=timeout)
resreq.raise_for_status()
# parse the response XML and get the job status
resxml = parseString(resreq.text)
jobstatuselem = (
resxml.getElementsByTagName(phasekeyword)[0]
)
jobstatus = jobstatuselem.firstChild.toxml()
if jobstatus == 'COMPLETED':
if verbose:
LOGINFO('GAIA query completed, '
'retrieving results...')
waitdone = True
else:
if verbose:
LOGINFO('elapsed time: %.1f, '
'current status: %s, '
'status URL: %s, waiting...'
% (timeelapsed, jobstatus, status_url))
continue
except requests.exceptions.Timeout:
LOGEXCEPTION(
'GAIA query timed out while waiting for results '
'download results.\n'
'query: %s\n'
'status URL: %s' %
(repr(inputparams), status_url)
)
return None
except Exception:
LOGEXCEPTION(
'GAIA query failed while waiting for results\n'
'query: %s\n'
'status URL: %s\n'
'status contents: %s' %
(repr(inputparams),
status_url,
resreq.text)
)
return None
#
# at this point, we should be ready to get the query results
#
result_url_elem = resxml.getElementsByTagName(resultkeyword)[0]
result_url = result_url_elem.getAttribute('xlink:href')
result_nrows = result_url_elem.getAttribute('rows')
try:
resreq = requests.get(result_url, timeout=timeout)
resreq.raise_for_status()
if cachefname.endswith('.gz'):
with gzip.open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
else:
with open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
if verbose:
LOGINFO('done. rows in result: %s' % result_nrows)
tablefname = cachefname
except requests.exceptions.Timeout:
LOGEXCEPTION(
'GAIA query timed out while trying to '
'download results.\n'
'query: %s\n'
'result URL: %s' %
(repr(inputparams), result_url)
)
return None
except Exception:
LOGEXCEPTION(
'GAIA query failed because of an error '
'while trying to download results.\n'
'query: %s\n'
'result URL: %s\n'
'response status code: %s' %
(repr(inputparams),
result_url,
resreq.status_code)
)
return None
except requests.exceptions.HTTPError:
LOGEXCEPTION('GAIA TAP query failed.\nrequest status was: '
'%s.\nquery was: %s' % (resp_status,
repr(inputparams)))
return None
except requests.exceptions.Timeout:
LOGERROR('GAIA TAP query submission timed out, '
'site is probably down. Request was: '
'%s' % repr(inputparams))
return None
except Exception:
LOGEXCEPTION('GAIA TAP query request failed for '
'%s' % repr(inputparams))
if 'resxml' in locals():
LOGERROR('HTTP response from service:\n%s' % req.text)
return None
############################
## GET RESULTS FROM CACHE ##
############################
else:
if verbose:
LOGINFO('getting cached GAIA query result for '
'request: %s' %
(repr(inputparams)))
tablefname = cachefname
#
# all done with retrieval, now return the result dict
#
# return a dict pointing to the result file
# we'll parse this later
resdict = {'params':inputparams,
'provenance':provenance,
'result':tablefname}
return resdict
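# --- Hedged example (added for illustration; not part of the original module) ---
# A raw ADQL call through tap_query. The {table} placeholder is left for
# tap_query itself to resolve to the chosen mirror's GAIA table name; the
# magnitude cut and row limit below are arbitrary illustrative values.
def example_bright_source_query(max_gmag=8.0, **kwargs):
    '''Run a simple brightness-limited ADQL query and return the tap_query result dict.'''
    querystr = (
        "select top 100 source_id, ra, dec, phot_g_mean_mag "
        "from {table} where phot_g_mean_mag < %.1f "
        "order by phot_g_mean_mag asc" % max_gmag
    )
    return tap_query(querystr, **kwargs)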
def objectlist_conesearch(racenter,
declcenter,
searchradiusarcsec,
gaia_mirror=None,
columns=('source_id',
'ra','dec',
'phot_g_mean_mag',
'l','b',
'parallax', 'parallax_error',
'pmra','pmra_error',
'pmdec','pmdec_error'),
extra_filter=None,
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/gaia-cache',
verbose=True,
timeout=15.0,
refresh=2.0,
maxtimeout=300.0,
maxtries=3,
complete_query_later=True):
'''This queries the GAIA TAP service for a list of objects near the coords.
Runs a conesearch around `(racenter, declcenter)` with radius in arcsec of
`searchradiusarcsec`.
Parameters
----------
racenter,declcenter : float
The center equatorial coordinates in decimal degrees.
searchradiusarcsec : float
The search radius of the cone-search in arcseconds.
gaia_mirror : {'gaia','heidelberg','vizier'} or None
This is the key used to select a GAIA catalog mirror from the
`GAIA_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
columns : sequence of str
This indicates which columns from the GAIA table to request for the
objects found within the search radius.
extra_filter: str or None
If this is provided, must be a valid ADQL filter string that is used to
further filter the cone-search results.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
    complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# this was generated using the awesome query generator at:
# https://gea.esac.esa.int/archive/
# NOTE: here we don't resolve the table name right away. this is because
# some of the GAIA mirrors use different table names, so we leave the table
# name to be resolved by the lower level tap_query function. this is done by
# the {{table}} construct.
query = (
"select {columns}, "
"(DISTANCE(POINT('ICRS', "
"{{table}}.ra, {{table}}.dec), "
"POINT('ICRS', {ra_center:.5f}, {decl_center:.5f})))*3600.0 "
"AS dist_arcsec "
"from {{table}} where "
"CONTAINS(POINT('ICRS',{{table}}.ra, {{table}}.dec),"
"CIRCLE('ICRS',{ra_center:.5f},{decl_center:.5f},"
"{search_radius:.6f}))=1 "
"{extra_filter_str}"
"ORDER by dist_arcsec asc "
)
if extra_filter is not None:
extra_filter_str = ' and %s ' % extra_filter
else:
extra_filter_str = ''
formatted_query = query.format(ra_center=racenter,
decl_center=declcenter,
search_radius=searchradiusarcsec/3600.0,
extra_filter_str=extra_filter_str,
columns=', '.join(columns))
return tap_query(formatted_query,
gaia_mirror=gaia_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later)
def objectlist_radeclbox(radeclbox,
gaia_mirror=None,
columns=('source_id',
'ra','dec',
'phot_g_mean_mag',
'l','b',
                                  'parallax', 'parallax_error',
'pmra','pmra_error',
'pmdec','pmdec_error'),
extra_filter=None,
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/gaia-cache',
verbose=True,
timeout=15.0,
refresh=2.0,
maxtimeout=300.0,
maxtries=3,
complete_query_later=True):
'''This queries the GAIA TAP service for a list of objects in an equatorial
coordinate box.
Parameters
----------
radeclbox : sequence of four floats
This defines the box to search in::
[ra_min, ra_max, decl_min, decl_max]
gaia_mirror : {'gaia','heidelberg','vizier'} or None
This is the key used to select a GAIA catalog mirror from the
`GAIA_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
columns : sequence of str
This indicates which columns from the GAIA table to request for the
objects found within the search radius.
extra_filter: str or None
If this is provided, must be a valid ADQL filter string that is used to
further filter the cone-search results.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
    complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# this was generated using the awesome query generator at:
# https://gea.esac.esa.int/archive/
# NOTE: here we don't resolve the table name right away. this is because
# some of the GAIA mirrors use different table names, so we leave the table
# name to be resolved by the lower level tap_query function. this is done by
# the {{table}} construct.
query = (
"select {columns} from {{table}} where "
"CONTAINS(POINT('ICRS',{{table}}.ra, {{table}}.dec),"
"BOX('ICRS',{ra_center:.5f},{decl_center:.5f},"
"{ra_width:.5f},{decl_height:.5f}))=1"
"{extra_filter_str}"
)
ra_min, ra_max, decl_min, decl_max = radeclbox
ra_center = (ra_max + ra_min)/2.0
decl_center = (decl_max + decl_min)/2.0
ra_width = ra_max - ra_min
decl_height = decl_max - decl_min
if extra_filter is not None:
extra_filter_str = ' and %s ' % extra_filter
else:
extra_filter_str = ''
formatted_query = query.format(columns=', '.join(columns),
extra_filter_str=extra_filter_str,
ra_center=ra_center,
decl_center=decl_center,
ra_width=ra_width,
decl_height=decl_height)
return tap_query(formatted_query,
gaia_mirror=gaia_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later)
def objectid_search(gaiaid,
gaia_mirror=None,
columns=('source_id',
'ra','dec',
'phot_g_mean_mag',
'phot_bp_mean_mag',
'phot_rp_mean_mag',
'l','b',
                             'parallax', 'parallax_error',
'pmra','pmra_error',
'pmdec','pmdec_error'),
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/gaia-cache',
verbose=True,
timeout=15.0,
refresh=2.0,
maxtimeout=300.0,
maxtries=3,
complete_query_later=True):
'''This queries the GAIA TAP service for a single GAIA source ID.
Parameters
----------
gaiaid : str
The source ID of the object whose info will be collected.
gaia_mirror : {'gaia','heidelberg','vizier'} or None
This is the key used to select a GAIA catalog mirror from the
`GAIA_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
columns : sequence of str
This indicates which columns from the GAIA table to request for the
objects found within the search radius.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
    complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# NOTE: here we don't resolve the table name right away. this is because
# some of the GAIA mirrors use different table names, so we leave the table
# name to be resolved by the lower level tap_query function. this is done by
# the {{table}} construct.
query = (
"select {columns} from {{table}} where "
"source_id = {gaiaid}"
)
formatted_query = query.format(columns=', '.join(columns),
gaiaid=gaiaid)
return tap_query(formatted_query,
gaia_mirror=gaia_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later)
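# --- Hedged usage sketch (appended for illustration; not part of the original module) ---
# A cone search around an arbitrary sky position plus a single source-ID lookup.
# The coordinates and the source ID below are placeholders, and both calls need
# network access to one of the TAP mirrors defined in GAIA_URLS above.
if __name__ == '__main__':
    cone = objectlist_conesearch(290.0, 45.0, 60.0)
    if cone is not None:
        LOGINFO('cone-search result table: %s' % cone['result'])
    single = objectid_search('1234567890123456789')
    if single is not None:
        LOGINFO('source-ID result table: %s' % single['result'])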
|
mit
| 5,913,882,603,663,914,000
| 35.200487
| 101
| 0.518621
| false
| 4.804374
| false
| false
| false
|
peter-kiechle/tactile-sensors
|
python/interpolation_and_contour/interpolation_and_contour.py
|
1
|
7498
|
# -*- coding: utf-8 -*-
import os, sys
print("CWD: " + os.getcwd() )
# Load configuration file before pyplot
config_path = os.path.abspath('../matplotlib/')
sys.path.append(config_path)
import configuration as config
# Library path
lib_path = os.path.abspath('../../lib')
sys.path.append(lib_path)
import framemanager_python
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate
# Color map
# Define "bds_highcontrast" color map by Britton Smith <brittonsmith@gmail.com> from http://yt-project.org/
cdict = {'red': ((0.0, 80/256., 80/256.),
(0.2, 0.0, 0.0),
(0.4, 0.0, 0.0),
(0.6, 256/256., 256/256.),
(0.95, 256/256., 256/256.),
(1.0, 150/256., 150/256.)),
'green': ((0.0, 0/256., 0/256.),
(0.2, 0/256., 0/256.),
(0.4, 130/256., 130/256.),
(0.6, 256/256., 256/256.),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 80/256., 80/256.),
(0.2, 220/256., 220/256.),
(0.4, 0.0, 0.0),
(0.6, 20/256., 20/256.),
(1.0, 0.0, 0.0))}
plt.register_cmap(name='bds_highcontrast', data=cdict)
# Define YELLOW_RED colormap: each row consists of (x, y0, y1) where the x must increase from 0 to 1
#row i: x y0 y1
# /
# /
#row i+1: x y0 y1
cdict = {'red': ((0.0, 0.9, 0.9),
(1.0, 0.9, 0.9)),
'green': ((0.0, 0.9, 0.9),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0))}
plt.register_cmap(name='YELLOW_RED', data=cdict)
#cmap=plt.get_cmap('YELLOW_RED')
#cmap=plt.get_cmap('autumn')
#cmap=plt.get_cmap('gist_heat')
#cmap=plt.get_cmap('Spectral_r')
#cmap.set_under([0.0, 0.0, 0.0])
# Load profile
profileName = os.path.abspath("foam_ball_short.dsa")
frameID = 230
frameManager = framemanager_python.FrameManagerWrapper()
frameManager.load_profile(profileName);
numTSFrames = frameManager.get_tsframe_count();
matrixID = 1
# Load single frame
tsframe = np.copy( frameManager.get_tsframe(frameID, matrixID) );
cols = tsframe.shape[1]
rows = tsframe.shape[0]
# Add padding on border
padding = 2
v_padding = np.empty((padding, cols)); v_padding.fill(-50)
h_padding = np.empty((rows+2*padding, padding)); h_padding.fill(-50)
zs = np.vstack([v_padding, tsframe]) # Top
zs = np.vstack([zs, v_padding]) # Bottom
zs = np.hstack([h_padding, zs]) # Left
zs = np.hstack([zs, h_padding]) # Right
# Update matrix size with padding
cols = zs.shape[1]
rows = zs.shape[0]
# Coordinates of sampled data points
xs = np.arange(0, cols, 1)
ys = np.arange(0, rows, 1)
# Coordinates of interpolation points
scaleFactor = 10;
xi = np.linspace(xs.min(), xs.max(), cols*scaleFactor)
yi = np.linspace(ys.min(), ys.max(), rows*scaleFactor)
#------------------------------------------------------
# Interpolate with cubic splines
spline = scipy.interpolate.RectBivariateSpline(ys, xs, zs, kx=3, ky=3, s=0)
# Evaluate splines
zi = spline(yi, xi)
#------------------------------------------------------
'''
#------------------------------------------------------
# Polynomial interpolation: ‘linear’, ‘nearest’, ‘cubic’
coordinates = [(y, x) for y in ys for x in xs]
zs_flattened = np.ravel(zs, order='C')
coordinates_interpolated = [(y, x) for y in yi for x in xi]
# Interpolate with griddata
zi_flattened= scipy.interpolate.griddata(coordinates, zs_flattened, coordinates_interpolated, method='cubic')
# Reshape flattened array to 2D
zi = zi_flattened.reshape((rows*scaleFactor, cols*scaleFactor))
#------------------------------------------------------
'''
#------------------------------------------------------
# Old API
# Set up a regular grid of sampled data points
#ys, xs = np.meshgrid(xs, ys)
# Set up a regular grid of interpolated points
#yi, xi = np.meshgrid(xi, yi)
# Interpolate
#tck = scipy.interpolate.bisplrep(xs2, ys2, zs, kx=3, ky=3, s=0)
# Evaluate splines
#zi = scipy.interpolate.bisplev(xi2[:,0], yi2[0,:], tck)
#------------------------------------------------------
# Apply threshold to level out small values (interpolation ripples)
min_threshold = 25
zi[zi < min_threshold ] = 0
#########################################
# Plotting
#########################################
fig, ax = plt.subplots()
############
# Histogram
############
plt.hist(zi.flatten(), 128, range=(min_threshold, zi.max()), fc='k', ec='k')
plt.savefig("histogram.pdf", format='pdf')
plt.close()
########################
# Interpolated image
########################
fig, ax = plt.subplots()
# Interpolated image
#cmap=plt.get_cmap('gray')
cmap=plt.get_cmap('bds_highcontrast')
cax = ax.imshow(zi, cmap=cmap, vmin=zs.min(), vmax=zs.max(), origin='lower', extent=[xs.min(), xs.max(), ys.min(), ys.max()])
# Colorbar with contour levels
cbar = fig.colorbar(cax)
cbar.set_label('Raw sensor value', rotation=90)
cbar.solids.set_edgecolor("face") # set the color of the lines
ax.invert_yaxis()
ax.xaxis.tick_top()
plt.axis('off')
plt.savefig("interpolation.pdf", format='pdf')
plt.close()
############
# Contour
############
fig, ax = plt.subplots()
# Nearest-Neighbor Image
cax = ax.imshow(zs, interpolation='nearest', cmap=plt.get_cmap('gray'), vmin=zs.min(), vmax=zs.max(), origin='lower', extent=[xs.min(), xs.max(), ys.min(), ys.max()])
#------------------------------------------------------
# Contour lines: contour()
#------------------------------------------------------
contour_threshold = 50
levels = np.linspace(contour_threshold, zs.max(), 10)
#contour = ax.contour(xi, yi, zi, levels, linewidths=1.0, colors=[(0.0, 0.0, 0.0)], origin='upper') # black contour
contour = ax.contour(xi, yi, zi, levels, linewidths=1.0, colors=[(1.0, 0.0, 0.0)], origin='upper') # Red contour
#contour = ax.contour(xi, yi, zi, levels, linewidths=1.0, cmap=plt.get_cmap('bds_highcontrast'), origin='upper') # Colormap
#plt.clabel(contour, inline=True, fontsize=9)
# Colorbar with contour levels
cbar = fig.colorbar(cax)
cbar.add_lines(contour)
cbar.set_label('Raw sensor value', rotation=90)
cbar.solids.set_edgecolor("face") # set the color of the lines
'''
#------------------------------------------------------
# Filled contours: contourf()
#------------------------------------------------------
# Background image
background = np.empty((rows, cols)); background.fill(0)
cax = ax.imshow(background, cmap=plt.get_cmap('gray'), origin='lower', extent=[xs.min(), xs.max(), ys.min(), ys.max()] )
# Filled contour
contour_threshold = 100 # Ignore "ripples" from spline extrapolation
max_threshold = 0 # Boost the upper limit to avoid truncation error
levels = np.linspace(contour_threshold, zs.max(), num=10, endpoint=True)
# Levels correspond to midpoint of layers:
# Extend level range to enlarge top layer (avoid ugly hole)
levels[-1] = levels[-1] + (levels[-1] - levels[-2])/2
contour = ax.contourf(xi, yi, zi, levels=levels, cmap=plt.get_cmap('bds_highcontrast'), origin='upper')
cbar = fig.colorbar(contour, format='%.0f')
cbar.set_label('mV', rotation=0)
cbar.solids.set_edgecolor("face") # set the color of the lines
# Restore old levels
#levels[-1] = zs.max()
#cbar.set_ticks(levels)
#------------------------------------------------------
'''
ax.invert_yaxis()
ax.xaxis.tick_top()
plt.axis('off')
plt.savefig("contour.pdf", format='pdf')
plt.show()
|
gpl-3.0
| 8,087,984,003,630,864,000
| 28.015504
| 167
| 0.569329
| false
| 3.063011
| false
| false
| false
|
elopio/snapcraft
|
tests/unit/plugins/test_base.py
|
1
|
3087
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest.mock
from testtools.matchers import Equals
import snapcraft
from tests import unit
class TestBasePlugin(unit.TestCase):
def setUp(self):
super().setUp()
self.project_options = snapcraft.ProjectOptions()
def test_parallel_build_count_returns_1_when_disabled(self):
options = unit.MockOptions(disable_parallel=True)
plugin = snapcraft.BasePlugin('test_plugin', options,
self.project_options)
self.assertThat(plugin.parallel_build_count, Equals(1))
def test_parallel_build_count_returns_build_count_from_project(self):
options = unit.MockOptions(disable_parallel=False)
plugin = snapcraft.BasePlugin('test_plugin', options,
self.project_options)
        # A bare patch.object(...) call does nothing; the patcher has to be
        # activated (here as a context manager) for the value to take effect.
        with unittest.mock.patch.object(
                type(self.project_options), 'parallel_build_count',
                new_callable=unittest.mock.PropertyMock, return_value=2):
            self.assertThat(plugin.parallel_build_count, Equals(2))
def test_part_name_with_forward_slash_is_one_directory(self):
plugin = snapcraft.BasePlugin('test/part', options=None)
os.makedirs(plugin.sourcedir)
self.assertIn('test\N{BIG SOLIDUS}part', os.listdir('parts'))
@unittest.mock.patch('snapcraft.internal.common.run')
def test_run_without_specifying_cwd(self, mock_run):
plugin = snapcraft.BasePlugin('test/part', options=None)
plugin.run(['ls'])
mock_run.assert_called_once_with(['ls'], cwd=plugin.builddir)
@unittest.mock.patch('snapcraft.internal.common.run')
def test_run_specifying_a_cwd(self, mock_run):
plugin = snapcraft.BasePlugin('test/part', options=None)
plugin.run(['ls'], cwd=plugin.sourcedir)
mock_run.assert_called_once_with(['ls'], cwd=plugin.sourcedir)
@unittest.mock.patch('snapcraft.internal.common.run_output')
def test_run_output_without_specifying_cwd(self, mock_run):
plugin = snapcraft.BasePlugin('test/part', options=None)
plugin.run_output(['ls'])
mock_run.assert_called_once_with(['ls'], cwd=plugin.builddir)
@unittest.mock.patch('snapcraft.internal.common.run_output')
def test_run_output_specifying_a_cwd(self, mock_run):
plugin = snapcraft.BasePlugin('test/part', options=None)
plugin.run_output(['ls'], cwd=plugin.sourcedir)
mock_run.assert_called_once_with(['ls'], cwd=plugin.sourcedir)
|
gpl-3.0
| -4,015,501,113,157,418,000
| 38.075949
| 73
| 0.685455
| false
| 3.723764
| true
| false
| false
|
jlweand/DssVisualizer
|
app/plugins/datasource/elasticsearch/multiIncludeThroughput.py
|
1
|
5393
|
# Copyright (C) 2016 Jamie Acosta, Jennifer Weand, Juan Soto, Mark Eby, Mark Smith, Andres Olivas
#
# This file is part of DssVisualizer.
#
# DssVisualizer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DssVisualizer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DssVisualizer. If not, see <http://www.gnu.org/licenses/>.
from plugins.datasource.elasticsearch.annotations import Annotations
from plugins.datasource.elasticsearch.common import Common
from elasticsearch import Elasticsearch
from plugins.datasource.elasticsearch.selecting import Selecting
class MultiIncludeThroughput:
def __init__(self):
self.esIndex = Common().getIndexName()
self.multiIncludeThroughputDocType = "multiincludethroughput"
self.resultSize = Common().getSizeToReturn()
def importMultiIncludeThroughputData(self, jsonObjects):
es = Elasticsearch()
es.indices.create(index=self.esIndex, ignore=400)
insertedCount = 0
for json in jsonObjects:
result = es.index(index=self.esIndex, doc_type=self.multiIncludeThroughputDocType, body=json)
insertedCount += result["_shards"]["successful"]
return insertedCount
# select data by date range of the 'start' column
def selectMultiIncludeThroughputData(self, startDate, endDate, techNames, eventNames, eventTechNames):
select = Selecting().generateSelectQuery(startDate, endDate, techNames, eventNames, eventTechNames, False, True)
data = Elasticsearch().search(index=self.esIndex, doc_type=self.multiIncludeThroughputDocType, size=self.resultSize, body=select)
return Selecting().fixAllTheData(data)
# select single data point
def selectMultiIncludeThroughputDataById(self, dataId):
data = Elasticsearch().get(index=self.esIndex, doc_type=self.multiIncludeThroughputDocType, id=dataId)
return Selecting().fixOneData(data)
    # add or edit a fixedData record on this data point
def modifyFixedMultiIncludeThroughputData(self, dataId, traffic_xy_id, className, x, y, isDeleted):
updateFixed = {"doc": {"fixedData": {"traffic_xy_id": traffic_xy_id, "className": className, "x": x, "y": y, "isDeleted": isDeleted}}}
result = Elasticsearch().update(index=self.esIndex, doc_type=self.multiIncludeThroughputDocType, body=updateFixed, id = dataId)
return Common().getModfiedCount(result)
# delete the fixedData
def deleteFixedMultiIncludeThroughputData(self, dataId):
deleteFixed = {"script" : "ctx._source.remove(\"fixedData\")"}
result = Elasticsearch().update(index=self.esIndex, doc_type=self.multiIncludeThroughputDocType, body=deleteFixed, id = dataId)
return Common().getModfiedCount(result)
# add or edit an annotation to the object. This will add a single 'annotation' attribute to the object.
def modifyAnnotationMultiIncludeThroughput(self, dataId, annotationText):
return Annotations().modifyAnnotation(self.multiIncludeThroughputDocType, dataId, annotationText)
# add an annotation to an array of annotations for the dataId
def addAnnotationToArrayMultiIncludeThroughput(self, dataId, annotationText):
return Annotations().addAnnotationToArray(self.multiIncludeThroughputDocType, dataId, annotationText)
# edit an annotation in the array of annotations.
def editAnnotationInArrayMultiIncludeThroughput(self, dataId, oldAnnotationText, newAnnotationText):
return Annotations().editAnnotationInArray(self.multiIncludeThroughputDocType, dataId, oldAnnotationText, newAnnotationText)
# delete an annotation from array for the dataId
def deleteAnnotationFromArrayMultiIncludeThroughput(self, dataId, annotationText):
return Annotations().deleteAnnotationFromArray(self.multiIncludeThroughputDocType, dataId, annotationText)
# deletes all annotations for the dataId
def deleteAllAnnotationsForMultiIncludeThroughput(self, dataId):
return Annotations().deleteAllAnnotationsForData(self.multiIncludeThroughputDocType, dataId)
# add an annotation to the timeline, not a datapoint
def addAnnotationToMultiIncludeThroughputTimeline(self, multiIncludeThroughput, annotationText):
return Annotations().addAnnotationToTimeline(self.multiIncludeThroughputDocType, multiIncludeThroughput, annotationText)
# def getDistinctTechNamesForEvents(self, eventNames):
# collection = self.getMultiIncludeThroughputCollection()
# return TechAndEventNames().getDistinctTechNamesForEvents(collection, eventNames)
#
# def getDistinctEventNames(self):
# collection = self.getMultiIncludeThroughputCollection()
# return TechAndEventNames().getDistinctEventNames(collection)
#
# def getDistinctTechAndEventNames(self):
# collection = self.getMultiIncludeThroughputCollection()
# return TechAndEventNames().getDistinctTechAndEventNames(collection)
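# Illustrative usage of this plugin (method names come from this module; the
# surrounding DssVisualizer wiring is assumed):
#
#   throughput = MultiIncludeThroughput()
#   count = throughput.importMultiIncludeThroughputData(json_objects)
#   rows = throughput.selectMultiIncludeThroughputData(start, end, techNames,
#                                                      eventNames, eventTechNames)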
|
gpl-3.0
| 8,172,515,192,321,630,000
| 54.597938
| 142
| 0.76043
| false
| 4.021626
| false
| false
| false
|
pydanny/dj-stripe
|
djstripe/contrib/rest_framework/views.py
|
1
|
2877
|
"""
.. module:: dj-stripe.contrib.rest_framework.views.
:synopsis: Views for the dj-stripe REST API.
.. moduleauthor:: Philippe Luickx (@philippeluickx)
"""
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from ...models import Customer
from ...settings import CANCELLATION_AT_PERIOD_END, subscriber_request_callback
from .serializers import CreateSubscriptionSerializer, SubscriptionSerializer
class SubscriptionRestView(APIView):
"""API Endpoints for the Subscription object."""
permission_classes = (IsAuthenticated,)
def get(self, request, **kwargs):
"""
Return the customer's valid subscriptions.
Returns with status code 200.
"""
customer, _created = Customer.get_or_create(
subscriber=subscriber_request_callback(self.request)
)
serializer = SubscriptionSerializer(customer.subscription)
return Response(serializer.data)
def post(self, request, **kwargs):
"""
Create a new current subscription for the user.
Returns with status code 201.
"""
serializer = CreateSubscriptionSerializer(data=request.data)
if serializer.is_valid():
try:
customer, _created = Customer.get_or_create(
subscriber=subscriber_request_callback(self.request)
)
customer.add_card(serializer.data["stripe_token"])
charge_immediately = serializer.data.get("charge_immediately")
if charge_immediately is None:
charge_immediately = True
customer.subscribe(serializer.data["plan"], charge_immediately)
return Response(serializer.data, status=status.HTTP_201_CREATED)
except Exception:
# TODO: Better error messages
return Response(
"Something went wrong processing the payment.",
status=status.HTTP_400_BAD_REQUEST,
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, **kwargs):
"""
Mark the customers current subscription as canceled.
Returns with status code 204.
"""
try:
customer, _created = Customer.get_or_create(
subscriber=subscriber_request_callback(self.request)
)
customer.subscription.cancel(at_period_end=CANCELLATION_AT_PERIOD_END)
return Response(status=status.HTTP_204_NO_CONTENT)
except Exception:
return Response(
"Something went wrong cancelling the subscription.",
status=status.HTTP_400_BAD_REQUEST,
)
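# Usage sketch (illustrative only, not part of this module): the view is a
# plain DRF APIView, so it can be routed like any other class-based view, e.g.
#
#   from django.urls import path
#   urlpatterns = [
#       path("subscription/", SubscriptionRestView.as_view(), name="subscription"),
#   ]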
|
bsd-3-clause
| 8,368,867,647,684,724,000
| 33.25
| 82
| 0.63017
| false
| 4.755372
| false
| false
| false
|
jul/dsat
|
mics_utils/universal_testing_client.py
|
1
|
2218
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import time, sleep, asctime as _asctime
import sched
from random import randint
import logging
import logging.handlers
import sys,os
import readline
from readline import write_history_file, read_history_file
import zmq
from simplejson import dumps, load, loads
from dsat.message import send_vector, fast_parse_vector, extract_vector_from_dict
from dsat.state import _f
import dsat
print dsat.__version__
_to = sys.argv[1]
_mode = sys.argv[2]
_stable = sys.argv[3] == "bind"
_what = None if len(sys.argv) <= 4 else sys.argv[4]
my_logger = logging.getLogger('Logger')
my_logger.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
my_logger.addHandler(handler)
def D(msg):
my_logger.warning("%r:%s" % (os.getpid(), msg))
HIST = ".hist_zmq"
if not os.path.exists(HIST):
write_history_file(HIST)
read_history_file(HIST)
### init phase: load its parameters
context = zmq.Context()
client = context.socket(getattr( zmq, _mode))
sleep(1)
_boc = _stable and "bind" or "connect"
_cnx_mode = getattr(client, _boc )
_cnx_mode(_to)
if _mode == "SUB":
client.setsockopt(zmq.SUBSCRIBE, '')
    print "SUBSCRIBING ALL"
sleep(1)
print "address: %r" % _to
print "PATTERN: %r" % _mode
print _boc
print "message template is: %s" % dumps(extract_vector_from_dict({}), indent=4)
abort = False
recv = False
message=_what
while message and not abort:
if "q" == message:
break
if "r" == _what:
recv=True
elif _what:
message = _what
abort = True
else:
message = "".join(iter(lambda :raw_input("%s >" % _to), "ç"))
try:
if recv:
cpt = 0
while True:
print "waiting ..."
print fast_parse_vector(client)
                print "RECEIVED"
print (" " * cpt ) + [ "\\", "-" , "/" , "|" ][cpt%4]
cpt += 1
else:
print("SENT %s" % loads(message))
print "\n"
print client.socket_type
send_vector(client, loads(message))
except Exception as e:
print(repr(e))
D("sent %r" % message)
write_history_file(HIST)
|
bsd-2-clause
| 6,014,448,464,119,381,000
| 23.362637
| 81
| 0.608029
| false
| 3.227074
| false
| false
| false
|
Grumbel/rfactorlcd
|
tests/test_ac_state.py
|
1
|
1625
|
# rFactor Remote LCD
# Copyright (C) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import struct
import os
from rfactorlcd.ac_state import HandshakeResponse, RTLap, RTCarInfo
datadir = os.path.join(os.path.dirname(__file__), "data")
with open(os.path.join(datadir, "ac-hand.log"), "rb") as fin:
handshake_response_data = fin.read()
with open(os.path.join(datadir, "ac-1.log"), "rb") as fin:
car_data = fin.read()
with open(os.path.join(datadir, "ac-2.log"), "rb") as fin:
lap_data = fin.read()
class AssettoCorsaStateTestCase(unittest.TestCase):
def test_handshake_parsing(self):
data = HandshakeResponse(handshake_response_data)
print data
def test_lap_parsing(self):
print len(lap_data)
lapinfo = RTLap(lap_data)
print lapinfo
def test_carinfo_parsing(self):
print len(car_data)
car = RTCarInfo(car_data)
print car
if __name__ == '__main__':
unittest.main()
# EOF #
|
gpl-3.0
| -4,213,333,178,204,055,000
| 28.545455
| 71
| 0.693538
| false
| 3.472222
| true
| false
| false
|
nlhepler/freetype-py3
|
examples/glyph-outline.py
|
1
|
1311
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Glyph outline rendering
'''
from freetype import *
if __name__ == '__main__':
import numpy
import matplotlib.pyplot as plt
face = Face(b'./Vera.ttf')
face.set_char_size( 4*48*64 )
flags = FT_LOAD_DEFAULT | FT_LOAD_NO_BITMAP
face.load_char('S', flags )
slot = face.glyph
glyph = slot.get_glyph()
stroker = Stroker( )
stroker.set(64, FT_STROKER_LINECAP_ROUND, FT_STROKER_LINEJOIN_ROUND, 0 )
glyph.stroke( stroker )
blyph = glyph.to_bitmap(FT_RENDER_MODE_NORMAL, Vector(0,0))
bitmap = blyph.bitmap
width, rows, pitch = bitmap.width, bitmap.rows, bitmap.pitch
top, left = blyph.top, blyph.left
data = []
for i in range(rows):
data.extend(bitmap.buffer[i*pitch:i*pitch+width])
Z = numpy.array(data,dtype=numpy.ubyte).reshape(rows, width)
plt.figure(figsize=(6,8))
plt.imshow(Z, interpolation='nearest', cmap=plt.cm.gray_r)
plt.savefig('test.pdf', format='pdf')
plt.show()
|
bsd-3-clause
| -7,109,555,045,368,138,000
| 31.775
| 79
| 0.56598
| false
| 3.422977
| false
| false
| false
|
HybridF5/jacket
|
jacket/tests/compute/unit/fake_notifier.py
|
1
|
3739
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from jacket import rpc
NOTIFICATIONS = []
VERSIONED_NOTIFICATIONS = []
def reset():
del NOTIFICATIONS[:]
del VERSIONED_NOTIFICATIONS[:]
FakeMessage = collections.namedtuple('Message',
['publisher_id', 'priority',
'event_type', 'payload', 'context'])
class FakeNotifier(object):
def __init__(self, transport, publisher_id, serializer=None):
self.transport = transport
self.publisher_id = publisher_id
self._serializer = serializer or messaging.serializer.NoOpSerializer()
for priority in ['debug', 'info', 'warn', 'error', 'critical']:
setattr(self, priority,
functools.partial(self._notify, priority.upper()))
def prepare(self, publisher_id=None):
if publisher_id is None:
publisher_id = self.publisher_id
return self.__class__(self.transport, publisher_id,
serializer=self._serializer)
def _notify(self, priority, ctxt, event_type, payload):
payload = self._serializer.serialize_entity(ctxt, payload)
        # NOTE(sileht): simulate the kombu serializer
        # this permits raising an exception if something has not
        # been serialized correctly
jsonutils.to_primitive(payload)
# NOTE(melwitt): Try to serialize the context, as the rpc would.
# An exception will be raised if something is wrong
# with the context.
self._serializer.serialize_context(ctxt)
msg = FakeMessage(self.publisher_id, priority, event_type,
payload, ctxt)
NOTIFICATIONS.append(msg)
class FakeVersionedNotifier(FakeNotifier):
def _notify(self, priority, ctxt, event_type, payload):
payload = self._serializer.serialize_entity(ctxt, payload)
VERSIONED_NOTIFICATIONS.append({'publisher_id': self.publisher_id,
'priority': priority,
'event_type': event_type,
'payload': payload})
def stub_notifier(stubs):
stubs.Set(messaging, 'Notifier', FakeNotifier)
if rpc.LEGACY_NOTIFIER and rpc.NOTIFIER:
stubs.Set(rpc, 'LEGACY_NOTIFIER',
FakeNotifier(rpc.LEGACY_NOTIFIER.transport,
rpc.LEGACY_NOTIFIER.publisher_id,
serializer=getattr(rpc.LEGACY_NOTIFIER,
'_serializer',
None)))
stubs.Set(rpc, 'NOTIFIER',
FakeVersionedNotifier(rpc.NOTIFIER.transport,
rpc.NOTIFIER.publisher_id,
serializer=getattr(rpc.NOTIFIER,
'_serializer',
None)))
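# Illustrative use in a test case (the test fixture names are assumed; only
# stub_notifier/reset/NOTIFICATIONS come from this module):
#
#   fake_notifier.stub_notifier(self.stubs)
#   self.addCleanup(fake_notifier.reset)
#   ...  # exercise code that emits a notification
#   self.assertEqual('INFO', fake_notifier.NOTIFICATIONS[0].priority)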
|
apache-2.0
| 6,867,932,777,672,015,000
| 39.641304
| 78
| 0.57395
| false
| 4.824516
| false
| false
| false
|
cernops/keystone
|
keystone/policy/backends/rules.py
|
1
|
2753
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy engine for keystone."""
from oslo_log import log
from oslo_policy import policy as common_policy
import keystone.conf
from keystone import exception
from keystone.policy.backends import base
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
_ENFORCER = None
def reset():
global _ENFORCER
_ENFORCER = None
def init():
global _ENFORCER
if not _ENFORCER:
_ENFORCER = common_policy.Enforcer(CONF)
def enforce(credentials, action, target, do_raise=True):
"""Verify that the action is valid on the target in this context.
:param credentials: user credentials
:param action: string representing the action to be checked, which should
be colon separated for clarity.
:param target: dictionary representing the object of the action for object
creation this should be a dictionary representing the
location of the object e.g. {'project_id':
object.project_id}
:raises keystone.exception.Forbidden: If verification fails.
Actions should be colon separated for clarity. For example:
* identity:list_users
"""
init()
# Add the exception arguments if asked to do a raise
extra = {}
if do_raise:
extra.update(exc=exception.ForbiddenAction, action=action,
do_raise=do_raise)
return _ENFORCER.enforce(action, target, credentials, **extra)
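# Example call (illustrative only; the credentials dict normally comes from
# the request context):
#
#   enforce({'user_id': user_id, 'project_id': project_id},
#           'identity:list_users', {})
#
# With do_raise=True (the default) a ForbiddenAction error is raised when the
# policy check fails; with do_raise=False the boolean result is returned.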
class Policy(base.PolicyDriverV8):
def enforce(self, credentials, action, target):
LOG.debug('enforce %(action)s: %(credentials)s', {
'action': action,
'credentials': credentials})
enforce(credentials, action, target)
def create_policy(self, policy_id, policy):
raise exception.NotImplemented()
def list_policies(self):
raise exception.NotImplemented()
def get_policy(self, policy_id):
raise exception.NotImplemented()
def update_policy(self, policy_id, policy):
raise exception.NotImplemented()
def delete_policy(self, policy_id):
raise exception.NotImplemented()
|
apache-2.0
| -5,975,855,063,058,688,000
| 28.923913
| 78
| 0.680712
| false
| 4.248457
| false
| false
| false
|
s910324/Sloth
|
bokehPlotter/bokehLine.py
|
1
|
1301
|
class bokehLine(object):
def __init__(self, line, symbol = None, viewNum = None, parent = None):
self.line = line
self.symbol = symbol
self.viewNum = viewNum
self.style = None
self.val = {'name' : self.line.name,
'color' : self.line.line_color,
'width' : self.line.line_width,
'style' : None,
'symbol' : self.symbol,
'visible' : self.line.visible,
'viewNum' : self.viewNum}
def line_val(self, name = None, color = None, width = None,
style = None, symbol = None, visible = None, viewNum = None):
if name is not None:
self.line.name = name
if color:
self.line.line_color = color
if width is not None:
self.line.line_width = width
if style:
self.style = style
if symbol:
self.symbol = symbol
if visible is not None:
self.line.visible = visible
if viewNum is not None:
self.viewNum = viewNum
self.val.update({'name' : self.line.name})
self.val.update({'color' : self.line.line_color})
self.val.update({'width' : self.line.line_width})
self.val.update({'style' : self.style})
self.val.update({'symbol' : self.symbol})
self.val.update({'visible' : self.line.visible})
self.val.update({'viewNum' : self.viewNum})
return self.val
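# Illustrative usage (assumes ``line_glyph`` is the bokeh line object this
# class is meant to wrap; exact construction depends on the surrounding code):
#
#   wrapped = bokehLine(line_glyph, symbol='circle', viewNum=0)
#   wrapped.line_val(color='red', width=2, visible=True)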
|
lgpl-3.0
| -8,097,567,311,734,556,000
| 30.731707
| 72
| 0.607994
| false
| 2.878319
| false
| false
| false
|
dwcoder/diceware
|
tests/test_config.py
|
1
|
5149
|
import os
from diceware.config import (
OPTIONS_DEFAULTS, valid_locations, get_configparser, get_config_dict,
configparser,
)
class TestConfigModule(object):
# tests for diceware.config
def test_defaults(self):
# there is a set of defaults for options available
assert OPTIONS_DEFAULTS is not None
def test_valid_locations(self, home_dir):
# we look for config files in user home and local dir
assert valid_locations() == [
str(home_dir / ".diceware.ini")
]
def test_get_configparser(self, tmpdir):
# we can parse simple configs
conf_path = tmpdir / "sample.ini"
conf_path.write("\n".join(["[diceware]", "num=1", ""]))
found, config = get_configparser([str(conf_path), ])
assert found == [str(conf_path)]
def test_get_configparser_empty_list(self):
# we cope with empty config file lists
found, config = get_configparser([])
assert found == []
def test_get_configparser_no_list(self, home_dir):
# we cope with no list at all
found, config = get_configparser()
assert found == []
def test_get_configparser_default_path(self, home_dir):
# a config file in $HOME is looked up by default
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "num = 3", ""]))
found, config = get_configparser()
assert found == [str(config_file)]
def test_get_config_dict_no_config_file(self, home_dir):
# we get config values even without a config file.
conf_dict = get_config_dict()
assert conf_dict == OPTIONS_DEFAULTS
def test_get_config_dict_no_diceware_section(self, home_dir):
# we cope with config files, if they do not contain a diceware config
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[not-diceware]", "num = 3", ""]))
conf_dict = get_config_dict()
assert conf_dict == OPTIONS_DEFAULTS
def test_get_config_dict(self, home_dir):
# we can get config values from files as a dict.
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "num = 3", ""]))
conf_dict = get_config_dict()
assert len(conf_dict) == len(OPTIONS_DEFAULTS)
assert conf_dict != OPTIONS_DEFAULTS
def test_get_config_dict_int(self, home_dir):
# integer values are interpolated correctly
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "num=3", ""]))
conf_dict = get_config_dict()
assert "num" in conf_dict.keys()
assert conf_dict["num"] == 3
def test_get_config_dict_bool(self, home_dir):
# boolean values are interpolated correctly
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "caps = Off", ""]))
conf_dict = get_config_dict()
assert "caps" in conf_dict.keys()
assert conf_dict["caps"] is False
config_file.write("\n".join(["[diceware]", "caps = On", ""]))
assert get_config_dict()["caps"] is True
def test_get_config_dict_ignore_irrelevant(self, home_dir):
# values that have no default are ignored
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "foo = bar", ""]))
conf_dict = get_config_dict()
assert "foo" not in conf_dict.keys()
def test_get_config_dict_string(self, home_dir):
# string values are interpolated correctly
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "delimiter=!", ""]))
conf_dict = get_config_dict()
assert conf_dict["delimiter"] == "!"
def test_get_config_dict_string_empty(self, home_dir):
# we can set empty string values
config_file = home_dir / ".diceware.ini"
config_file.write("\n".join(["[diceware]", "delimiter=", ""]))
conf_dict = get_config_dict()
assert conf_dict["delimiter"] == ""
class TestSampleIni(object):
# test local sample ini file
def test_complete_options_set(self, home_dir):
# make sure the set of options in sample file is complete
sample_path = os.path.join(
os.path.dirname(__file__), 'sample_dot_diceware.ini')
parser = configparser.SafeConfigParser()
found = parser.read([sample_path, ])
assert sample_path in found
assert parser.has_section('diceware')
for key, val in OPTIONS_DEFAULTS.items():
# make sure option keywords are contained.
assert parser.has_option('diceware', key)
def test_no_invalid_options(self, home_dir):
# ensure we have no obsolete/unused options in sample
sample_path = os.path.join(
os.path.dirname(__file__), 'sample_dot_diceware.ini')
parser = configparser.SafeConfigParser()
parser.read([sample_path, ])
for option in parser.options('diceware'):
assert option in OPTIONS_DEFAULTS.keys()
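# For reference, a minimal ``~/.diceware.ini`` of the kind exercised above
# might look like the following (illustrative values; only keys present in
# OPTIONS_DEFAULTS are honoured, as the tests show):
#
#   [diceware]
#   num = 6
#   caps = on
#   delimiter = -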
|
gpl-3.0
| -8,182,861,869,564,112,000
| 39.543307
| 77
| 0.606331
| false
| 3.680486
| true
| false
| false
|
SalesforceFoundation/mrbelvedereci
|
metaci/testresults/migrations/0009_auto_20181207_2010.py
|
1
|
1093
|
# Generated by Django 2.1.3 on 2018-12-07 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("testresults", "0008_merge_20180911_1915"),
]
operations = [
migrations.AlterField(
model_name="testclass",
name="test_type",
field=models.CharField(
choices=[
("Apex", "Apex"),
("JUnit", "JUnit"),
("Robot", "Robot"),
("Other", "Other"),
],
db_index=True,
max_length=32,
),
),
migrations.AlterField(
model_name="testresult",
name="outcome",
field=models.CharField(
choices=[
("Pass", "Pass"),
("CompileFail", "CompileFail"),
("Fail", "Fail"),
("Skip", "Skip"),
],
db_index=True,
max_length=16,
),
),
]
|
bsd-3-clause
| -2,158,632,882,686,530,300
| 25.658537
| 52
| 0.399817
| false
| 4.814978
| false
| false
| false
|
demonchild2112/travis-test
|
grr/server/grr_response_server/gui/selenium_tests/report_test.py
|
1
|
4596
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from selenium.webdriver.common import keys
from grr_response_core.lib import rdfvalue
from grr_response_server import data_store
from grr_response_server.gui import gui_test_lib
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import test_lib
def AddFakeAuditLog(user=None, router_method_name=None):
data_store.REL_DB.WriteAPIAuditEntry(
rdf_objects.APIAuditEntry(
username=user,
router_method_name=router_method_name,
))
class TestReports(gui_test_lib.GRRSeleniumTest):
"""Test the reports interface."""
def testReports(self):
"""Test the reports interface."""
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
AddFakeAuditLog(user="User123")
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22")):
AddFakeAuditLog(user="User456")
# Make "test" user an admin.
self.CreateAdminUser(u"test")
self.Open("/#/stats/")
# Go to reports.
self.Click("css=#MostActiveUsersReportPlugin_anchor i.jstree-icon")
self.WaitUntil(self.IsTextPresent, "Server | User Breakdown")
# Enter a timerange that only matches one of the two fake events.
self.Type("css=grr-form-datetime input", "2012-12-21 12:34")
self.Click("css=button:contains('Show report')")
self.WaitUntil(self.IsTextPresent, "User456")
self.assertFalse(self.IsTextPresent("User123"))
def testReportsDontIncludeTimerangesInUrlsOfReportsThatDontUseThem(self):
client_id = self.SetupClient(0)
self.AddClientLabel(client_id, u"owner", u"bar")
self.Open("/#/stats/")
# Go to reports.
self.Click("css=#MostActiveUsersReportPlugin_anchor i.jstree-icon")
self.WaitUntil(self.IsTextPresent, "Server | User Breakdown")
# Default values aren't shown in the url.
self.WaitUntilNot(lambda: "start_time" in self.GetCurrentUrlPath())
self.assertNotIn("duration", self.GetCurrentUrlPath())
# Enter a timerange.
self.Type("css=grr-form-datetime input", "2012-12-21 12:34")
self.Type("css=grr-form-duration input", "2w")
self.Click("css=button:contains('Show report')")
# Reports that require timeranges include nondefault values in the url when
# `Show report' has been clicked.
self.WaitUntil(lambda: "start_time" in self.GetCurrentUrlPath())
self.assertIn("duration", self.GetCurrentUrlPath())
# Select a different report.
self.Click("css=#LastActiveReportPlugin_anchor i.jstree-icon")
self.WaitUntil(self.IsTextPresent, "Client | Last Active")
# The default label isn't included in the url.
self.WaitUntilNot(lambda: "bar" in self.GetCurrentUrlPath())
# Select a client label.
self.Select("css=grr-report select", "bar")
self.Click("css=button:contains('Show report')")
# Reports that require labels include them in the url after `Show report'
# has been clicked.
self.WaitUntil(lambda: "bar" in self.GetCurrentUrlPath())
    # Reports that don't require timeranges don't mention them in the url.
self.assertNotIn("start_time", self.GetCurrentUrlPath())
self.assertNotIn("duration", self.GetCurrentUrlPath())
# Select a different report.
self.Click("css=#GRRVersion7ReportPlugin_anchor i.jstree-icon")
self.WaitUntil(self.IsTextPresent, "Active Clients - 7 Days Active")
# The label is cleared when report type is changed.
self.WaitUntilNot(lambda: "bar" in self.GetCurrentUrlPath())
self.assertNotIn("start_time", self.GetCurrentUrlPath())
self.assertNotIn("duration", self.GetCurrentUrlPath())
class TestDateTimeInput(gui_test_lib.GRRSeleniumTest):
"""Tests datetime-form-directive."""
def testInputAllowsInvalidText(self):
# Make "test" user an admin.
self.CreateAdminUser(u"test")
# Open any page that shows the datetime-form-directive.
self.Open("/#/stats/HuntApprovalsReportPlugin")
datetime_input = self.WaitUntil(self.GetVisibleElement,
"css=grr-form-datetime input")
value = datetime_input.get_attribute("value")
self.assertRegexpMatches(value, r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}")
self.assertStartsWith(value, "20")
datetime_input.send_keys(keys.Keys.BACKSPACE)
self.WaitUntilNot(self.IsTextPresent, value)
self.assertEqual(value[:-1], datetime_input.get_attribute("value"))
if __name__ == "__main__":
app.run(test_lib.main)
|
apache-2.0
| -8,424,589,681,675,763,000
| 34.90625
| 79
| 0.708442
| false
| 3.554524
| true
| false
| false
|
rahuldan/sympy
|
sympy/sets/fancysets.py
|
2
|
46732
|
from __future__ import print_function, division
from sympy.logic.boolalg import And
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.compatibility import as_int, with_metaclass, range, PY3
from sympy.core.expr import Expr
from sympy.core.function import Lambda, _coeff_isneg
from sympy.core.singleton import Singleton, S
from sympy.core.symbol import Dummy, symbols, Wild
from sympy.core.sympify import _sympify, sympify, converter
from sympy.sets.sets import (Set, Interval, Intersection, EmptySet, Union,
FiniteSet, imageset)
from sympy.sets.conditionset import ConditionSet
from sympy.utilities.misc import filldedent, func_name
class Naturals(with_metaclass(Singleton, Set)):
"""
Represents the natural numbers (or counting numbers) which are all
positive integers starting from 1. This set is also available as
the Singleton, S.Naturals.
Examples
========
>>> from sympy import S, Interval, pprint
>>> 5 in S.Naturals
True
>>> iterable = iter(S.Naturals)
>>> next(iterable)
1
>>> next(iterable)
2
>>> next(iterable)
3
>>> pprint(S.Naturals.intersect(Interval(0, 10)))
{1, 2, ..., 10}
See Also
========
Naturals0 : non-negative integers (i.e. includes 0, too)
Integers : also includes negative integers
"""
is_iterable = True
_inf = S.One
_sup = S.Infinity
def _intersect(self, other):
if other.is_Interval:
return Intersection(
S.Integers, other, Interval(self._inf, S.Infinity))
return None
def _contains(self, other):
if other.is_positive and other.is_integer:
return S.true
elif other.is_integer is False or other.is_positive is False:
return S.false
def __iter__(self):
i = self._inf
while True:
yield i
i = i + 1
@property
def _boundary(self):
return self
class Naturals0(Naturals):
"""Represents the whole numbers which are all the non-negative integers,
inclusive of zero.
See Also
========
Naturals : positive integers; does not include 0
Integers : also includes the negative integers
"""
_inf = S.Zero
def _contains(self, other):
if other.is_integer and other.is_nonnegative:
return S.true
elif other.is_integer is False or other.is_nonnegative is False:
return S.false
class Integers(with_metaclass(Singleton, Set)):
"""
Represents all integers: positive, negative and zero. This set is also
available as the Singleton, S.Integers.
Examples
========
>>> from sympy import S, Interval, pprint
>>> 5 in S.Naturals
True
>>> iterable = iter(S.Integers)
>>> next(iterable)
0
>>> next(iterable)
1
>>> next(iterable)
-1
>>> next(iterable)
2
>>> pprint(S.Integers.intersect(Interval(-4, 4)))
{-4, -3, ..., 4}
See Also
========
Naturals0 : non-negative integers
Integers : positive and negative integers and zero
"""
is_iterable = True
def _intersect(self, other):
from sympy.functions.elementary.integers import floor, ceiling
if other is Interval(S.NegativeInfinity, S.Infinity) or other is S.Reals:
return self
elif other.is_Interval:
s = Range(ceiling(other.left), floor(other.right) + 1)
return s.intersect(other) # take out endpoints if open interval
return None
def _contains(self, other):
if other.is_integer:
return S.true
elif other.is_integer is False:
return S.false
def __iter__(self):
yield S.Zero
i = S.One
while True:
yield i
yield -i
i = i + 1
@property
def _inf(self):
return -S.Infinity
@property
def _sup(self):
return S.Infinity
@property
def _boundary(self):
return self
def _eval_imageset(self, f):
expr = f.expr
if not isinstance(expr, Expr):
return
if len(f.variables) > 1:
return
n = f.variables[0]
# f(x) + c and f(-x) + c cover the same integers
# so choose the form that has the fewest negatives
c = f(0)
fx = f(n) - c
f_x = f(-n) - c
neg_count = lambda e: sum(_coeff_isneg(_) for _ in Add.make_args(e))
if neg_count(f_x) < neg_count(fx):
expr = f_x + c
a = Wild('a', exclude=[n])
b = Wild('b', exclude=[n])
match = expr.match(a*n + b)
if match and match[a]:
# canonical shift
expr = match[a]*n + match[b] % match[a]
if expr != f.expr:
return ImageSet(Lambda(n, expr), S.Integers)
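        # Illustrative effect of the canonical shift above (not a doctest):
        #   imageset(x, 3*x + 5, S.Integers) -> ImageSet(Lambda(x, 3*x + 2), S.Integers)
        # since 5 % 3 == 2 picks the canonical offset for step 3.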
class Reals(with_metaclass(Singleton, Interval)):
def __new__(cls):
return Interval.__new__(cls, -S.Infinity, S.Infinity)
def __eq__(self, other):
return other == Interval(-S.Infinity, S.Infinity)
def __hash__(self):
return hash(Interval(-S.Infinity, S.Infinity))
class ImageSet(Set):
"""
Image of a set under a mathematical function. The transformation
must be given as a Lambda function which has as many arguments
as the elements of the set upon which it operates, e.g. 1 argument
when acting on the set of integers or 2 arguments when acting on
a complex region.
This function is not normally called directly, but is called
from `imageset`.
Examples
========
>>> from sympy import Symbol, S, pi, Dummy, Lambda
>>> from sympy.sets.sets import FiniteSet, Interval
>>> from sympy.sets.fancysets import ImageSet
>>> x = Symbol('x')
>>> N = S.Naturals
>>> squares = ImageSet(Lambda(x, x**2), N) # {x**2 for x in N}
>>> 4 in squares
True
>>> 5 in squares
False
>>> FiniteSet(0, 1, 2, 3, 4, 5, 6, 7, 9, 10).intersect(squares)
{1, 4, 9}
>>> square_iterable = iter(squares)
>>> for i in range(4):
... next(square_iterable)
1
4
9
16
>>> n = Dummy('n')
>>> solutions = ImageSet(Lambda(n, n*pi), S.Integers) # solutions of sin(x) = 0
>>> dom = Interval(-1, 1)
>>> dom.intersect(solutions)
{0}
See Also
========
sympy.sets.sets.imageset
"""
def __new__(cls, lamda, base_set):
if not isinstance(lamda, Lambda):
raise ValueError('first argument must be a Lambda')
if lamda is S.IdentityFunction:
return base_set
if not lamda.expr.free_symbols or not lamda.expr.args:
return FiniteSet(lamda.expr)
return Basic.__new__(cls, lamda, base_set)
lamda = property(lambda self: self.args[0])
base_set = property(lambda self: self.args[1])
def __iter__(self):
already_seen = set()
for i in self.base_set:
val = self.lamda(i)
if val in already_seen:
continue
else:
already_seen.add(val)
yield val
def _is_multivariate(self):
return len(self.lamda.variables) > 1
def _contains(self, other):
from sympy.matrices import Matrix
from sympy.solvers.solveset import solveset, linsolve
from sympy.utilities.iterables import is_sequence, iterable, cartes
L = self.lamda
if is_sequence(other):
if not is_sequence(L.expr):
return S.false
if len(L.expr) != len(other):
raise ValueError(filldedent('''
Dimensions of other and output of Lambda are different.'''))
elif iterable(other):
raise ValueError(filldedent('''
`other` should be an ordered object like a Tuple.'''))
solns = None
if self._is_multivariate():
if not is_sequence(L.expr):
# exprs -> (numer, denom) and check again
# XXX this is a bad idea -- make the user
# remap self to desired form
return other.as_numer_denom() in self.func(
Lambda(L.variables, L.expr.as_numer_denom()), self.base_set)
eqs = [expr - val for val, expr in zip(other, L.expr)]
variables = L.variables
free = set(variables)
if all(i.is_number for i in list(Matrix(eqs).jacobian(variables))):
solns = list(linsolve([e - val for e, val in
zip(L.expr, other)], variables))
else:
syms = [e.free_symbols & free for e in eqs]
solns = {}
for i, (e, s, v) in enumerate(zip(eqs, syms, other)):
if not s:
if e != v:
return S.false
                        solns[variables[i]] = [v]
continue
elif len(s) == 1:
sy = s.pop()
sol = solveset(e, sy)
if sol is S.EmptySet:
return S.false
elif isinstance(sol, FiniteSet):
solns[sy] = list(sol)
else:
raise NotImplementedError
else:
raise NotImplementedError
solns = cartes(*[solns[s] for s in variables])
else:
x = L.variables[0]
if isinstance(L.expr, Expr):
# scalar -> scalar mapping
solnsSet = solveset(L.expr - other, x)
if solnsSet.is_FiniteSet:
solns = list(solnsSet)
else:
msgset = solnsSet
else:
# scalar -> vector
for e, o in zip(L.expr, other):
solns = solveset(e - o, x)
if solns is S.EmptySet:
return S.false
for soln in solns:
try:
if soln in self.base_set:
break # check next pair
except TypeError:
if self.base_set.contains(soln.evalf()):
break
else:
return S.false # never broke so there was no True
return S.true
if solns is None:
raise NotImplementedError(filldedent('''
Determining whether %s contains %s has not
been implemented.''' % (msgset, other)))
for soln in solns:
try:
if soln in self.base_set:
return S.true
except TypeError:
return self.base_set.contains(soln.evalf())
return S.false
@property
def is_iterable(self):
return self.base_set.is_iterable
def _intersect(self, other):
from sympy.solvers.diophantine import diophantine
if self.base_set is S.Integers:
g = None
if isinstance(other, ImageSet) and other.base_set is S.Integers:
g = other.lamda.expr
m = other.lamda.variables[0]
elif other is S.Integers:
m = g = Dummy('x')
if g is not None:
f = self.lamda.expr
n = self.lamda.variables[0]
# Diophantine sorts the solutions according to the alphabetic
# order of the variable names, since the result should not depend
# on the variable name, they are replaced by the dummy variables
# below
a, b = Dummy('a'), Dummy('b')
f, g = f.subs(n, a), g.subs(m, b)
solns_set = diophantine(f - g)
if solns_set == set():
return EmptySet()
solns = list(diophantine(f - g))
if len(solns) != 1:
return
# since 'a' < 'b', select soln for n
nsol = solns[0][0]
t = nsol.free_symbols.pop()
return imageset(Lambda(n, f.subs(a, nsol.subs(t, n))), S.Integers)
if other == S.Reals:
from sympy.solvers.solveset import solveset_real
from sympy.core.function import expand_complex
if len(self.lamda.variables) > 1:
return None
f = self.lamda.expr
n = self.lamda.variables[0]
n_ = Dummy(n.name, real=True)
f_ = f.subs(n, n_)
re, im = f_.as_real_imag()
im = expand_complex(im)
return imageset(Lambda(n_, re),
self.base_set.intersect(
solveset_real(im, n_)))
elif isinstance(other, Interval):
from sympy.solvers.solveset import (invert_real, invert_complex,
solveset)
f = self.lamda.expr
n = self.lamda.variables[0]
base_set = self.base_set
new_inf, new_sup = None, None
if f.is_real:
inverter = invert_real
else:
inverter = invert_complex
g1, h1 = inverter(f, other.inf, n)
g2, h2 = inverter(f, other.sup, n)
if all(isinstance(i, FiniteSet) for i in (h1, h2)):
if g1 == n:
if len(h1) == 1:
new_inf = h1.args[0]
if g2 == n:
if len(h2) == 1:
new_sup = h2.args[0]
# TODO: Design a technique to handle multiple-inverse
# functions
# Any of the new boundary values cannot be determined
if any(i is None for i in (new_sup, new_inf)):
return
range_set = S.EmptySet
if all(i.is_real for i in (new_sup, new_inf)):
new_interval = Interval(new_inf, new_sup)
range_set = base_set._intersect(new_interval)
else:
if other.is_subset(S.Reals):
solutions = solveset(f, n, S.Reals)
if not isinstance(range_set, (ImageSet, ConditionSet)):
range_set = solutions._intersect(other)
else:
return
if range_set is S.EmptySet:
return S.EmptySet
elif isinstance(range_set, Range) and range_set.size is not S.Infinity:
range_set = FiniteSet(*list(range_set))
if range_set is not None:
return imageset(Lambda(n, f), range_set)
return
else:
return
class Range(Set):
"""
Represents a range of integers. Can be called as Range(stop),
    Range(start, stop), or Range(start, stop, step); when step is
    not given it defaults to 1.
    `Range(stop)` is the same as `Range(0, stop, 1)` and the stop value
    (just as for Python ranges) is not included in the Range values.
>>> from sympy import Range
>>> list(Range(3))
[0, 1, 2]
The step can also be negative:
>>> list(Range(10, 0, -2))
[10, 8, 6, 4, 2]
The stop value is made canonical so equivalent ranges always
have the same args:
>>> Range(0, 10, 3)
Range(0, 12, 3)
Infinite ranges are allowed. If the starting point is infinite,
then the final value is ``stop - step``. To iterate such a range,
it needs to be reversed:
>>> from sympy import oo
>>> r = Range(-oo, 1)
>>> r[-1]
0
>>> next(iter(r))
Traceback (most recent call last):
...
ValueError: Cannot iterate over Range with infinite start
>>> next(iter(r.reversed))
0
Although Range is a set (and supports the normal set
operations) it maintains the order of the elements and can
be used in contexts where `range` would be used.
>>> from sympy import Interval
>>> Range(0, 10, 2).intersect(Interval(3, 7))
Range(4, 8, 2)
>>> list(_)
[4, 6]
    Although slicing of a Range will always return a Range -- possibly
empty -- an empty set will be returned from any intersection that
is empty:
>>> Range(3)[:0]
Range(0, 0, 1)
>>> Range(3).intersect(Interval(4, oo))
EmptySet()
>>> Range(3).intersect(Range(4, oo))
EmptySet()
"""
is_iterable = True
def __new__(cls, *args):
from sympy.functions.elementary.integers import ceiling
if len(args) == 1:
if isinstance(args[0], range if PY3 else xrange):
args = args[0].__reduce__()[1] # use pickle method
# expand range
slc = slice(*args)
if slc.step == 0:
raise ValueError("step cannot be 0")
start, stop, step = slc.start or 0, slc.stop, slc.step or 1
try:
start, stop, step = [
w if w in [S.NegativeInfinity, S.Infinity]
else sympify(as_int(w))
for w in (start, stop, step)]
except ValueError:
raise ValueError(filldedent('''
Finite arguments to Range must be integers; `imageset` can define
other cases, e.g. use `imageset(i, i/10, Range(3))` to give
[0, 1/10, 1/5].'''))
if not step.is_Integer:
raise ValueError(filldedent('''
Ranges must have a literal integer step.'''))
if all(i.is_infinite for i in (start, stop)):
if start == stop:
# canonical null handled below
start = stop = S.One
else:
raise ValueError(filldedent('''
Either the start or end value of the Range must be finite.'''))
if start.is_infinite:
end = stop
else:
ref = start if start.is_finite else stop
n = ceiling((stop - ref)/step)
if n <= 0:
# null Range
start = end = 0
step = 1
else:
end = ref + n*step
return Basic.__new__(cls, start, end, step)
start = property(lambda self: self.args[0])
stop = property(lambda self: self.args[1])
step = property(lambda self: self.args[2])
@property
def reversed(self):
"""Return an equivalent Range in the opposite order.
Examples
========
>>> from sympy import Range
>>> Range(10).reversed
Range(9, -1, -1)
"""
if not self:
return self
return self.func(
self.stop - self.step, self.start - self.step, -self.step)
def _intersect(self, other):
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.complexes import sign
if other is S.Naturals:
return self._intersect(Interval(1, S.Infinity))
if other is S.Integers:
return self
if other.is_Interval:
if not all(i.is_number for i in other.args[:2]):
return
# In case of null Range, return an EmptySet.
if self.size == 0:
return S.EmptySet
# trim down to self's size, and represent
# as a Range with step 1.
start = ceiling(max(other.inf, self.inf))
if start not in other:
start += 1
end = floor(min(other.sup, self.sup))
if end not in other:
end -= 1
return self.intersect(Range(start, end + 1))
if isinstance(other, Range):
from sympy.solvers.diophantine import diop_linear
from sympy.core.numbers import ilcm
# non-overlap quick exits
if not other:
return S.EmptySet
if not self:
return S.EmptySet
if other.sup < self.inf:
return S.EmptySet
if other.inf > self.sup:
return S.EmptySet
# work with finite end at the start
r1 = self
if r1.start.is_infinite:
r1 = r1.reversed
r2 = other
if r2.start.is_infinite:
r2 = r2.reversed
# this equation represents the values of the Range;
# it's a linear equation
eq = lambda r, i: r.start + i*r.step
# we want to know when the two equations might
# have integer solutions so we use the diophantine
# solver
a, b = diop_linear(eq(r1, Dummy()) - eq(r2, Dummy()))
# check for no solution
no_solution = a is None and b is None
if no_solution:
return S.EmptySet
# there is a solution
# -------------------
# find the coincident point, c
a0 = a.as_coeff_Add()[0]
c = eq(r1, a0)
# find the first point, if possible, in each range
# since c may not be that point
def _first_finite_point(r1, c):
if c == r1.start:
return c
# st is the signed step we need to take to
# get from c to r1.start
st = sign(r1.start - c)*step
# use Range to calculate the first point:
# we want to get as close as possible to
# r1.start; the Range will not be null since
# it will at least contain c
s1 = Range(c, r1.start + st, st)[-1]
if s1 == r1.start:
pass
else:
# if we didn't hit r1.start then, if the
# sign of st didn't match the sign of r1.step
# we are off by one and s1 is not in r1
if sign(r1.step) != sign(st):
s1 -= st
if s1 not in r1:
return
return s1
# calculate the step size of the new Range
step = abs(ilcm(r1.step, r2.step))
s1 = _first_finite_point(r1, c)
if s1 is None:
return S.EmptySet
s2 = _first_finite_point(r2, c)
if s2 is None:
return S.EmptySet
# replace the corresponding start or stop in
# the original Ranges with these points; the
# result must have at least one point since
# we know that s1 and s2 are in the Ranges
def _updated_range(r, first):
st = sign(r.step)*step
if r.start.is_finite:
rv = Range(first, r.stop, st)
else:
rv = Range(r.start, first + st, st)
return rv
r1 = _updated_range(self, s1)
r2 = _updated_range(other, s2)
# work with them both in the increasing direction
if sign(r1.step) < 0:
r1 = r1.reversed
if sign(r2.step) < 0:
r2 = r2.reversed
# return clipped Range with positive step; it
# can't be empty at this point
start = max(r1.start, r2.start)
stop = min(r1.stop, r2.stop)
return Range(start, stop, step)
else:
return
def _contains(self, other):
if not self:
return S.false
if other.is_infinite:
return S.false
if not other.is_integer:
return other.is_integer
ref = self.start if self.start.is_finite else self.stop
if (ref - other) % self.step: # off sequence
return S.false
return _sympify(other >= self.inf and other <= self.sup)
def __iter__(self):
if self.start in [S.NegativeInfinity, S.Infinity]:
raise ValueError("Cannot iterate over Range with infinite start")
elif self:
i = self.start
step = self.step
while True:
if (step > 0 and not (self.start <= i < self.stop)) or \
(step < 0 and not (self.stop < i <= self.start)):
break
yield i
i += step
def __len__(self):
if not self:
return 0
dif = self.stop - self.start
if dif.is_infinite:
raise ValueError(
"Use .size to get the length of an infinite Range")
return abs(dif//self.step)
@property
def size(self):
try:
return _sympify(len(self))
except ValueError:
return S.Infinity
def __nonzero__(self):
return self.start != self.stop
__bool__ = __nonzero__
def __getitem__(self, i):
from sympy.functions.elementary.integers import ceiling
ooslice = "cannot slice from the end with an infinite value"
zerostep = "slice step cannot be zero"
# if we had to take every other element in the following
# oo, ..., 6, 4, 2, 0
# we might get oo, ..., 4, 0 or oo, ..., 6, 2
ambiguous = "cannot unambiguously re-stride from the end " + \
"with an infinite value"
if isinstance(i, slice):
if self.size.is_finite:
start, stop, step = i.indices(self.size)
n = ceiling((stop - start)/step)
if n <= 0:
return Range(0)
canonical_stop = start + n*step
end = canonical_stop - step
ss = step*self.step
return Range(self[start], self[end] + ss, ss)
else: # infinite Range
start = i.start
stop = i.stop
if i.step == 0:
raise ValueError(zerostep)
step = i.step or 1
ss = step*self.step
#---------------------
# handle infinite on right
# e.g. Range(0, oo) or Range(0, -oo, -1)
# --------------------
if self.stop.is_infinite:
# start and stop are not interdependent --
# they only depend on step --so we use the
# equivalent reversed values
return self.reversed[
stop if stop is None else -stop + 1:
start if start is None else -start:
step].reversed
#---------------------
# handle infinite on the left
# e.g. Range(oo, 0, -1) or Range(-oo, 0)
# --------------------
# consider combinations of
# start/stop {== None, < 0, == 0, > 0} and
# step {< 0, > 0}
if start is None:
if stop is None:
if step < 0:
return Range(self[-1], self.start, ss)
elif step > 1:
raise ValueError(ambiguous)
else: # == 1
return self
elif stop < 0:
if step < 0:
return Range(self[-1], self[stop], ss)
else: # > 0
return Range(self.start, self[stop], ss)
elif stop == 0:
if step > 0:
return Range(0)
else: # < 0
raise ValueError(ooslice)
elif stop == 1:
if step > 0:
raise ValueError(ooslice) # infinite singleton
else: # < 0
raise ValueError(ooslice)
else: # > 1
raise ValueError(ooslice)
elif start < 0:
if stop is None:
if step < 0:
return Range(self[start], self.start, ss)
else: # > 0
return Range(self[start], self.stop, ss)
elif stop < 0:
return Range(self[start], self[stop], ss)
elif stop == 0:
if step < 0:
raise ValueError(ooslice)
else: # > 0
return Range(0)
elif stop > 0:
raise ValueError(ooslice)
elif start == 0:
if stop is None:
if step < 0:
raise ValueError(ooslice) # infinite singleton
elif step > 1:
raise ValueError(ambiguous)
else: # == 1
return self
elif stop < 0:
if step > 1:
raise ValueError(ambiguous)
elif step == 1:
return Range(self.start, self[stop], ss)
else: # < 0
return Range(0)
else: # >= 0
raise ValueError(ooslice)
elif start > 0:
raise ValueError(ooslice)
else:
if not self:
raise IndexError('Range index out of range')
if i == 0:
return self.start
if i == -1 or i is S.Infinity:
return self.stop - self.step
rv = (self.stop if i < 0 else self.start) + i*self.step
if rv.is_infinite:
raise ValueError(ooslice)
if rv < self.inf or rv > self.sup:
raise IndexError("Range index out of range")
return rv
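    # A short sketch of the slicing behaviour implemented above (hedged;
    # it assumes the public Range API used elsewhere in sympy):
    #   Range(10)[::2]   -> Range(0, 10, 2)
    #   Range(10)[-1]    -> 9
    #   Range(oo, 0, -1)[::2] raises ValueError, since re-striding from an
    #   infinite end is ambiguous (see the `ambiguous` message above).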
def _eval_imageset(self, f):
from sympy.core.function import expand_mul
if not self:
return S.EmptySet
if not isinstance(f.expr, Expr):
return
if self.size == 1:
return FiniteSet(f(self[0]))
if f is S.IdentityFunction:
return self
x = f.variables[0]
expr = f.expr
# handle f that is linear in f's variable
if x not in expr.free_symbols or x in expr.diff(x).free_symbols:
return
if self.start.is_finite:
F = f(self.step*x + self.start) # for i in range(len(self))
else:
F = f(-self.step*x + self[-1])
F = expand_mul(F)
if F != expr:
return imageset(x, F, Range(self.size))
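    # For instance (a sketch, assuming the public imageset() helper): applying
    # f = Lambda(x, x + 1) to Range(2, 10, 2) is re-parameterised above into an
    # ImageSet of 2*x + 3 over Range(4), i.e. the set {3, 5, 7, 9}.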
@property
def _inf(self):
if not self:
raise NotImplementedError
if self.step > 0:
return self.start
else:
return self.stop - self.step
@property
def _sup(self):
if not self:
raise NotImplementedError
if self.step > 0:
return self.stop - self.step
else:
return self.start
@property
def _boundary(self):
return self
if PY3:
converter[range] = Range
else:
converter[xrange] = Range
def normalize_theta_set(theta):
"""
    Normalize a real set `theta` to the interval [0, 2*pi). It returns
    a normalized value of theta in that set. For an Interval, at most
    one cycle [0, 2*pi) is returned, i.e. for theta equal to [0, 10*pi]
    the returned normalized value is [0, 2*pi). As of now, intervals
    with endpoints that are not multiples of `pi` are not supported.
Raises
======
NotImplementedError
The algorithms for Normalizing theta Set are not yet
implemented.
ValueError
The input is not valid, i.e. the input is not a real set.
RuntimeError
It is a bug, please report to the github issue tracker.
Examples
========
>>> from sympy.sets.fancysets import normalize_theta_set
>>> from sympy import Interval, FiniteSet, pi
>>> normalize_theta_set(Interval(9*pi/2, 5*pi))
[pi/2, pi]
>>> normalize_theta_set(Interval(-3*pi/2, pi/2))
[0, 2*pi)
>>> normalize_theta_set(Interval(-pi/2, pi/2))
[0, pi/2] U [3*pi/2, 2*pi)
>>> normalize_theta_set(Interval(-4*pi, 3*pi))
[0, 2*pi)
>>> normalize_theta_set(Interval(-3*pi/2, -pi/2))
[pi/2, 3*pi/2]
>>> normalize_theta_set(FiniteSet(0, pi, 3*pi))
{0, pi}
"""
from sympy.functions.elementary.trigonometric import _pi_coeff as coeff
if theta.is_Interval:
interval_len = theta.measure
# one complete circle
if interval_len >= 2*S.Pi:
if interval_len == 2*S.Pi and theta.left_open and theta.right_open:
k = coeff(theta.start)
return Union(Interval(0, k*S.Pi, False, True),
Interval(k*S.Pi, 2*S.Pi, True, True))
return Interval(0, 2*S.Pi, False, True)
k_start, k_end = coeff(theta.start), coeff(theta.end)
if k_start is None or k_end is None:
raise NotImplementedError("Normalizing theta without pi as coefficient is "
"not yet implemented")
new_start = k_start*S.Pi
new_end = k_end*S.Pi
if new_start > new_end:
return Union(Interval(S.Zero, new_end, False, theta.right_open),
Interval(new_start, 2*S.Pi, theta.left_open, True))
else:
return Interval(new_start, new_end, theta.left_open, theta.right_open)
elif theta.is_FiniteSet:
new_theta = []
for element in theta:
k = coeff(element)
if k is None:
                raise NotImplementedError('Normalizing theta without pi as '
                                          'coefficient is not implemented.')
else:
new_theta.append(k*S.Pi)
return FiniteSet(*new_theta)
elif theta.is_Union:
return Union(*[normalize_theta_set(interval) for interval in theta.args])
elif theta.is_subset(S.Reals):
raise NotImplementedError("Normalizing theta when, it is of type %s is not "
"implemented" % type(theta))
else:
raise ValueError(" %s is not a real set" % (theta))
class ComplexRegion(Set):
"""
    Represents a set of complex numbers, i.e. a region of the complex
    plane, in either of the two standard forms: polar or rectangular
    coordinates.
    * Polar Form
      Input is a ProductSet or Union of ProductSets of the intervals
      of r and theta, together with the flag polar=True.
      Z = {z in C | z = r*[cos(theta) + I*sin(theta)], r in [r], theta in [theta]}
    * Rectangular Form
      Input is a ProductSet or Union of ProductSets of the intervals
      of x and y of the complex numbers in a plane. Rectangular form
      is the default input type.
    Z = {z in C | z = x + I*y, x in [Re(z)], y in [Im(z)]}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion
>>> from sympy.sets import Interval
>>> from sympy import S, I, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 6)
>>> c = Interval(1, 8)
>>> c1 = ComplexRegion(a*b) # Rectangular Form
>>> c1
ComplexRegion([2, 3] x [4, 6], False)
    * c1 represents the rectangular region in the complex plane
      bounded by the four vertices (2, 4), (3, 4), (3, 6) and
      (2, 6).
>>> c2 = ComplexRegion(Union(a*b, b*c))
>>> c2
ComplexRegion([2, 3] x [4, 6] U [4, 6] x [1, 8], False)
    * c2 represents the union of two rectangular regions in the
      complex plane. One of them is the region of c1 and the other
      is bounded by the vertices (4, 1), (6, 1), (6, 8) and (4, 8).
>>> 2.5 + 4.5*I in c1
True
>>> 2.5 + 6.5*I in c1
False
>>> r = Interval(0, 1)
>>> theta = Interval(0, 2*S.Pi)
>>> c2 = ComplexRegion(r*theta, polar=True) # Polar Form
>>> c2 # unit Disk
ComplexRegion([0, 1] x [0, 2*pi), True)
* c2 represents the region in complex plane inside the
Unit Disk centered at the origin.
>>> 0.5 + 0.5*I in c2
True
>>> 1 + 2*I in c2
False
>>> unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True)
>>> upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True)
>>> intersection = unit_disk.intersect(upper_half_unit_disk)
>>> intersection
ComplexRegion([0, 1] x [0, pi], True)
>>> intersection == upper_half_unit_disk
True
See Also
========
Reals
"""
is_ComplexRegion = True
def __new__(cls, sets, polar=False):
from sympy import sin, cos
x, y, r, theta = symbols('x, y, r, theta', cls=Dummy)
I = S.ImaginaryUnit
polar = sympify(polar)
# Rectangular Form
if polar == False:
if all(_a.is_FiniteSet for _a in sets.args) and (len(sets.args) == 2):
# ** ProductSet of FiniteSets in the Complex Plane. **
# For Cases like ComplexRegion({2, 4}*{3}), It
# would return {2 + 3*I, 4 + 3*I}
complex_num = []
for x in sets.args[0]:
for y in sets.args[1]:
complex_num.append(x + I*y)
obj = FiniteSet(*complex_num)
else:
obj = ImageSet.__new__(cls, Lambda((x, y), x + I*y), sets)
obj._variables = (x, y)
obj._expr = x + I*y
# Polar Form
elif polar == True:
new_sets = []
# sets is Union of ProductSets
if not sets.is_ProductSet:
for k in sets.args:
new_sets.append(k)
# sets is ProductSets
else:
new_sets.append(sets)
# Normalize input theta
for k, v in enumerate(new_sets):
from sympy.sets import ProductSet
new_sets[k] = ProductSet(v.args[0],
normalize_theta_set(v.args[1]))
sets = Union(*new_sets)
obj = ImageSet.__new__(cls, Lambda((r, theta),
r*(cos(theta) + I*sin(theta))),
sets)
obj._variables = (r, theta)
obj._expr = r*(cos(theta) + I*sin(theta))
else:
raise ValueError("polar should be either True or False")
obj._sets = sets
obj._polar = polar
return obj
@property
def sets(self):
"""
        Return the raw input sets of self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.sets
[2, 3] x [4, 5]
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.sets
[2, 3] x [4, 5] U [4, 5] x [1, 7]
"""
return self._sets
@property
def args(self):
return (self._sets, self._polar)
@property
def variables(self):
return self._variables
@property
def expr(self):
return self._expr
@property
def psets(self):
"""
        Return a tuple of the input sets (ProductSets) of self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.psets
([2, 3] x [4, 5],)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.psets
([2, 3] x [4, 5], [4, 5] x [1, 7])
"""
        if self.sets.is_ProductSet:
            psets = (self.sets,)
        else:
            psets = self.sets.args
return psets
@property
def a_interval(self):
"""
        Return the union of intervals of `x` when self is in
rectangular form, or the union of intervals of `r` when
self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.a_interval
[2, 3]
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.a_interval
[2, 3] U [4, 5]
"""
a_interval = []
for element in self.psets:
a_interval.append(element.args[0])
a_interval = Union(*a_interval)
return a_interval
@property
def b_interval(self):
"""
        Return the union of intervals of `y` when self is in
rectangular form, or the union of intervals of `theta`
when self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.b_interval
[4, 5]
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.b_interval
[1, 7]
"""
b_interval = []
for element in self.psets:
b_interval.append(element.args[1])
b_interval = Union(*b_interval)
return b_interval
@property
def polar(self):
"""
Returns True if self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union, S
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> theta = Interval(0, 2*S.Pi)
>>> C1 = ComplexRegion(a*b)
>>> C1.polar
False
>>> C2 = ComplexRegion(a*theta, polar=True)
>>> C2.polar
True
"""
return self._polar
@property
def _measure(self):
"""
The measure of self.sets.
Examples
========
>>> from sympy import Interval, ComplexRegion, S
>>> a, b = Interval(2, 5), Interval(4, 8)
>>> c = Interval(0, 2*S.Pi)
>>> c1 = ComplexRegion(a*b)
>>> c1.measure
12
>>> c2 = ComplexRegion(a*c, polar=True)
>>> c2.measure
6*pi
"""
return self.sets._measure
def _contains(self, other):
from sympy.functions import arg, Abs
from sympy.core.containers import Tuple
other = sympify(other)
isTuple = isinstance(other, Tuple)
if isTuple and len(other) != 2:
raise ValueError('expecting Tuple of length 2')
# self in rectangular form
if not self.polar:
re, im = other if isTuple else other.as_real_imag()
for element in self.psets:
if And(element.args[0]._contains(re),
element.args[1]._contains(im)):
return True
return False
# self in polar form
elif self.polar:
if isTuple:
r, theta = other
elif other.is_zero:
r, theta = S.Zero, S.Zero
else:
r, theta = Abs(other), arg(other)
for element in self.psets:
if And(element.args[0]._contains(r),
element.args[1]._contains(theta)):
return True
return False
def _intersect(self, other):
if other.is_ComplexRegion:
# self in rectangular form
if (not self.polar) and (not other.polar):
return ComplexRegion(Intersection(self.sets, other.sets))
# self in polar form
elif self.polar and other.polar:
r1, theta1 = self.a_interval, self.b_interval
r2, theta2 = other.a_interval, other.b_interval
new_r_interval = Intersection(r1, r2)
new_theta_interval = Intersection(theta1, theta2)
# 0 and 2*Pi means the same
if ((2*S.Pi in theta1 and S.Zero in theta2) or
(2*S.Pi in theta2 and S.Zero in theta1)):
new_theta_interval = Union(new_theta_interval,
FiniteSet(0))
return ComplexRegion(new_r_interval*new_theta_interval,
polar=True)
if other is S.Reals:
return other
if other.is_subset(S.Reals):
new_interval = []
# self in rectangular form
if not self.polar:
for element in self.psets:
if S.Zero in element.args[0]:
new_interval.append(element.args[0])
new_interval = Union(*new_interval)
return Intersection(new_interval, other)
# self in polar form
elif self.polar:
for element in self.psets:
if (0 in element.args[1]) or (S.Pi in element.args[1]):
new_interval.append(element.args[0])
new_interval = Union(*new_interval)
return Intersection(new_interval, other)
def _union(self, other):
if other.is_ComplexRegion:
# self in rectangular form
if (not self.polar) and (not other.polar):
return ComplexRegion(Union(self.sets, other.sets))
# self in polar form
elif self.polar and other.polar:
return ComplexRegion(Union(self.sets, other.sets), polar=True)
if self == S.Complexes:
return self
return None
class Complexes(with_metaclass(Singleton, ComplexRegion)):
def __new__(cls):
return ComplexRegion.__new__(cls, S.Reals*S.Reals)
def __eq__(self, other):
return other == ComplexRegion(S.Reals*S.Reals)
def __hash__(self):
return hash(ComplexRegion(S.Reals*S.Reals))
def __str__(self):
return "S.Complexes"
def __repr__(self):
return "S.Complexes"
|
bsd-3-clause
| -3,314,107,003,832,846,000
| 31.430257
| 90
| 0.499444
| false
| 4.100737
| false
| false
| false
|
awlange/brainsparks
|
src/calrissian/regularization/particle_regularize_l2.py
|
1
|
5014
|
import numpy as np
class ParticleRegularizeL2(object):
"""
L2 regularizer for charges
"""
def __init__(self, coeff_lambda=0.0, zeta=8.0):
self.coeff_lambda = coeff_lambda
self.zeta = zeta
self.n = 1
def cost(self, particle_input, layers):
c = 0.0
# c = np.sum(particle_input.q * particle_input.q)
# # c = np.sum(particle_input.rx * particle_input.rx + particle_input.ry * particle_input.ry + particle_input.rz * particle_input.rz)
# for layer in layers:
# # c += np.sum(layer.q * layer.q) + np.sum(layer.b * layer.b)
# # c += np.sum(layer.q * layer.q)
# # c += np.sum(layer.rx * layer.rx + layer.ry * layer.ry + layer.rz * layer.rz)
#
# # Layer inter-particle repulsion
# for i in range(layer.output_size):
# rx_i = layer.rx[i]
# ry_i = layer.ry[i]
# rz_i = layer.rz[i]
# for j in range(i+1, layer.output_size):
# dx = layer.rx[j] - rx_i
# dy = layer.ry[j] - ry_i
# dz = layer.rz[j] - rz_i
# d2 = dx*dx + dy*dy + dz*dz
# c += np.exp(-self.zeta * d2)
#
# n = layer.output_size
# c /= (n * (n-1)) / 2
# # Input layer inter-particle repulsion
# for i in range(particle_input.output_size):
# rx_i = particle_input.rx[i]
# ry_i = particle_input.ry[i]
# rz_i = particle_input.rz[i]
# for j in range(i+1, particle_input.output_size):
# dx = particle_input.rx[j] - rx_i
# dy = particle_input.ry[j] - ry_i
# dz = particle_input.rz[j] - rz_i
# d2 = dx*dx + dy*dy + dz*dz
# c += np.exp(-self.zeta * d2)
# c /= n
# Compute the matrices
r = particle_input.get_rxyz()
for i, layer in enumerate(layers):
w = layer.compute_w(r)
# c += np.sum(w * w)
c += np.mean(w * w)
r = layer.get_rxyz()
return self.coeff_lambda * c
def cost_gradient(self, particle_input, layers, dc_dq, dc_db, dc_dr):
# dc_dr_x = dc_dr[0]
# dc_dr_y = dc_dr[1]
# dc_dr_z = dc_dr[2]
#
# two_lambda = 2.0 * self.coeff_lambda
#
# # # dc_dq[0] += two_lambda * particle_input.q
# # # dc_dr_x[0] += two_lambda * particle_input.rx
# # # dc_dr_y[0] += two_lambda * particle_input.ry
# # # dc_dr_z[0] += two_lambda * particle_input.rz
# for l, layer in enumerate(layers):
# # dc_dq[l] += two_lambda * layer.q
# # dc_db[l] += two_lambda * layer.b
# # dc_dr_x[l+1] += two_lambda * layer.rx
# # dc_dr_y[l+1] += two_lambda * layer.ry
# # dc_dr_z[l+1] += two_lambda * layer.rz
#
# n = layer.output_size
# n = (n * (n-1)) / 2
# for i in range(layer.output_size):
# rx_i = layer.rx[i]
# ry_i = layer.ry[i]
# rz_i = layer.rz[i]
# for j in range(i+1, layer.output_size):
# dx = layer.rx[j] - rx_i
# dy = layer.ry[j] - ry_i
# dz = layer.rz[j] - rz_i
# d2 = dx*dx + dy*dy + dz*dz
# # tmp = two_lambda * self.zeta * np.exp(-self.zeta * d2)
# tmp = two_lambda * self.zeta * np.exp(-self.zeta * d2) / n
# tx = tmp * dx
# ty = tmp * dy
# tz = tmp * dz
#
# dc_dr_x[l+1][i] += tx
# dc_dr_y[l+1][i] += ty
# dc_dr_z[l+1][i] += tz
# dc_dr_x[l+1][j] -= tx
# dc_dr_y[l+1][j] -= ty
# dc_dr_z[l+1][j] -= tz
# #
# # # for i in range(particle_input.output_size):
# # # rx_i = particle_input.rx[i]
# # # ry_i = particle_input.ry[i]
# # # rz_i = particle_input.rz[i]
# # # for j in range(i+1, particle_input.output_size):
# # # dx = particle_input.rx[j] - rx_i
# # # dy = particle_input.ry[j] - ry_i
# # # dz = particle_input.rz[j] - rz_i
# # # d2 = dx*dx + dy*dy + dz*dz
# # # tmp = two_lambda * self.zeta * np.exp(-self.zeta * d2)
# # # tx = tmp * dx
# # # ty = tmp * dy
# # # tz = tmp * dz
# # #
# # # dc_dr_x[0][i] += tx
# # # dc_dr_y[0][i] += ty
# # # dc_dr_z[0][i] += tz
# # # dc_dr_x[0][j] -= tx
# # # dc_dr_y[0][j] -= ty
# # # dc_dr_z[0][j] -= tz
#
# dc_dr = (dc_dr_x, dc_dr_y, dc_dr_z)
return dc_dq, dc_db, dc_dr
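# A minimal usage sketch (hypothetical: `particle_input` and `layers` are
# assumed to be the particle-network objects from the surrounding calrissian
# package, providing get_rxyz() and compute_w()):
#
#     regularizer = ParticleRegularizeL2(coeff_lambda=1e-4)
#     penalty = regularizer.cost(particle_input, layers)
#     # penalty == coeff_lambda * sum over layers of mean(w * w)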
|
mit
| 2,553,449,654,175,343,000
| 36.984848
| 141
| 0.406063
| false
| 2.904983
| false
| false
| false
|
NathanW2/qmap
|
src/qmap/floatingtoolbar.py
|
1
|
1226
|
from PyQt4.QtGui import QToolBar, QActionGroup
from PyQt4.QtCore import Qt, QPoint
from utils import log
class FloatingToolBar(QToolBar):
"""
    A floating QToolBar with no border that is offset under its parent
"""
def __init__(self, name, parent):
"""
parent: The parent of this toolbar. Should be another toolbar
"""
QToolBar.__init__(self,name, parent)
self.setMovable(False)
self.setWindowFlags(Qt.Tool | Qt.FramelessWindowHint | Qt.X11BypassWindowManagerHint)
self.setAllowedAreas(Qt.NoToolBarArea)
self.actiongroup = QActionGroup(self)
def addToActionGroup(self, action):
self.actiongroup.addAction(action)
def showToolbar(self, parentaction, defaultaction, toggled):
if toggled:
self.show()
if defaultaction:
defaultaction.toggle()
widget = self.parent().widgetForAction(parentaction)
x = self.parent().mapToGlobal(widget.pos()).x()
y = self.parent().mapToGlobal(widget.pos()).y()
newpoint = QPoint(x, y + self.parent().rect().height())
# if self.orientation() == Qt.Vertical:
# newpoint = QPoint(x, y + self.parent().rect().width())
self.move(newpoint)
else:
action = self.actiongroup.checkedAction()
if action:
action.toggle()
self.hide()
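# A minimal usage sketch (the parent toolbar and actions below are
# hypothetical; only FloatingToolBar comes from this module):
#
#     editbar = FloatingToolBar("Editing", parenttoolbar)
#     editbar.addToActionGroup(selectaction)
#     # show the floating bar just below its parent when parentaction toggles
#     parentaction.toggled.connect(
#         lambda checked: editbar.showToolbar(parentaction, selectaction, checked))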
|
gpl-2.0
| 2,753,613,497,718,192,000
| 31.263158
| 87
| 0.709625
| false
| 3.111675
| false
| false
| false
|
willrp/willbuyer
|
backend/tests/integration/controller/api/cart/test_select_all_controller.py
|
1
|
1098
|
import pytest
from flask import json
from json.decoder import JSONDecodeError
from backend.util.response.cart import CartSchema
def test_select_all_controller(flask_app, es_create):
prod_list = es_create("products", 2)
item_id = prod_list[0].meta["id"]
item_id_2 = prod_list[1].meta["id"]
with flask_app.test_client() as client:
response = client.get(
"api/cart"
)
with pytest.raises(JSONDecodeError):
json.loads(response.data)
assert response.status_code == 204
with client.session_transaction() as sess:
assert "cart" not in sess
sess["cart"] = {item_id: 1, item_id_2: 2}
assert sess["cart"][item_id] == 1
assert sess["cart"][item_id_2] == 2
response = client.get(
"api/cart"
)
data = json.loads(response.data)
CartSchema().load(data)
assert response.status_code == 200
for item in data["products"]:
assert item["id"] in [item_id, item_id_2]
assert item["amount"] in [1, 2]
|
mit
| -7,319,561,115,062,300,000
| 27.153846
| 53
| 0.577413
| false
| 3.69697
| false
| false
| false
|
philgyford/django-ditto
|
ditto/pinboard/admin.py
|
1
|
2602
|
from django.contrib import admin
from django.db import models
from django.forms import Textarea, TextInput
from taggit.managers import TaggableManager
from taggit.forms import TagWidget
from ..core.admin import DittoItemModelAdmin
from .models import Account, Bookmark
@admin.register(Account)
class AccountAdmin(admin.ModelAdmin):
list_display = (
"username",
"is_active",
"time_created",
"time_modified",
)
fieldsets = (
(None, {"fields": ("username", "url", "is_active",)}),
(
"API",
{
"fields": ("api_token",),
"description": (
"Your API Token can be found at "
'<a href="https://pinboard.in/settings/password">'
"pinboard.in/settings/password</a>"
),
},
),
("Data", {"fields": ("time_created", "time_modified",)}),
)
readonly_fields = (
"time_created",
"time_modified",
)
@admin.register(Bookmark)
class BookmarkAdmin(DittoItemModelAdmin):
list_display = (
"title",
"post_time",
"account",
)
list_filter = (
"post_time",
"is_private",
"to_read",
"account",
)
fieldsets = (
(
None,
{
"fields": (
"account",
"title",
"url",
"description",
"summary",
"tags",
"post_time",
"post_year_str",
"url_hash",
)
},
),
("Options", {"fields": ("is_private", "to_read",)}),
("Data", {"fields": ("raw", "fetch_time", "time_created", "time_modified",)}),
)
formfield_overrides = {
# Make the inputs full-width.
models.CharField: {"widget": TextInput(attrs={"class": "vLargeTextField"})},
# Reduce the number of rows; similar to Pinboard's description field.
models.TextField: {
"widget": Textarea(
attrs={"class": "vLargeTextField", "cols": 40, "rows": 4}
)
},
# Make the input full-width.
TaggableManager: {"widget": TagWidget(attrs={"class": "vLargeTextField"})},
}
readonly_fields = (
"post_year_str",
"raw",
"fetch_time",
"time_created",
"time_modified",
)
search_fields = (
"title",
"url",
"description",
)
|
mit
| 3,542,037,757,899,506,000
| 24.762376
| 86
| 0.468486
| false
| 4.224026
| false
| false
| false
|
xrubio/abm4s3
|
python/experimentTransmission.py
|
1
|
2038
|
#
# Copyright (c) 2015 - Xavier Rubio-Campillo
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version
#
# The source code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/python3
import transmission, random
def singleRun():
params = transmission.Params()
params.transmissionType = 'prestige'
params.nAgents = 30
params.nSteps = 100
params.output = 'output.csv'
params.oneFile = False
transmission.run(params)
def experiment():
numRuns = 100
transmissionTypeSweep = ['vertical','encounter','prestige','conformist']
params = transmission.Params()
params.xDim = 10
params.yDim = 10
params.replacementRate = 0.1
params.moveDistance = 1.0
params.interactionRadius = 1.0
params.innovationRate = 0.01
params.nTraits = 3
params.nTraitRange = 5
params.prestigeIndex = 1
params.nSteps = 1000
params.storeAllSteps = True
params.oneFile = False
totalRuns = 0
    # perform numRuns of each type, randomly sampling nAgents from 50 to 500
for i in transmissionTypeSweep:
for j in range(0, numRuns):
print('run:',totalRuns+1,'of:',numRuns*len(transmissionTypeSweep))
params.numRun = totalRuns
params.transmissionType = i
params.nAgents = random.randint(50,500)
params.output = 'output_tr_'+str(params.numRun)+'.csv'
totalRuns += 1
transmission.run(params)
def main():
singleRun()
if __name__ == "__main__":
main()
|
gpl-3.0
| 6,615,233,437,712,181,000
| 30.353846
| 79
| 0.675662
| false
| 3.718978
| false
| false
| false
|
topwebmaster/cookiecutter-django
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/taskapp/celery.py
|
1
|
2180
|
{% if cookiecutter.use_celery == 'y' %}
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('{{cookiecutter.project_slug}}')
class CeleryAppConfig(AppConfig):
name = '{{cookiecutter.project_slug}}.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
{% if cookiecutter.use_sentry == 'y' -%}
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
{% if cookiecutter.use_pycharm == 'y' -%}
# Since raven is required in production only,
# imports might (most surely will) be wiped out
# during PyCharm code clean up started
# in other environments.
# @formatter:off
{%- endif %}
from raven import Client as RavenClient
from raven.contrib.celery import register_signal as raven_register_signal
from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
{% if cookiecutter.use_pycharm == 'y' -%}
# @formatter:on
{%- endif %}
raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['dsn'])
raven_register_logger_signal(raven_client)
raven_register_signal(raven_client)
{%- endif %}
@app.task(bind=True)
def debug_task(self):
print(f'Request: {self.request!r}') # pragma: no cover
{% else %}
# Use this as a starting point for your project with celery.
# If you are not using celery, you can remove this app
{% endif -%}
|
bsd-3-clause
| 2,726,470,006,636,422,000
| 36.586207
| 99
| 0.658257
| false
| 3.707483
| true
| false
| false
|
zawata/AccessLog
|
dependencies/miscFunc.py
|
1
|
1108
|
'''
A file of small misc. functions
'''
import socket
import subprocess
import httplib2
class Singleton(type):
'''
Singlton Design Pattern metaclass
'''
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
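# A minimal usage sketch of the Singleton metaclass (the Config class below is
# hypothetical; under Python 3 the `metaclass=` keyword would be used instead
# of the Python 2 __metaclass__ attribute shown here):
#
#     class Config(object):
#         __metaclass__ = Singleton
#
#     assert Config() is Config()  # repeated construction returns one instance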
def testRoot():
'''
Test Root Access
'''
#run a simple command as root and check if we need a password
p = subprocess.Popen(
'sudo -n echo',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
retval = (p.stdout.readlines()[0].find("sudo: a password is required") == -1)
p.wait()
return retval
def testInternet():
'''
Check Internet Connection
'''
# attempt a connection to google and report success or not
conn = httplib2.HTTPConnectionWithTimeout("www.google.com", timeout=None)
try:
conn.request("HEAD", "/")
return True
except socket.gaierror:
return False
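# A minimal usage sketch of the helpers above (the surrounding application
# code is hypothetical):
#
#     if not testRoot():
#         print("passwordless sudo is required")
#     if not testInternet():
#         print("no internet connection available")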
|
gpl-3.0
| -1,916,738,916,484,150,300
| 22.083333
| 81
| 0.609206
| false
| 4.058608
| false
| false
| false
|
dyoussef/s2p
|
s2plib/rectification.py
|
1
|
16836
|
# Copyright (C) 2015, Carlo de Franchis <carlo.de-franchis@cmla.ens-cachan.fr>
# Copyright (C) 2015, Gabriele Facciolo <facciolo@cmla.ens-cachan.fr>
# Copyright (C) 2015, Enric Meinhardt <enric.meinhardt@cmla.ens-cachan.fr>
from __future__ import print_function
import os
import numpy as np
from s2plib import rpc_model
from s2plib import rpc_utils
from s2plib import estimation
from s2plib import evaluation
from s2plib import common
from s2plib import visualisation
from s2plib import block_matching
from s2plib.config import cfg
def filter_matches_epipolar_constraint(F, matches, thresh):
"""
Discards matches that are not consistent with the epipolar constraint.
Args:
F: fundamental matrix
matches: list of pairs of 2D points, stored as a Nx4 numpy array
thresh: maximum accepted distance between a point and its matched
epipolar line
Returns:
the list of matches that satisfy the constraint. It is a sub-list of
the input list.
"""
out = []
for match in matches:
x = np.array([match[0], match[1], 1])
xx = np.array([match[2], match[3], 1])
d1 = evaluation.distance_point_to_line(x, np.dot(F.T, xx))
d2 = evaluation.distance_point_to_line(xx, np.dot(F, x))
if max(d1, d2) < thresh:
out.append(match)
return np.array(out)
def register_horizontally_shear(matches, H1, H2):
"""
Adjust rectifying homographies with tilt, shear and translation to reduce the disparity range.
Args:
matches: list of pairs of 2D points, stored as a Nx4 numpy array
H1, H2: two homographies, stored as numpy 3x3 matrices
Returns:
H2: corrected homography H2
The matches are provided in the original images coordinate system. By
transforming these coordinates with the provided homographies, we obtain
matches whose disparity is only along the x-axis.
"""
# transform the matches according to the homographies
p1 = common.points_apply_homography(H1, matches[:, :2])
x1 = p1[:, 0]
y1 = p1[:, 1]
p2 = common.points_apply_homography(H2, matches[:, 2:])
x2 = p2[:, 0]
y2 = p2[:, 1]
if cfg['debug']:
print("Residual vertical disparities: max, min, mean. Should be zero")
print(np.max(y2 - y1), np.min(y2 - y1), np.mean(y2 - y1))
# we search the (a, b, c) vector that minimises \sum (x1 - (a*x2+b*y2+c))^2
# it is a least squares minimisation problem
A = np.vstack((x2, y2, y2*0+1)).T
a, b, c = np.linalg.lstsq(A, x1)[0].flatten()
# correct H2 with the estimated tilt, shear and translation
return np.dot(np.array([[a, b, c], [0, 1, 0], [0, 0, 1]]), H2)
def register_horizontally_translation(matches, H1, H2, flag='center'):
"""
Adjust rectifying homographies with a translation to modify the disparity range.
Args:
matches: list of pairs of 2D points, stored as a Nx4 numpy array
H1, H2: two homographies, stored as numpy 3x3 matrices
flag: option needed to control how to modify the disparity range:
'center': move the barycenter of disparities of matches to zero
'positive': make all the disparities positive
'negative': make all the disparities negative. Required for
Hirshmuller stereo (java)
Returns:
H2: corrected homography H2
The matches are provided in the original images coordinate system. By
transforming these coordinates with the provided homographies, we obtain
matches whose disparity is only along the x-axis. The second homography H2
is corrected with a horizontal translation to obtain the desired property
on the disparity range.
"""
# transform the matches according to the homographies
p1 = common.points_apply_homography(H1, matches[:, :2])
x1 = p1[:, 0]
y1 = p1[:, 1]
p2 = common.points_apply_homography(H2, matches[:, 2:])
x2 = p2[:, 0]
y2 = p2[:, 1]
# for debug, print the vertical disparities. Should be zero.
if cfg['debug']:
print("Residual vertical disparities: max, min, mean. Should be zero")
print(np.max(y2 - y1), np.min(y2 - y1), np.mean(y2 - y1))
# compute the disparity offset according to selected option
t = 0
if (flag == 'center'):
t = np.mean(x2 - x1)
if (flag == 'positive'):
t = np.min(x2 - x1)
if (flag == 'negative'):
t = np.max(x2 - x1)
# correct H2 with a translation
return np.dot(common.matrix_translation(-t, 0), H2)
def disparity_range_from_matches(matches, H1, H2, w, h):
"""
Compute the disparity range of a ROI from a list of point matches.
The estimation is based on the extrapolation of the affine registration
estimated from the matches. The extrapolation is done on the whole region of
interest.
Args:
matches: Nx4 numpy array containing a list of matches, in the full
image coordinates frame, before rectification
w, h: width and height of the rectangular ROI in the first image.
H1, H2: two rectifying homographies, stored as numpy 3x3 matrices
Returns:
disp_min, disp_max: horizontal disparity range
"""
# transform the matches according to the homographies
p1 = common.points_apply_homography(H1, matches[:, :2])
x1 = p1[:, 0]
p2 = common.points_apply_homography(H2, matches[:, 2:])
x2 = p2[:, 0]
y2 = p2[:, 1]
# compute the final disparity range
disp_min = np.floor(np.min(x2 - x1))
disp_max = np.ceil(np.max(x2 - x1))
# add a security margin to the disparity range
disp_min *= (1 - np.sign(disp_min) * cfg['disp_range_extra_margin'])
disp_max *= (1 + np.sign(disp_max) * cfg['disp_range_extra_margin'])
return disp_min, disp_max
def disparity_range(rpc1, rpc2, x, y, w, h, H1, H2, matches, A=None):
"""
Compute the disparity range of a ROI from a list of point matches.
The estimation is based on the extrapolation of the affine registration
estimated from the matches. The extrapolation is done on the whole region of
interest.
Args:
rpc1, rpc2: two instances of the rpc_model.RPCModel class
x, y, w, h: four integers defining the rectangular ROI in the first
image. (x, y) is the top-left corner, and (w, h) are the dimensions
of the rectangle.
H1, H2: two rectifying homographies, stored as numpy 3x3 matrices
matches: Nx4 numpy array containing a list of sift matches, in the full
image coordinates frame
A (optional): 3x3 numpy array containing the pointing error correction
for im2. This matrix is usually estimated with the pointing_accuracy
module.
Returns:
disp: 2-uple containing the horizontal disparity range
"""
# Default disparity range to return if everything else breaks
disp = (-3,3)
exogenous_disp = None
sift_disp = None
alt_disp = None
# Compute exogenous disparity range if needed
if (cfg['disp_range_method'] in ['exogenous', 'wider_sift_exogenous']):
exogenous_disp = rpc_utils.exogenous_disp_range_estimation(rpc1, rpc2, x, y, w, h,
H1, H2, A,
cfg['disp_range_exogenous_high_margin'],
cfg['disp_range_exogenous_low_margin'])
print("exogenous disparity range: [%f, %f]" % (exogenous_disp[0], exogenous_disp[1]))
# Compute SIFT disparity range if needed
if (cfg['disp_range_method'] in ['sift', 'wider_sift_exogenous']):
if matches is not None and len(matches)>=2:
sift_disp = disparity_range_from_matches(matches, H1, H2, w, h)
print("SIFT disparity range: [%f, %f]" % (sift_disp[0], sift_disp[1]))
else:
print("No SIFT available, SIFT disparity can not be estimated")
# Compute altitude range disparity if needed
if cfg['disp_range_method'] == 'fixed_altitude_range':
if cfg['alt_min'] is not None and cfg['alt_max'] is not None:
alt_disp = rpc_utils.altitude_range_to_disp_range(cfg['alt_min'],
cfg['alt_max'],
rpc1, rpc2,
x, y, w, h,
H1, H2, A)
print("Altitude fixed disparity range: [%f, %f]" % (alt_disp[0], alt_disp[1]))
# Now, compute disparity range according to selected method
if cfg['disp_range_method'] == 'exogenous':
if exogenous_disp is not None:
disp = exogenous_disp
elif cfg['disp_range_method'] == 'sift':
if sift_disp is not None:
disp = sift_disp
elif cfg['disp_range_method'] == 'wider_sift_exogenous':
if sift_disp is not None and exogenous_disp is not None:
disp = min(exogenous_disp[0], sift_disp[0]), max(exogenous_disp[1], sift_disp[1])
else:
if sift_disp is not None:
disp = sift_disp
else:
disp = exogenous_disp
elif cfg['disp_range_method'] == 'fixed_pixel_range':
if cfg['disp_min'] is not None and cfg['disp_max'] is not None:
disp = cfg['disp_min'], cfg['disp_max']
elif cfg['disp_range_method'] == 'fixed_altitude_range':
disp = alt_disp
# impose a minimal disparity range (TODO this is valid only with the
# 'center' flag for register_horizontally_translation)
disp = min(-3, disp[0]), max( 3, disp[1])
print("Final disparity range: [%f, %f]" % (disp[0], disp[1]))
return disp
def rectification_homographies(matches, x, y, w, h):
"""
Computes rectifying homographies from point matches for a given ROI.
The affine fundamental matrix F is estimated with the gold-standard
algorithm, then two rectifying similarities (rotation, zoom, translation)
are computed directly from F.
Args:
matches: numpy array of shape (n, 4) containing a list of 2D point
correspondences between the two images.
x, y, w, h: four integers defining the rectangular ROI in the first
image. (x, y) is the top-left corner, and (w, h) are the dimensions
of the rectangle.
Returns:
S1, S2, F: three numpy arrays of shape (3, 3) representing the
two rectifying similarities to be applied to the two images and the
corresponding affine fundamental matrix.
"""
# estimate the affine fundamental matrix with the Gold standard algorithm
F = estimation.affine_fundamental_matrix(matches)
# compute rectifying similarities
S1, S2 = estimation.rectifying_similarities_from_affine_fundamental_matrix(F, cfg['debug'])
if cfg['debug']:
y1 = common.points_apply_homography(S1, matches[:, :2])[:, 1]
y2 = common.points_apply_homography(S2, matches[:, 2:])[:, 1]
err = np.abs(y1 - y2)
print("max, min, mean rectification error on point matches: ", end=' ')
print(np.max(err), np.min(err), np.mean(err))
# pull back top-left corner of the ROI to the origin (plus margin)
pts = common.points_apply_homography(S1, [[x, y], [x+w, y], [x+w, y+h], [x, y+h]])
x0, y0 = common.bounding_box2D(pts)[:2]
T = common.matrix_translation(-x0, -y0)
return np.dot(T, S1), np.dot(T, S2), F
def rectify_pair(im1, im2, rpc1, rpc2, x, y, w, h, out1, out2, A=None,
sift_matches=None, method='rpc', hmargin=0, vmargin=0):
"""
Rectify a ROI in a pair of images.
Args:
im1, im2: paths to two image files
rpc1, rpc2: paths to the two xml files containing RPC data
x, y, w, h: four integers defining the rectangular ROI in the first
image. (x, y) is the top-left corner, and (w, h) are the dimensions
of the rectangle.
out1, out2: paths to the output rectified crops
A (optional): 3x3 numpy array containing the pointing error correction
for im2. This matrix is usually estimated with the pointing_accuracy
module.
sift_matches (optional): Nx4 numpy array containing a list of sift
matches, in the full image coordinates frame
        method (default: 'rpc'): option to decide whether to use rpc or sift
matches for the fundamental matrix estimation.
{h,v}margin (optional): horizontal and vertical margins added on the
sides of the rectified images
Returns:
H1, H2: Two 3x3 matrices representing the rectifying homographies that
have been applied to the two original (large) images.
disp_min, disp_max: horizontal disparity range
"""
# read RPC data
rpc1 = rpc_model.RPCModel(rpc1)
rpc2 = rpc_model.RPCModel(rpc2)
# compute real or virtual matches
if method == 'rpc':
# find virtual matches from RPC camera models
matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h,
cfg['n_gcp_per_axis'])
# correct second image coordinates with the pointing correction matrix
if A is not None:
matches[:, 2:] = common.points_apply_homography(np.linalg.inv(A),
matches[:, 2:])
else:
matches = sift_matches
# compute rectifying homographies
H1, H2, F = rectification_homographies(matches, x, y, w, h)
if cfg['register_with_shear']:
# compose H2 with a horizontal shear to reduce the disparity range
a = np.mean(rpc_utils.altitude_range(rpc1, x, y, w, h))
lon, lat, alt = rpc_utils.ground_control_points(rpc1, x, y, w, h, a, a, 4)
x1, y1 = rpc1.inverse_estimate(lon, lat, alt)[:2]
x2, y2 = rpc2.inverse_estimate(lon, lat, alt)[:2]
m = np.vstack([x1, y1, x2, y2]).T
m = np.vstack({tuple(row) for row in m}) # remove duplicates due to no alt range
H2 = register_horizontally_shear(m, H1, H2)
# compose H2 with a horizontal translation to center disp range around 0
if sift_matches is not None:
sift_matches = filter_matches_epipolar_constraint(F, sift_matches,
cfg['epipolar_thresh'])
if len(sift_matches) < 10:
print('WARNING: no registration with less than 10 matches')
else:
H2 = register_horizontally_translation(sift_matches, H1, H2)
# compute disparity range
if cfg['debug']:
out_dir = os.path.dirname(out1)
np.savetxt(os.path.join(out_dir, 'sift_matches_disp.txt'),
sift_matches, fmt='%9.3f')
visualisation.plot_matches(im1, im2, rpc1, rpc2, sift_matches, x, y, w, h,
os.path.join(out_dir, 'sift_matches_disp.png'))
disp_m, disp_M = disparity_range(rpc1, rpc2, x, y, w, h, H1, H2,
sift_matches, A)
# recompute hmargin and homographies
hmargin = int(np.ceil(max([hmargin, np.fabs(disp_m), np.fabs(disp_M)])))
T = common.matrix_translation(hmargin, vmargin)
H1, H2 = np.dot(T, H1), np.dot(T, H2)
# compute rectifying homographies for non-epipolar mode (rectify the secondary tile only)
if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
H1_inv = np.linalg.inv(H1)
H1 = np.eye(3) # H1 is replaced by 2-D array with ones on the diagonal and zeros elsewhere
H2 = np.dot(H1_inv,H2)
T = common.matrix_translation(-x + hmargin, -y + vmargin)
H1 = np.dot(T, H1)
H2 = np.dot(T, H2)
# compute output images size
roi = [[x, y], [x+w, y], [x+w, y+h], [x, y+h]]
pts1 = common.points_apply_homography(H1, roi)
x0, y0, w0, h0 = common.bounding_box2D(pts1)
# check that the first homography maps the ROI in the positive quadrant
np.testing.assert_allclose(np.round([x0, y0]), [hmargin, vmargin], atol=.01)
# apply homographies and do the crops
common.image_apply_homography(out1, im1, H1, w0 + 2*hmargin, h0 + 2*vmargin)
common.image_apply_homography(out2, im2, H2, w0 + 2*hmargin, h0 + 2*vmargin)
if block_matching.rectify_secondary_tile_only(cfg['matching_algorithm']):
pts_in = [[0, 0], [disp_m, 0], [disp_M, 0]]
pts_out = common.points_apply_homography(H1_inv,
pts_in)
disp_m = pts_out[1,:] - pts_out[0,:]
disp_M = pts_out[2,:] - pts_out[0,:]
return H1, H2, disp_m, disp_M
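# A minimal call sketch for rectify_pair (the file paths and ROI below are
# hypothetical placeholders, not values used by s2p itself):
#
#     H1, H2, disp_min, disp_max = rectify_pair(
#         'img1.tif', 'img2.tif', 'rpc1.xml', 'rpc2.xml',
#         x=1000, y=2000, w=512, h=512,
#         out1='rect1.tif', out2='rect2.tif', method='rpc')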
|
agpl-3.0
| -1,560,706,240,523,697,700
| 40.985037
| 102
| 0.61125
| false
| 3.43662
| false
| false
| false
|
derwentx/WooGenerator
|
tests/test_parsing_special_v2.py
|
1
|
4047
|
import os
import time
import unittest
from context import get_testdata, TESTS_DATA_DIR, woogenerator
from woogenerator.parsing.special import CsvParseSpecial
from woogenerator.utils import Registrar, TimeUtils
class TestCSVParseSpecialV2(unittest.TestCase):
def setUp(self):
# import_name = TimeUtils.get_ms_timestamp()
self.spec_path = os.path.join(TESTS_DATA_DIR, "specials_v2.csv")
self.special_parser_args = {
# 'import_name':import_name
}
Registrar.DEBUG_ERROR = False
Registrar.DEBUG_WARN = False
Registrar.DEBUG_MESSAGE = False
# Registrar.DEBUG_MESSAGE = True
# Registrar.DEBUG_SPECIAL = True
# Registrar.DEBUG_PARSER = True
def test_basic(self):
special_parser = CsvParseSpecial(
**self.special_parser_args
)
special_parser.analyse_file(self.spec_path)
if Registrar.DEBUG_PARSER:
Registrar.register_message("number of special groups: %s" \
% len(special_parser.rule_groups))
Registrar.register_message("number of special rules: %s" % len(special_parser.rules))
Registrar.register_message(special_parser.tabulate(tablefmt="simple"))
# check that loner has correct ending
is_singular_child = False
for index, special in special_parser.rule_groups.items():
if len(special.children) == 1:
is_singular_child = True
child = special.children[0]
self.assertEqual(index, child.index)
self.assertTrue(is_singular_child)
def test_has_happened_yet(self):
special_parser = CsvParseSpecial(
**self.special_parser_args
)
special_parser.analyse_file(self.spec_path)
TimeUtils.set_override_time(time.strptime(
"2018-01-01", TimeUtils.wp_date_format))
eofy_special = special_parser.rule_groups.get('EOFY2016')
eofy_start_time = TimeUtils.datetime2utctimestamp(eofy_special.start_time)
self.assertLess(eofy_start_time, TimeUtils.current_tsecs())
self.assertTrue(eofy_special.has_started)
self.assertTrue(eofy_special.has_finished)
self.assertFalse(eofy_special.is_active)
def test_determine_groups(self):
special_parser = CsvParseSpecial(
**self.special_parser_args
)
special_parser.analyse_file(self.spec_path)
# Registrar.DEBUG_SPECIAL = True
# Registrar.DEBUG_MESSAGE = True
override_groups = special_parser.determine_current_spec_grps(
'override',
'EOFY2016'
)
self.assertEquals(
override_groups, [special_parser.rule_groups.get('EOFY2016')])
TimeUtils.set_override_time(time.strptime(
"2018-01-01", TimeUtils.wp_date_format))
auto_next_groups = special_parser.determine_current_spec_grps(
'auto_next'
)
self.assertEquals(auto_next_groups, [])
TimeUtils.set_override_time(time.strptime(
"2016-08-11", TimeUtils.wp_date_format))
auto_next_groups = special_parser.determine_current_spec_grps(
'auto_next'
)
self.assertEquals(
auto_next_groups, [special_parser.rule_groups.get('SP2016-08-12')])
TimeUtils.set_override_time(time.strptime(
"2016-06-11", TimeUtils.wp_date_format))
auto_next_groups = special_parser.determine_current_spec_grps(
'auto_next'
)
self.assertEquals(
auto_next_groups, [special_parser.rule_groups.get('EOFY2016')])
TimeUtils.set_override_time(time.strptime(
"2016-06-13", TimeUtils.wp_date_format))
auto_next_groups = special_parser.determine_current_spec_grps(
'auto_next'
)
self.assertEquals(
auto_next_groups, [special_parser.rule_groups.get('EOFY2016')])
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| 6,171,959,762,056,906,000
| 31.902439
| 97
| 0.619471
| false
| 3.785781
| true
| false
| false
|
qtproject/qt-creator
|
share/qtcreator/debugger/qttypes.py
|
2
|
103408
|
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import platform
import re
from dumper import *
def qdump__QAtomicInt(d, value):
d.putValue(value.integer())
d.putNumChild(0)
def qdump__QBasicAtomicInt(d, value):
d.putValue(value.integer())
d.putNumChild(0)
def qdump__QAtomicPointer(d, value):
d.putItem(value.cast(value.type[0].pointer()))
d.putBetterType(value.type)
def qform__QByteArray():
return [Latin1StringFormat, SeparateLatin1StringFormat,
Utf8StringFormat, SeparateUtf8StringFormat ]
def qedit__QByteArray(d, value, data):
d.call('void', value, 'resize', str(len(data)))
(base, size, alloc) = d.stringData(value)
d.setValues(base, 'char', [ord(c) for c in data])
def qdump__QByteArray(d, value):
data, size, alloc = d.byteArrayData(value)
d.check(alloc == 0 or (0 <= size and size <= alloc and alloc <= 100000000))
d.putNumChild(size)
elided, p = d.encodeByteArrayHelper(d.extractPointer(value), d.displayStringLimit)
displayFormat = d.currentItemFormat()
if displayFormat == AutomaticFormat or displayFormat == Latin1StringFormat:
d.putValue(p, 'latin1', elided=elided)
elif displayFormat == SeparateLatin1StringFormat:
d.putValue(p, 'latin1', elided=elided)
d.putDisplay('latin1:separate', d.encodeByteArray(value, limit=100000))
elif displayFormat == Utf8StringFormat:
d.putValue(p, 'utf8', elided=elided)
elif displayFormat == SeparateUtf8StringFormat:
d.putValue(p, 'utf8', elided=elided)
d.putDisplay('utf8:separate', d.encodeByteArray(value, limit=100000))
if d.isExpanded():
d.putArrayData(data, size, d.charType())
def qdump__QArrayData(d, value):
data, size, alloc = d.byteArrayDataHelper(value.address())
d.check(alloc == 0 or (0 <= size and size <= alloc and alloc <= 100000000))
d.putValue(d.readMemory(data, size), 'latin1')
d.putNumChild(1)
d.putPlainChildren(value)
def qdump__QByteArrayData(d, value):
qdump__QArrayData(d, value)
def qdump__QBitArray(d, value):
data, basize, alloc = d.byteArrayDataHelper(d.extractPointer(value['d']))
unused = d.extractByte(data)
size = basize * 8 - unused
d.putItemCount(size)
if d.isExpanded():
with Children(d, size, maxNumChild=10000):
for i in d.childRange():
q = data + 1 + int(i / 8)
with SubItem(d, i):
d.putValue((int(d.extractPointer(q)) >> (i % 8)) & 1)
d.putType('bool')
d.putNumChild(0)
def qdump__QChar(d, value):
d.putValue(d.extractUShort(value))
d.putNumChild(0)
def qform_X_QAbstractItemModel():
return [SimpleFormat, EnhancedFormat]
def qdump_X_QAbstractItemModel(d, value):
displayFormat = d.currentItemFormat()
if displayFormat == SimpleFormat:
d.putPlainChildren(value)
return
#displayFormat == EnhancedFormat:
# Create a default-constructed QModelIndex on the stack.
try:
ri = d.pokeValue(d.qtNamespace() + 'QModelIndex', '-1, -1, 0, 0')
this_ = d.makeExpression(value)
ri_ = d.makeExpression(ri)
rowCount = int(d.parseAndEvaluate('%s.rowCount(%s)' % (this_, ri_)))
columnCount = int(d.parseAndEvaluate('%s.columnCount(%s)' % (this_, ri_)))
except:
d.putPlainChildren(value)
return
d.putValue('%d x %d' % (rowCount, columnCount))
d.putNumChild(rowCount * columnCount)
if d.isExpanded():
with Children(d, numChild=rowCount * columnCount, childType=ri.type):
i = 0
for row in xrange(rowCount):
for column in xrange(columnCount):
with SubItem(d, i):
d.putName('[%s, %s]' % (row, column))
mi = d.parseAndEvaluate('%s.index(%d,%d,%s)'
% (this_, row, column, ri_))
d.putItem(mi)
i = i + 1
#gdb.execute('call free($ri)')
def qform_X_QModelIndex():
return [SimpleFormat, EnhancedFormat]
def qdump_X_QModelIndex(d, value):
displayFormat = d.currentItemFormat()
if displayFormat == SimpleFormat:
d.putPlainChildren(value)
return
r = value['r']
c = value['c']
try:
p = value['p']
except:
p = value['i']
m = value['m']
if m.pointer() == 0 or r < 0 or c < 0:
d.putValue('(invalid)')
d.putPlainChildren(value)
return
mm = m.dereference()
mm = mm.cast(mm.type.unqualified())
ns = d.qtNamespace()
try:
mi = d.pokeValue(ns + 'QModelIndex', '%s,%s,%s,%s' % (r, c, p, m))
mm_ = d.makeExpression(mm)
mi_ = d.makeExpression(mi)
rowCount = int(d.parseAndEvaluate('%s.rowCount(%s)' % (mm_, mi_)))
columnCount = int(d.parseAndEvaluate('%s.columnCount(%s)' % (mm_, mi_)))
except:
d.putPlainChildren(value)
return
try:
# Access DisplayRole as value
val = d.parseAndEvaluate('%s.data(%s, 0)' % (mm_, mi_))
v = val['d']['data']['ptr']
d.putStringValue(d.pokeValue(ns + 'QString', v))
except:
d.putValue('')
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putFields(value, False)
i = 0
for row in xrange(rowCount):
for column in xrange(columnCount):
with UnnamedSubItem(d, i):
d.putName('[%s, %s]' % (row, column))
mi2 = d.parseAndEvaluate('%s.index(%d,%d,%s)'
% (mm_, row, column, mi_))
d.putItem(mi2)
i = i + 1
d.putCallItem('parent', '@QModelIndex', value, 'parent')
#gdb.execute('call free($mi)')
def qdump__Qt__ItemDataRole(d, value):
d.putEnumValue(value.integer(), {
0 : "Qt::DisplayRole",
1 : "Qt::DecorationRole",
2 : "Qt::EditRole",
3 : "Qt::ToolTipRole",
4 : "Qt::StatusTipRole",
5 : "Qt::WhatsThisRole",
6 : "Qt::FontRole",
7 : "Qt::TextAlignmentRole",
# obsolete: 8 : "Qt::BackgroundColorRole",
8 : "Qt::BackgroundRole",
# obsolete: 9 : "Qt::TextColorRole",
9 : "Qt::ForegroundRole",
10 : "Qt::CheckStateRole",
11 : "Qt::AccessibleTextRole",
12 : "Qt::AccessibleDescriptionRole",
13 : "Qt::SizeHintRole",
14 : "Qt::InitialSortOrderRole",
# 27-31 Qt4 ItemDataRoles
27 : "Qt::DisplayPropertyRole",
28 : "Qt::DecorationPropertyRole",
29 : "Qt::ToolTipPropertyRole",
30 : "Qt::StatusTipPropertyRole",
31 : "Qt::WhatsThisPropertyRole",
0x100 : "Qt::UserRole"
})
def qdump__QStandardItemData(d, value):
role, pad, val = value.split('{@Qt::ItemDataRole}@{QVariant}')
d.putPairContents(role.value(), (role, val), 'role', 'value')
def qdump__QStandardItem(d, value):
vtable, dptr = value.split('pp')
# There used to be a virtual destructor that got removed in
# 88b6abcebf29b455438 on Apr 18 17:01:22 2017
if d.qtVersion() >= 0x050900 or d.isMsvcTarget():
model, parent, values, children, rows, cols, item = d.split('ppPPIIp', dptr)
else:
vtable1, model, parent, values, children, rows, cols, item = d.split('pppPPIIp', dptr)
d.putValue(' ')
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putSubItem('[model]', d.createValue(model, '@QStandardItemModel'))
d.putSubItem('[values]', d.createVectorItem(values, '@QStandardItemData'))
d.putSubItem('[children]', d.createVectorItem(children,
d.createPointerType(value.type)))
def qdump__QDate(d, value):
jd = value.pointer()
if jd:
d.putValue(jd, 'juliandate')
d.putNumChild(1)
if d.isExpanded():
with Children(d):
if d.canCallLocale():
d.putCallItem('toString', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'TextDate'))
d.putCallItem('(ISO)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'ISODate'))
d.putCallItem('(SystemLocale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'SystemLocaleDate'))
d.putCallItem('(Locale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'LocaleDate'))
d.putFields(value)
else:
d.putValue('(invalid)')
d.putNumChild(0)
def qdump__QTime(d, value):
mds = value.split('i')[0]
if mds == -1:
d.putValue('(invalid)')
d.putNumChild(0)
return
d.putValue(mds, 'millisecondssincemidnight')
if d.isExpanded():
with Children(d):
d.putCallItem('toString', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'TextDate'))
d.putCallItem('(ISO)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'ISODate'))
if d.canCallLocale():
d.putCallItem('(SystemLocale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'SystemLocaleDate'))
d.putCallItem('(Locale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'LocaleDate'))
d.putFields(value)
def qdump__QTimeZone(d, value):
base = d.extractPointer(value)
if base == 0:
d.putValue('(null)')
d.putNumChild(0)
return
idAddr = base + 2 * d.ptrSize() # [QSharedData] + [vptr]
d.putByteArrayValue(idAddr)
d.putPlainChildren(value['d'])
def qdump__QDateTime(d, value):
qtVersion = d.qtVersion()
isValid = False
# This relies on the Qt4/Qt5 internal structure layout:
# {sharedref(4), ...
base = d.extractPointer(value)
is32bit = d.ptrSize() == 4
if qtVersion >= 0x050200:
tiVersion = d.qtTypeInfoVersion()
#warn('TI VERSION: %s' % tiVersion)
if tiVersion is None:
tiVersion = 4
if tiVersion > 10:
status = d.extractByte(value)
#warn('STATUS: %s' % status)
if status & 0x01:
# Short data
msecs = d.extractUInt64(value) >> 8
spec = (status & 0x30) >> 4
offsetFromUtc = 0
timeZone = 0
isValid = status & 0x08
else:
dptr = d.extractPointer(value)
(msecs, status, offsetFromUtc, ref, timeZone) = d.split('qIIIp', dptr)
spec = (status & 0x30) >> 4
isValid = True
d.putValue('%s/%s/%s/%s/%s/%s' % (msecs, spec, offsetFromUtc, timeZone, status, tiVersion),
'datetimeinternal')
else:
if d.isWindowsTarget():
msecsOffset = 8
specOffset = 16
offsetFromUtcOffset = 20
timeZoneOffset = 24
statusOffset = 28 if is32bit else 32
else:
msecsOffset = 4 if is32bit else 8
specOffset = 12 if is32bit else 16
offsetFromUtcOffset = 16 if is32bit else 20
timeZoneOffset = 20 if is32bit else 24
statusOffset = 24 if is32bit else 32
status = d.extractInt(base + statusOffset)
if int(status & 0x0c == 0x0c): # ValidDate and ValidTime
isValid = True
msecs = d.extractInt64(base + msecsOffset)
spec = d.extractInt(base + specOffset)
offset = d.extractInt(base + offsetFromUtcOffset)
tzp = d.extractPointer(base + timeZoneOffset)
if tzp == 0:
tz = ''
else:
idBase = tzp + 2 * d.ptrSize() # [QSharedData] + [vptr]
elided, tz = d.encodeByteArrayHelper(d.extractPointer(idBase), limit=100)
d.putValue('%s/%s/%s/%s/%s/%s' % (msecs, spec, offset, tz, status, 0),
'datetimeinternal')
else:
# This relies on the Qt4/Qt5 internal structure layout:
# {sharedref(4), date(8), time(4+x)}
# QDateTimePrivate:
# - QAtomicInt ref; (padded on 64 bit)
# - [QDate date;]
# - - uint jd in Qt 4, qint64 in Qt 5.0 and Qt 5.1; padded on 64 bit
# - [QTime time;]
# - - uint mds;
# - Spec spec;
dateSize = 8 if qtVersion >= 0x050000 else 4 # Qt5: qint64, Qt4 uint
# 4 byte padding after 4 byte QAtomicInt if we are on 64 bit and QDate is 64 bit
refPlusPadding = 8 if qtVersion >= 0x050000 and d.ptrSize() == 8 else 4
dateBase = base + refPlusPadding
timeBase = dateBase + dateSize
mds = d.extractInt(timeBase)
isValid = mds > 0
if isValid:
jd = d.extractInt(dateBase)
d.putValue('%s/%s' % (jd, mds), 'juliandateandmillisecondssincemidnight')
if not isValid:
d.putValue('(invalid)')
d.putNumChild(0)
return
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putCallItem('toTime_t', 'unsigned int', value, 'toTime_t')
if d.canCallLocale():
d.putCallItem('toString', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'TextDate'))
d.putCallItem('(ISO)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'ISODate'))
d.putCallItem('toUTC', '@QDateTime', value, 'toTimeSpec',
d.enumExpression('TimeSpec', 'UTC'))
d.putCallItem('(SystemLocale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'SystemLocaleDate'))
d.putCallItem('(Locale)', '@QString', value, 'toString',
d.enumExpression('DateFormat', 'LocaleDate'))
d.putCallItem('toLocalTime', '@QDateTime', value, 'toTimeSpec',
d.enumExpression('TimeSpec', 'LocalTime'))
d.putFields(value)
def qdump__QDir(d, value):
d.putNumChild(1)
privAddress = d.extractPointer(value)
bit32 = d.ptrSize() == 4
qt5 = d.qtVersion() >= 0x050000
# Change 9fc0965 reorders members again.
# bool fileListsInitialized
# QStringList files
# QFileInfoList fileInfos
# QStringList nameFilters
# QDir::SortFlags sort
# QDir::Filters filters
# Before 9fc0965:
# QDirPrivate:
# QAtomicInt ref
# QStringList nameFilters;
# QDir::SortFlags sort;
# QDir::Filters filters;
# // qt3support:
# QChar filterSepChar;
# bool matchAllDirs;
# // end qt3support
# QScopedPointer<QAbstractFileEngine> fileEngine;
# bool fileListsInitialized;
# QStringList files;
# QFileInfoList fileInfos;
# QFileSystemEntry dirEntry;
# QFileSystemEntry absoluteDirEntry;
# QFileSystemEntry:
# QString m_filePath
# QByteArray m_nativeFilePath
# qint16 m_lastSeparator
# qint16 m_firstDotInFileName
# qint16 m_lastDotInFileName
# + 2 byte padding
fileSystemEntrySize = 2 * d.ptrSize() + 8
if d.qtVersion() < 0x050200:
case = 0
elif d.qtVersion() >= 0x050300:
case = 1
else:
# Try to distinguish bool vs QStringList at the first item
# after the (padded) refcount. If it looks like a bool assume
# this is after 9fc0965. This is not safe.
firstValue = d.extractInt(privAddress + d.ptrSize())
case = 1 if firstValue == 0 or firstValue == 1 else 0
if case == 1:
if bit32:
filesOffset = 4
fileInfosOffset = 8
dirEntryOffset = 0x20
absoluteDirEntryOffset = 0x30
else:
filesOffset = 0x08
fileInfosOffset = 0x10
dirEntryOffset = 0x30
absoluteDirEntryOffset = 0x48
else:
# Assume this is before 9fc0965.
qt3support = d.isQt3Support()
qt3SupportAddition = d.ptrSize() if qt3support else 0
filesOffset = (24 if bit32 else 40) + qt3SupportAddition
fileInfosOffset = filesOffset + d.ptrSize()
dirEntryOffset = fileInfosOffset + d.ptrSize()
absoluteDirEntryOffset = dirEntryOffset + fileSystemEntrySize
d.putStringValue(privAddress + dirEntryOffset)
if d.isExpanded():
with Children(d):
if not d.isMsvcTarget():
ns = d.qtNamespace()
d.call('int', value, 'count') # Fill cache.
#d.putCallItem('absolutePath', '@QString', value, 'absolutePath')
#d.putCallItem('canonicalPath', '@QString', value, 'canonicalPath')
with SubItem(d, 'absolutePath'):
typ = d.lookupType(ns + 'QString')
d.putItem(d.createValue(privAddress + absoluteDirEntryOffset, typ))
with SubItem(d, 'entryInfoList'):
typ = d.lookupType(ns + 'QFileInfo')
qdumpHelper_QList(d, privAddress + fileInfosOffset, typ)
with SubItem(d, 'entryList'):
typ = d.lookupType(ns + 'QStringList')
d.putItem(d.createValue(privAddress + filesOffset, typ))
d.putFields(value)
def qdump__QEvent(d, value):
d.putNumChild(1)
if d.isExpanded():
with Children(d):
# Add a sub-item with the event type.
with SubItem(d, '[type]'):
(vtable, privateD, t, flags) = value.split("pp{short}{short}")
event_type_name = d.qtNamespace() + "QEvent::Type"
type_value = t.cast(event_type_name)
d.putValue(type_value.displayEnum('0x%04x', bitsize=16))
d.putType(event_type_name)
d.putNumChild(0)
# Show the rest of the class fields as usual.
d.putFields(value)
def qdump__QKeyEvent(d, value):
# QEvent fields
# virtual table pointer
# QEventPrivate *d;
# ushort t;
# ushort posted : 1;
# ushort spont : 1;
# ushort m_accept : 1;
# ushort reserved : 13;
# QInputEvent fields
# Qt::KeyboardModifiers modState;
# ulong ts;
# QKeyEvent fields
# QString txt;
# int k;
# quint32 nScanCode;
# quint32 nVirtualKey;
# quint32 nModifiers; <- nativeModifiers
# ushort c;
# ushort autor:1;
# ushort reserved:15;
(vtable, privateD, t, flags, modState, ts, txt, k, scanCode,
virtualKey, modifiers,
c, autor) = value.split("ppHHiQ{QString}{int}IIIHH")
#d.putStringValue(txt)
#data = d.encodeString(txt)
key_txt_utf8 = d.encodeStringUtf8(txt)
k_type_name = d.qtNamespace() + "Qt::Key"
k_cast_to_enum_value = k.cast(k_type_name)
k_name = k_cast_to_enum_value.displayEnum(bitsize=32)
matches = re.search(r'Key_(\w+)', k_name)
if matches:
k_name = matches.group(1)
if t == 6:
key_event_type = "Pressed"
elif t == 7:
key_event_type = "Released"
else:
key_event_type = ""
data = ""
if key_event_type:
data += "{} ".format(key_event_type)
# Try to use the name of the enum value, otherwise the value
# of txt in QKeyEvent.
if k_name:
data += "'{}'".format(k_name)
elif key_txt_utf8:
data += "'{}'".format(key_txt_utf8)
else:
data += "<non-ascii>"
k_int = k.integer()
data += " (key:{} vKey:{}".format(k_int, virtualKey)
modifier_list = []
modifier_list.append(("Shift", 0x02000000))
modifier_list.append(("Control", 0x04000000))
modifier_list.append(("Alt", 0x08000000))
modifier_list.append(("Meta", 0x10000000))
# modifier_map.append(("KeyPad", 0x20000000)) Is this useful?
modifier_list.append(("Grp", 0x40000000))
modifiers = []
for modifier_name, mask in modifier_list:
if modState & mask:
modifiers.append(modifier_name)
if modifiers:
data += " mods:" + "+".join(modifiers)
data += ")"
d.putValue(d.hexencode(data), 'utf8')
d.putNumChild(1)
if d.isExpanded():
with Children(d):
# Add a sub-item with the enum name and value.
with SubItem(d, '[{}]'.format(k_type_name)):
k_cast_to_enum_value = k.cast(k_type_name)
d.putValue(k_cast_to_enum_value.displayEnum('0x%04x', bitsize=32))
d.putType(k_type_name)
d.putNumChild(0)
# Show the rest of the class fields as usual.
d.putFields(value, dumpBase=True)
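# qdump__QFile locates QFilePrivate::fileName through a per-version, per-platform
# offset table; each row tracks a change in the private layout (for example the
# QRingBuffer member removed in 8f92baf5c9), so new Qt releases typically need a
# new entry here rather than a code change.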
def qdump__QFile(d, value):
# 9fc0965 and a373ffcd change the layout of the private structure
qtVersion = d.qtVersion()
is32bit = d.ptrSize() == 4
if qtVersion >= 0x050600 and d.qtTypeInfoVersion() >= 17:
# Some QRingBuffer member got removed in 8f92baf5c9
if d.isWindowsTarget():
if d.isMsvcTarget():
offset = 164 if is32bit else 224
else:
offset = 160 if is32bit else 224
else:
offset = 156 if is32bit else 224
elif qtVersion >= 0x050700:
if d.isWindowsTarget():
if d.isMsvcTarget():
offset = 176 if is32bit else 248
else:
offset = 172 if is32bit else 248
else:
offset = 168 if is32bit else 248
elif qtVersion >= 0x050600:
if d.isWindowsTarget():
if d.isMsvcTarget():
offset = 184 if is32bit else 248
else:
offset = 180 if is32bit else 248
else:
offset = 168 if is32bit else 248
elif qtVersion >= 0x050500:
if d.isWindowsTarget():
offset = 164 if is32bit else 248
else:
offset = 164 if is32bit else 248
elif qtVersion >= 0x050400:
if d.isWindowsTarget():
offset = 188 if is32bit else 272
else:
offset = 180 if is32bit else 272
elif qtVersion > 0x050200:
if d.isWindowsTarget():
offset = 180 if is32bit else 272
else:
offset = 176 if is32bit else 272
elif qtVersion >= 0x050000:
offset = 176 if is32bit else 280
else:
if d.isWindowsTarget():
offset = 144 if is32bit else 232
else:
offset = 140 if is32bit else 232
vtable, privAddress = value.split('pp')
fileNameAddress = privAddress + offset
d.putStringValue(fileNameAddress)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putCallItem('exists', 'bool', value, 'exists')
d.putFields(value)
def qdump__QFileInfo(d, value):
privAddress = d.extractPointer(value)
#bit32 = d.ptrSize() == 4
#qt5 = d.qtVersion() >= 0x050000
#try:
# d.putStringValue(value['d_ptr']['d'].dereference()['fileNames'][3])
#except:
# d.putPlainChildren(value)
# return
filePathAddress = privAddress + d.ptrSize()
d.putStringValue(filePathAddress)
d.putNumChild(1)
if d.isExpanded():
ns = d.qtNamespace()
with Children(d):
stype = '@QString'
d.putCallItem('absolutePath', stype, value, 'absolutePath')
d.putCallItem('absoluteFilePath', stype, value, 'absoluteFilePath')
d.putCallItem('canonicalPath', stype, value, 'canonicalPath')
d.putCallItem('canonicalFilePath', stype, value, 'canonicalFilePath')
d.putCallItem('completeBaseName', stype, value, 'completeBaseName')
d.putCallItem('completeSuffix', stype, value, 'completeSuffix')
d.putCallItem('baseName', stype, value, 'baseName')
if platform.system() == 'Darwin':
                d.putCallItem('isBundle', 'bool', value, 'isBundle')
d.putCallItem('bundleName', stype, value, 'bundleName')
d.putCallItem('fileName', stype, value, 'fileName')
d.putCallItem('filePath', stype, value, 'filePath')
# Crashes gdb (archer-tromey-python, at dad6b53fe)
#d.putCallItem('group', value, 'group')
#d.putCallItem('owner', value, 'owner')
d.putCallItem('path', stype, value, 'path')
d.putCallItem('groupid', 'unsigned int', value, 'groupId')
d.putCallItem('ownerid', 'unsigned int', value, 'ownerId')
#QFile::Permissions permissions () const
try:
perms = d.call('int', value, 'permissions')
except:
perms = None
if perms is None:
with SubItem(d, 'permissions'):
d.putSpecialValue('notcallable')
d.putType(ns + 'QFile::Permissions')
d.putNumChild(0)
else:
with SubItem(d, 'permissions'):
d.putEmptyValue()
d.putType(ns + 'QFile::Permissions')
                    d.putNumChild(12)
                    if d.isExpanded():
                        with Children(d, 12):
perms = perms['i']
d.putBoolItem('ReadOwner', perms & 0x4000)
d.putBoolItem('WriteOwner', perms & 0x2000)
d.putBoolItem('ExeOwner', perms & 0x1000)
d.putBoolItem('ReadUser', perms & 0x0400)
d.putBoolItem('WriteUser', perms & 0x0200)
d.putBoolItem('ExeUser', perms & 0x0100)
d.putBoolItem('ReadGroup', perms & 0x0040)
d.putBoolItem('WriteGroup', perms & 0x0020)
d.putBoolItem('ExeGroup', perms & 0x0010)
d.putBoolItem('ReadOther', perms & 0x0004)
d.putBoolItem('WriteOther', perms & 0x0002)
d.putBoolItem('ExeOther', perms & 0x0001)
#QDir absoluteDir () const
#QDir dir () const
d.putCallItem('caching', 'bool', value, 'caching')
d.putCallItem('exists', 'bool', value, 'exists')
d.putCallItem('isAbsolute', 'bool', value, 'isAbsolute')
d.putCallItem('isDir', 'bool', value, 'isDir')
d.putCallItem('isExecutable', 'bool', value, 'isExecutable')
d.putCallItem('isFile', 'bool', value, 'isFile')
d.putCallItem('isHidden', 'bool', value, 'isHidden')
d.putCallItem('isReadable', 'bool', value, 'isReadable')
d.putCallItem('isRelative', 'bool', value, 'isRelative')
d.putCallItem('isRoot', 'bool', value, 'isRoot')
d.putCallItem('isSymLink', 'bool', value, 'isSymLink')
d.putCallItem('isWritable', 'bool', value, 'isWritable')
            d.putCallItem('created', '@QDateTime', value, 'created')
            d.putCallItem('lastModified', '@QDateTime', value, 'lastModified')
            d.putCallItem('lastRead', '@QDateTime', value, 'lastRead')
d.putFields(value)
def qdump__QFixed(d, value):
v = value.split('i')[0]
d.putValue('%s/64 = %s' % (v, v/64.0))
d.putNumChild(0)
def qform__QFiniteStack():
return arrayForms()
def qdump__QFiniteStack(d, value):
array, alloc, size = value.split('pii')
d.check(0 <= size and size <= alloc and alloc <= 1000 * 1000 * 1000)
d.putItemCount(size)
d.putPlotData(array, size, value.type[0])
def qdump__QFlags(d, value):
i = value.split('{int}')[0]
enumType = value.type[0]
v = i.cast(enumType.name)
d.putValue(v.displayEnum('0x%04x', bitsize=32))
d.putNumChild(0)
def qform__QHash():
return mapForms()
def qdump__QHash(d, value):
qdumpHelper_QHash(d, value, value.type[0], value.type[1])
def qdump__QVariantHash(d, value):
qdumpHelper_QHash(d, value, d.createType('QString'), d.createType('QVariant'))
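# The helper below mirrors QHashData's firstNode()/nextNode() iteration: every
# node starts with its 'next' pointer and the key's hash, the d-pointer itself
# acts as the end marker, and when a chain ends the walk resumes at the next
# non-empty bucket computed from the stored hash.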
def qdumpHelper_QHash(d, value, keyType, valueType):
def hashDataFirstNode():
b = buckets
n = numBuckets
while n:
n -= 1
bb = d.extractPointer(b)
if bb != dptr:
return bb
b += ptrSize
return dptr
def hashDataNextNode(node):
(nextp, h) = d.split('pI', node)
if d.extractPointer(nextp):
return nextp
start = (h % numBuckets) + 1
b = buckets + start * ptrSize
n = numBuckets - start
while n:
n -= 1
bb = d.extractPointer(b)
if bb != nextp:
return bb
b += ptrSize
return nextp
ptrSize = d.ptrSize()
dptr = d.extractPointer(value)
(fakeNext, buckets, ref, size, nodeSize, userNumBits, numBits, numBuckets) = \
d.split('ppiiihhi', dptr)
d.check(0 <= size and size <= 100 * 1000 * 1000)
d.check(-1 <= ref and ref < 100000)
d.putItemCount(size)
if d.isExpanded():
isShort = d.qtVersion() < 0x050000 and keyType.name == 'int'
with Children(d, size):
node = hashDataFirstNode()
for i in d.childRange():
if isShort:
typeCode = 'P{%s}@{%s}' % (keyType.name, valueType.name)
(pnext, key, padding2, val) = d.split(typeCode, node)
else:
typeCode = 'Pi@{%s}@{%s}' % (keyType.name, valueType.name)
(pnext, hashval, padding1, key, padding2, val) = d.split(typeCode, node)
d.putPairItem(i, (key, val), 'key', 'value')
node = hashDataNextNode(node)
def qform__QHashNode():
return mapForms()
def qdump__QHashNode(d, value):
d.putPairItem(None, value)
def qHashIteratorHelper(d, value):
typeName = value.type.name
hashTypeName = typeName[0:typeName.rfind('::')]
hashType = d.lookupType(hashTypeName)
keyType = hashType[0]
valueType = hashType[1]
d.putNumChild(1)
d.putEmptyValue()
if d.isExpanded():
with Children(d):
node = d.extractPointer(value)
isShort = d.qtVersion() < 0x050000 and keyType.name == 'int'
if isShort:
typeCode = 'P{%s}@{%s}' % (keyType.name, valueType.name)
(pnext, key, padding2, val) = d.split(typeCode, node)
else:
typeCode = 'Pi@{%s}@{%s}' % (keyType.name, valueType.name)
(pnext, hashval, padding1, key, padding2, val) = d.split(typeCode, node)
d.putSubItem('key', key)
d.putSubItem('value', val)
def qdump__QHash__const_iterator(d, value):
qHashIteratorHelper(d, value)
def qdump__QHash__iterator(d, value):
qHashIteratorHelper(d, value)
def qdump__QHostAddress(d, value):
dd = d.extractPointer(value)
qtVersion = d.qtVersion()
tiVersion = d.qtTypeInfoVersion()
#warn('QT: %x, TI: %s' % (qtVersion, tiVersion))
mayNeedParse = True
if tiVersion is not None:
if tiVersion >= 16:
# After a6cdfacf
p, scopeId, a6, a4, protocol = d.split('p{QString}16s{quint32}B', dd)
mayNeedParse = False
elif tiVersion >= 5:
# Branch 5.8.0 at f70b4a13 TI: 15
# Branch 5.7.0 at b6cf0418 TI: 5
(ipString, scopeId, a6, a4, protocol, isParsed) \
= d.split('{QString}{QString}16s{quint32}B{bool}', dd)
else:
(ipString, scopeId, a4, pad, a6, protocol, isParsed) \
= d.split('{QString}{QString}{quint32}I16sI{bool}', dd)
elif qtVersion >= 0x050600: # 5.6.0 at f3aabb42
if d.ptrSize() == 8 or d.isWindowsTarget():
(ipString, scopeId, a4, pad, a6, protocol, isParsed) \
= d.split('{QString}{QString}{quint32}I16sI{bool}', dd)
else:
(ipString, scopeId, a4, a6, protocol, isParsed) \
= d.split('{QString}{QString}{quint32}16sI{bool}', dd)
elif qtVersion >= 0x050000: # 5.2.0 at 62feb088
(ipString, scopeId, a4, a6, protocol, isParsed) \
= d.split('{QString}{QString}{quint32}16sI{bool}', dd)
else: # 4.8.7 at b05d05f
(a4, a6, protocol, pad, ipString, isParsed, pad, scopeId) \
= d.split('{quint32}16sB@{QString}{bool}@{QString}', dd)
if mayNeedParse:
ipStringData, ipStringSize, ipStringAlloc = d.stringData(ipString)
if mayNeedParse and isParsed.integer() and ipStringSize > 0:
d.putStringValue(ipString)
else:
# value.d.d->protocol:
# QAbstractSocket::IPv4Protocol = 0
# QAbstractSocket::IPv6Protocol = 1
if protocol == 1:
# value.d.d->a6
data = d.hexencode(a6)
address = ':'.join('%x' % int(data[i:i+4], 16) for i in xrange(0, 32, 4))
d.putValue(address)
elif protocol == 0:
# value.d.d->a
a = a4.integer()
a, n4 = divmod(a, 256)
a, n3 = divmod(a, 256)
a, n2 = divmod(a, 256)
a, n1 = divmod(a, 256)
d.putValue('%d.%d.%d.%d' % (n1, n2, n3, n4));
else:
d.putValue('<unspecified protocol %s>' % protocol)
d.putNumChild(4)
if d.isExpanded():
with Children(d):
if mayNeedParse:
d.putSubItem('ipString', ipString)
d.putSubItem('isParsed', isParsed)
d.putSubItem('scopeId', scopeId)
d.putSubItem('a', a4)
def qdump__QIPv6Address(d, value):
raw = d.split('16s', value)[0]
data = d.hexencode(raw)
d.putValue(':'.join('%x' % int(data[i:i+4], 16) for i in xrange(0, 32, 4)))
d.putArrayData(value.address(), 16, d.lookupType('unsigned char'))
def qform__QList():
return [DirectQListStorageFormat, IndirectQListStorageFormat]
def qdump__QList(d, value):
return qdumpHelper_QList(d, value, value.type[0])
def qdump__QVariantList(d, value):
qdumpHelper_QList(d, value, d.createType('QVariant'))
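# QList keeps an array of pointer-sized slots. Movable element types no larger
# than a pointer are stored in the slot itself; anything else lives on the heap
# with the slot holding a pointer to it. The dumper can only approximate that
# rule (see the isInternal check below), so the display format allows forcing
# either interpretation: e.g. QList<int> on a 64-bit target is read in place,
# while QList<QVariant> dereferences each slot.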
def qdumpHelper_QList(d, value, innerType):
base = d.extractPointer(value)
(ref, alloc, begin, end) = d.split('IIII', base)
array = base + 16
if d.qtVersion() < 0x50000:
array += d.ptrSize()
d.check(begin >= 0 and end >= 0 and end <= 1000 * 1000 * 1000)
size = end - begin
d.check(size >= 0)
#d.checkRef(private['ref'])
d.putItemCount(size)
if d.isExpanded():
innerSize = innerType.size()
stepSize = d.ptrSize()
addr = array + begin * stepSize
# The exact condition here is:
# QTypeInfo<T>::isLarge || QTypeInfo<T>::isStatic
# but this data is available neither in the compiled binary nor
# in the frontend.
# So as first approximation only do the 'isLarge' check:
displayFormat = d.currentItemFormat()
if displayFormat == DirectQListStorageFormat:
isInternal = True
elif displayFormat == IndirectQListStorageFormat:
isInternal = False
else:
isInternal = innerSize <= stepSize and innerType.isMovableType()
if isInternal:
if innerSize == stepSize:
d.putArrayData(addr, size, innerType)
else:
with Children(d, size, childType=innerType):
for i in d.childRange():
p = d.createValue(addr + i * stepSize, innerType)
d.putSubItem(i, p)
else:
# about 0.5s / 1000 items
with Children(d, size, maxNumChild=2000, childType=innerType):
for i in d.childRange():
p = d.extractPointer(addr + i * stepSize)
x = d.createValue(p, innerType)
d.putSubItem(i, x)
def qform__QImage():
return [SimpleFormat, SeparateFormat]
def qdump__QImage(d, value):
if d.qtVersion() < 0x050000:
(vtbl, painters, imageData) = value.split('ppp');
else:
(vtbl, painters, reserved, imageData) = value.split('pppp');
if imageData == 0:
d.putValue('(invalid)')
return
(ref, width, height, depth, nbytes, padding, devicePixelRatio, colorTable,
bits, iformat) = d.split('iiiii@dppi', imageData)
d.putValue('(%dx%d)' % (width, height))
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putIntItem('width', width)
d.putIntItem('height', height)
d.putIntItem('nbytes', nbytes)
d.putIntItem('format', iformat)
with SubItem(d, 'data'):
d.putValue('0x%x' % bits)
d.putNumChild(0)
d.putType('void *')
displayFormat = d.currentItemFormat()
if displayFormat == SeparateFormat:
d.putDisplay('imagedata:separate', '%08x%08x%08x%08x' % (width, height, nbytes, iformat)
+ d.readMemory(bits, nbytes))
def qdump__QLinkedList(d, value):
dd = d.extractPointer(value)
ptrSize = d.ptrSize()
n = d.extractInt(dd + 4 + 2 * ptrSize);
ref = d.extractInt(dd + 2 * ptrSize);
d.check(0 <= n and n <= 100*1000*1000)
d.check(-1 <= ref and ref <= 1000)
d.putItemCount(n)
if d.isExpanded():
innerType = value.type[0]
with Children(d, n, maxNumChild=1000, childType=innerType):
pp = d.extractPointer(dd)
for i in d.childRange():
d.putSubItem(i, d.createValue(pp + 2 * ptrSize, innerType))
pp = d.extractPointer(pp)
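# The QLinkedList walk above assumes the Qt layout where QLinkedListData starts
# with the next/prev pointers followed by ref and size, and every node is again
# [next, prev, payload]; hence the value sits 2 * ptrSize into each node and
# iteration simply follows the node's first pointer.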
qqLocalesCount = None
def qdump__QLocale(d, value):
if d.isMsvcTarget(): # as long as this dumper relies on calling functions skip it for cdb
return
# Check for uninitialized 'index' variable. Retrieve size of
# QLocale data array from variable in qlocale.cpp.
# Default is 368 in Qt 4.8, 438 in Qt 5.0.1, the last one
# being 'System'.
#global qqLocalesCount
#if qqLocalesCount is None:
# #try:
# qqLocalesCount = int(value(ns + 'locale_data_size'))
# #except:
# qqLocalesCount = 438
#try:
# index = int(value['p']['index'])
#except:
# try:
# index = int(value['d']['d']['m_index'])
# except:
# index = int(value['d']['d']['m_data']...)
#d.check(index >= 0)
#d.check(index <= qqLocalesCount)
if d.qtVersion() < 0x50000:
d.putStringValue(d.call('const char *', value, 'name'))
d.putPlainChildren(value)
return
ns = d.qtNamespace()
dd = value.extractPointer()
(data, ref, numberOptions) = d.split('pi4s', dd)
(languageId, scriptId, countryId,
decimal, group, listt, percent, zero,
minus, plus, exponential) \
= d.split('2s{short}2s'
+ '{QChar}{QChar}{short}{QChar}{QChar}'
+ '{QChar}{QChar}{QChar}', data)
d.putStringValue(d.call('const char *', value, 'name'))
d.putNumChild(1)
if d.isExpanded():
with Children(d):
prefix = ns + 'QLocale::'
d.putSubItem('country', d.createValue(countryId, prefix + 'Country'))
d.putSubItem('language', d.createValue(languageId, prefix + 'Language'))
d.putSubItem('numberOptions', d.createValue(numberOptions, prefix + 'NumberOptions'))
d.putSubItem('decimalPoint', decimal)
d.putSubItem('exponential', exponential)
d.putSubItem('percent', percent)
d.putSubItem('zeroDigit', zero)
d.putSubItem('groupSeparator', group)
d.putSubItem('negativeSign', minus)
d.putSubItem('positiveSign', plus)
d.putCallItem('measurementSystem', '@QLocale::MeasurementSystem',
value, 'measurementSystem')
d.putCallItem('timeFormat_(short)', '@QString',
value, 'timeFormat', ns + 'QLocale::ShortFormat')
d.putCallItem('timeFormat_(long)', '@QString',
value, 'timeFormat', ns + 'QLocale::LongFormat')
d.putFields(value)
def qdump__QMapNode(d, value):
d.putEmptyValue()
d.putNumChild(2)
if d.isExpanded():
with Children(d):
d.putSubItem('key', value['key'])
d.putSubItem('value', value['value'])
def qdumpHelper_Qt4_QMap(d, value, keyType, valueType):
dd = value.extractPointer()
(dummy, it, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
ref, toplevel, n) = d.split('p' * 13 + 'iii', dd)
d.check(0 <= n and n <= 100*1000*1000)
d.checkRef(ref)
d.putItemCount(n)
if d.isExpanded():
if n > 10000:
n = 10000
typeCode = '{%s}@{%s}' % (keyType.name, valueType.name)
pp, payloadSize, fields = d.describeStruct(typeCode)
with Children(d, n):
for i in d.childRange():
key, pad, value = d.split(typeCode, it - payloadSize)
d.putPairItem(i, (key, value), 'key', 'value')
dummy, it = d.split('Pp', it)
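# The Qt 5 variant below walks the QMapData red-black tree directly: each node
# splits as (parent/color word, left, right, key, value), the root is assumed to
# start 8 bytes past the d-pointer (after ref and size), and the recursive
# in-order traversal yields the pairs in key order.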
def qdumpHelper_Qt5_QMap(d, value, keyType, valueType):
dptr = d.extractPointer(value)
(ref, n) = d.split('ii', dptr)
d.check(0 <= n and n <= 100*1000*1000)
d.check(-1 <= ref and ref < 100000)
d.putItemCount(n)
if d.isExpanded():
if n > 10000:
n = 10000
typeCode = 'ppp@{%s}@{%s}' % (keyType.name, valueType.name)
def helper(node):
(p, left, right, padding1, key, padding2, value) = d.split(typeCode, node)
if left:
for res in helper(left):
yield res
yield (key, value)
if right:
for res in helper(right):
yield res
with Children(d, n):
for (pair, i) in zip(helper(dptr + 8), range(n)):
d.putPairItem(i, pair, 'key', 'value')
def qform__QMap():
return mapForms()
def qdump__QMap(d, value):
qdumpHelper_QMap(d, value, value.type[0], value.type[1])
def qdumpHelper_QMap(d, value, keyType, valueType):
if d.qtVersion() < 0x50000:
qdumpHelper_Qt4_QMap(d, value, keyType, valueType)
else:
qdumpHelper_Qt5_QMap(d, value, keyType, valueType)
def qform__QMultiMap():
return mapForms()
def qdump__QMultiMap(d, value):
qdump__QMap(d, value)
def qform__QVariantMap():
return mapForms()
def qdump__QVariantMap(d, value):
qdumpHelper_QMap(d, value, d.createType('QString'), d.createType('QVariant'))
def qdump__QMetaMethod(d, value):
d.putQMetaStuff(value, 'QMetaMethod')
def qdump__QMetaEnum(d, value):
d.putQMetaStuff(value, 'QMetaEnum')
def qdump__QMetaProperty(d, value):
d.putQMetaStuff(value, 'QMetaProperty')
def qdump__QMetaClassInfo(d, value):
d.putQMetaStuff(value, 'QMetaClassInfo')
def qdump__QMetaObject(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putQObjectGutsHelper(0, 0, -1, value.address(), 'QMetaObject')
d.putMembersItem(value)
if False:
def qdump__QObjectPrivate__ConnectionList(d, value):
d.putNumChild(1)
if d.isExpanded():
i = 0
with Children(d):
first, last = value.split('pp')
currentConnection = first
connectionType = d.createType('@QObjectPrivate::Connection')
while currentConnection and currentConnection != last:
sender, receiver, slotObj, nextConnectionList, nextp, prev = \
d.split('pppppp', currentConnection)
d.putSubItem(i, d.createValue(currentConnection, connectionType))
currentConnection = nextp
i += 1
d.putFields(value)
d.putItemCount(i)
else:
d.putSpecialValue('minimumitemcount', 0)
def qdump__QProcEnvKey(d, value):
d.putByteArrayValue(value)
d.putPlainChildren(value)
def qdump__QPixmap(d, value):
if d.qtVersion() < 0x050000:
(vtbl, painters, dataPtr) = value.split('ppp');
else:
(vtbl, painters, reserved, dataPtr) = s = d.split('pppp', value);
if dataPtr == 0:
d.putValue('(invalid)')
else:
(dummy, width, height) = d.split('pii', dataPtr)
d.putValue('(%dx%d)' % (width, height))
d.putPlainChildren(value)
def qdump__QMargins(d, value):
d.putValue('left:%s, top:%s, right:%s, bottom:%s' % (value.split('iiii')))
d.putPlainChildren(value)
def qdump__QPoint(d, value):
d.putValue('(%s, %s)' % (value.split('ii')))
d.putPlainChildren(value)
def qdump__QPointF(d, value):
d.putValue('(%s, %s)' % (value.split('dd')))
d.putPlainChildren(value)
def qdump__QRect(d, value):
pp = lambda l: ('+' if l >= 0 else '') + str(l)
(x1, y1, x2, y2) = d.split('iiii', value)
d.putValue('%sx%s%s%s' % (x2 - x1 + 1, y2 - y1 + 1, pp(x1), pp(y1)))
d.putPlainChildren(value)
def qdump__QRectF(d, value):
pp = lambda l: ('+' if l >= 0 else '') + str(l)
(x, y, w, h) = value.split('dddd')
d.putValue('%sx%s%s%s' % (w, h, pp(x), pp(y)))
d.putPlainChildren(value)
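# Both rect dumpers above render 'WIDTHxHEIGHT+X+Y'. QRect stores two corners,
# so width is x2 - x1 + 1: e.g. QRect(10, 20, 100, 50) (stored as x1=10, y1=20,
# x2=109, y2=69) shows as '100x50+10+20', while QRectF stores x/y/w/h directly.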
def qdump__QRegExp(d, value):
# value.priv.engineKey.pattern
privAddress = d.extractPointer(value)
(eng, pattern) = d.split('p{QString}', privAddress)
d.putStringValue(pattern)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
try:
d.call('void', value, 'capturedTexts') # Warm up internal cache.
except:
# Might fail (LLDB, Core files, ...), still cache might be warm.
pass
(patternSyntax, caseSensitive, minimal, pad, t, captures) \
= d.split('{int}{int}B@{QString}{QStringList}', privAddress + 2 * d.ptrSize())
d.putSubItem('syntax', patternSyntax.cast(d.qtNamespace() + 'QRegExp::PatternSyntax'))
d.putSubItem('captures', captures)
def qdump__QRegion(d, value):
regionDataPtr = d.extractPointer(value)
if regionDataPtr == 0:
d.putSpecialValue('empty')
d.putNumChild(0)
else:
if d.qtVersion() >= 0x050400: # Padding removed in ee324e4ed
(ref, pad, rgn) = d.split('i@p', regionDataPtr)
(numRects, innerArea, rects, extents, innerRect) = \
d.split('iiP{QRect}{QRect}', rgn)
elif d.qtVersion() >= 0x050000:
(ref, pad, rgn) = d.split('i@p', regionDataPtr)
(numRects, pad, rects, extents, innerRect, innerArea) = \
d.split('i@P{QRect}{QRect}i', rgn)
else:
if d.isWindowsTarget():
(ref, pad, rgn) = d.split('i@p', regionDataPtr)
else:
(ref, pad, xrgn, xrectangles, rgn) = d.split('i@ppp', regionDataPtr)
if rgn == 0:
numRects = 0
else:
(numRects, pad, rects, extents, innerRect, innerArea) = \
d.split('i@P{QRect}{QRect}i', rgn)
d.putItemCount(numRects)
if d.isExpanded():
with Children(d):
d.putIntItem('numRects', numRects)
d.putIntItem('innerArea', innerArea)
d.putSubItem('extents', extents)
d.putSubItem('innerRect', innerRect)
d.putSubItem('rects', d.createVectorItem(rects, d.qtNamespace() + 'QRect'))
def qdump__QScopedPointer(d, value):
if value.pointer() == 0:
d.putValue('(null)')
d.putNumChild(0)
else:
d.putItem(value['d'])
d.putValue(d.currentValue.value, d.currentValue.encoding)
typeName = value.type.name
if value.type[1].name == d.qtNamespace() + 'QScopedPointerDeleter<%s>' % value.type[0].name:
typeName = d.qtNamespace() + 'QScopedPointer<%s>' % value.type[0].name
d.putBetterType(typeName)
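# QSet is a QHash with dummy values, so the dumper below repeats the QHash
# bucket/chain traversal and extracts only the key from each node; the Qt 4
# 'int' specialisation additionally omits the stored hash field.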
def qdump__QSet(d, value):
def hashDataFirstNode():
b = buckets
n = numBuckets
while n:
n -= 1
bb = d.extractPointer(b)
if bb != dptr:
return bb
b += ptrSize
return dptr
def hashDataNextNode(node):
(nextp, h) = d.split('pI', node)
if d.extractPointer(nextp):
return nextp
start = (h % numBuckets) + 1
b = buckets + start * ptrSize
n = numBuckets - start
while n:
n -= 1
bb = d.extractPointer(b)
if bb != nextp:
return bb
b += ptrSize
return nextp
ptrSize = d.ptrSize()
dptr = d.extractPointer(value)
(fakeNext, buckets, ref, size, nodeSize, userNumBits, numBits, numBuckets) = \
d.split('ppiiihhi', dptr)
d.check(0 <= size and size <= 100 * 1000 * 1000)
d.check(-1 <= ref and ref < 100000)
d.putItemCount(size)
if d.isExpanded():
keyType = value.type[0]
isShort = d.qtVersion() < 0x050000 and keyType.name == 'int'
with Children(d, size, childType=keyType):
node = hashDataFirstNode()
for i in d.childRange():
if isShort:
typeCode = 'P{%s}' % keyType.name
(pnext, key) = d.split(typeCode, node)
else:
typeCode = 'Pi@{%s}' % keyType.name
(pnext, hashval, padding1, key) = d.split(typeCode, node)
with SubItem(d, i):
d.putItem(key)
node = hashDataNextNode(node)
def qdump__QSharedData(d, value):
d.putValue('ref: %s' % value.to('i'))
d.putNumChild(0)
def qdump__QSharedDataPointer(d, value):
d_ptr = value['d']
if d_ptr.pointer() == 0:
d.putValue('(null)')
d.putNumChild(0)
else:
# This replaces the pointer by the pointee, making the
# pointer transparent.
try:
innerType = value.type[0]
except:
d.putValue(d_ptr)
d.putPlainChildren(value)
return
d.putBetterType(d.currentType)
d.putItem(d_ptr.dereference())
def qdump__QSize(d, value):
d.putValue('(%s, %s)' % value.split('ii'))
d.putPlainChildren(value)
def qdump__QSizeF(d, value):
d.putValue('(%s, %s)' % value.split('dd'))
d.putPlainChildren(value)
def qdump__QSizePolicy__Policy(d, value):
d.putEnumValue(value.integer(), {
0 : 'QSizePolicy::Fixed',
1 : 'QSizePolicy::GrowFlag',
2 : 'QSizePolicy::ExpandFlag',
3 : 'QSizePolicy::MinimumExpanding (GrowFlag|ExpandFlag)',
4 : 'QSizePolicy::ShrinkFlag',
5 : 'QSizePolicy::Preferred (GrowFlag|ShrinkFlag)',
7 : 'QSizePolicy::Expanding (GrowFlag|ShrinkFlag|ExpandFlag)',
8 : 'QSizePolicy::IgnoreFlag',
13 : 'QSizePolicy::Ignored (ShrinkFlag|GrowFlag|IgnoreFlag)',
})
def qdump__QSizePolicy(d, value):
bits = value.integer()
d.putEmptyValue(-99)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putIntItem('horStretch', (bits >> 0) & 0xff)
d.putIntItem('verStretch', (bits >> 8) & 0xff)
d.putEnumItem('horPolicy', (bits >> 16) & 0xf, "@QSizePolicy::Policy")
d.putEnumItem('verPolicy', (bits >> 20) & 0xf, "@QSizePolicy::Policy")
def qform__QStack():
return arrayForms()
def qdump__QStack(d, value):
qdump__QVector(d, value)
def qdump__QPolygonF(d, value):
data, size, alloc = d.vectorDataHelper(d.extractPointer(value))
d.putItemCount(size)
d.putPlotData(data, size, d.createType('QPointF'))
def qdump__QPolygon(d, value):
data, size, alloc = d.vectorDataHelper(d.extractPointer(value))
d.putItemCount(size)
d.putPlotData(data, size, d.createType('QPoint'))
def qdump__QGraphicsPolygonItem(d, value):
(vtbl, dptr) = value.split('pp')
# Assume sizeof(QGraphicsPolygonItemPrivate) == 400
if d.ptrSize() == 8:
offset = 384
elif d.isWindowsTarget():
offset = 328 if d.isMsvcTarget() else 320
else:
offset = 308
data, size, alloc = d.vectorDataHelper(d.extractPointer(dptr + offset))
d.putItemCount(size)
d.putPlotData(data, size, d.createType('QPointF'))
def qedit__QString(d, value, data):
d.call('void', value, 'resize', str(len(data)))
(base, size, alloc) = d.stringData(value)
d.setValues(base, 'short', [ord(c) for c in data])
def qform__QString():
return [SimpleFormat, SeparateFormat]
def qdump__QString(d, value):
d.putStringValue(value)
(data, size, alloc) = d.stringData(value)
d.putNumChild(size)
displayFormat = d.currentItemFormat()
if displayFormat == SeparateFormat:
d.putDisplay('utf16:separate', d.encodeString(value, limit=100000))
if d.isExpanded():
d.putArrayData(data, size, d.createType('QChar'))
def qdump__QStaticStringData(d, value):
size = value.type[0]
(ref, size, alloc, pad, offset, data) = value.split('iii@p%ss' % (2 * size))
d.putValue(d.hexencode(data), 'utf16')
d.putPlainChildren(value)
def qdump__QTypedArrayData(d, value):
if value.type[0].name == 'unsigned short':
qdump__QStringData(d, value)
else:
qdump__QArrayData(d, value)
def qdump__QStringData(d, value):
(ref, size, alloc, pad, offset) = value.split('III@p')
elided, shown = d.computeLimit(size, d.displayStringLimit)
data = d.readMemory(value.address() + offset, shown * 2)
d.putValue(data, 'utf16', elided=elided)
d.putNumChild(1)
d.putPlainChildren(value)
def qdump__QHashedString(d, value):
qdump__QString(d, value)
d.putBetterType(value.type)
def qdump__QQmlRefCount(d, value):
d.putItem(value['refCount'])
d.putBetterType(value.type)
def qdump__QStringRef(d, value):
(stringptr, pos, size) = value.split('pii')
if stringptr == 0:
d.putValue('(null)');
d.putNumChild(0)
return
(data, ssize, alloc) = d.stringData(d.createValue(stringptr, 'QString'))
d.putValue(d.readMemory(data + 2 * pos, 2 * size), 'utf16')
d.putPlainChildren(value)
def qdump__QStringList(d, value):
qdumpHelper_QList(d, value, d.createType('QString'))
d.putBetterType(value.type)
def qdump__QTemporaryFile(d, value):
qdump__QFile(d, value)
def qdump__QTextCodec(d, value):
name = d.call('const char *', value, 'name')
    d.putValue(d.encodeByteArray(name, limit=100), 'latin1')
d.putNumChild(2)
if d.isExpanded():
with Children(d):
d.putCallItem('name', '@QByteArray', value, 'name')
d.putCallItem('mibEnum', 'int', value, 'mibEnum')
d.putFields(value)
def qdump__QTextCursor(d, value):
privAddress = d.extractPointer(value)
if privAddress == 0:
d.putValue('(invalid)')
d.putNumChild(0)
else:
positionAddress = privAddress + 2 * d.ptrSize() + 8
d.putValue(d.extractInt(positionAddress))
d.putNumChild(1)
if d.isExpanded():
with Children(d):
positionAddress = privAddress + 2 * d.ptrSize() + 8
d.putIntItem('position', d.extractInt(positionAddress))
d.putIntItem('anchor', d.extractInt(positionAddress + 4))
d.putCallItem('selected', '@QString', value, 'selectedText')
d.putFields(value)
def qdump__QTextDocument(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putCallItem('blockCount', 'int', value, 'blockCount')
d.putCallItem('characterCount', 'int', value, 'characterCount')
d.putCallItem('lineCount', 'int', value, 'lineCount')
d.putCallItem('revision', 'int', value, 'revision')
d.putCallItem('toPlainText', '@QString', value, 'toPlainText')
d.putFields(value)
def qform__QUrl():
return [SimpleFormat, SeparateFormat]
def qdump__QUrl(d, value):
privAddress = d.extractPointer(value)
if not privAddress:
# d == 0 if QUrl was constructed with default constructor
d.putValue('<invalid>')
d.putNumChild(0)
return
if d.qtVersion() < 0x050000:
d.call('void', value, 'port') # Warm up internal cache.
d.call('void', value, 'path')
st = '{QString}'
ba = '{QByteArray}'
(ref, dummy,
scheme, userName, password, host, path, # QString
query, # QByteArray
fragment, # QString
encodedOriginal, encodedUserName, encodedPassword,
encodedPath, encodedFragment, # QByteArray
port) \
= d.split('i@' + st*5 + ba + st + ba*5 + 'i', privAddress)
else:
(ref, port, scheme, userName, password, host, path, query, fragment) \
= d.split('ii' + '{QString}' * 7, privAddress)
userNameEnc = d.encodeString(userName)
hostEnc = d.encodeString(host)
pathEnc = d.encodeString(path)
url = d.encodeString(scheme)
url += '3a002f002f00' # '://'
if len(userNameEnc):
url += userNameEnc + '4000' # '@'
url += hostEnc
if port >= 0:
url += '3a00' + ''.join(['%02x00' % ord(c) for c in str(port)])
url += pathEnc
d.putValue(url, 'utf16')
displayFormat = d.currentItemFormat()
if displayFormat == SeparateFormat:
d.putDisplay('utf16:separate', url)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putIntItem('port', port)
d.putSubItem('scheme', scheme)
d.putSubItem('userName', userName)
d.putSubItem('password', password)
d.putSubItem('host', host)
d.putSubItem('path', path)
d.putSubItem('query', query)
d.putSubItem('fragment', fragment)
d.putFields(value)
def qdump__QUuid(d, value):
r = value.split('IHHBBBBBBBB')
d.putValue('{%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x}' % r)
d.putNumChild(1)
d.putPlainChildren(value)
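# The QUuid split above follows the member order data1 (uint), data2/data3
# (ushorts) and data4 (eight bytes), printed in the canonical registry form
# '{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}'.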
def qdumpHelper_QVariant_0(d, value):
# QVariant::Invalid
d.putBetterType('%sQVariant (invalid)' % d.qtNamespace())
d.putValue('(invalid)')
def qdumpHelper_QVariant_1(d, value):
# QVariant::Bool
d.putBetterType('%sQVariant (bool)' % d.qtNamespace())
d.putValue('true' if value.to('b') else 'false')
def qdumpHelper_QVariant_2(d, value):
# QVariant::Int
d.putBetterType('%sQVariant (int)' % d.qtNamespace())
d.putValue(value.to('i'))
def qdumpHelper_QVariant_3(d, value):
# uint
d.putBetterType('%sQVariant (uint)' % d.qtNamespace())
d.putValue(value.to('I'))
def qdumpHelper_QVariant_4(d, value):
# qlonglong
d.putBetterType('%sQVariant (qlonglong)' % d.qtNamespace())
d.putValue(value.to('q'))
def qdumpHelper_QVariant_5(d, value):
# qulonglong
d.putBetterType('%sQVariant (qulonglong)' % d.qtNamespace())
d.putValue(value.to('Q'))
def qdumpHelper_QVariant_6(d, value):
# QVariant::Double
d.putBetterType('%sQVariant (double)' % d.qtNamespace())
d.putValue(value.to('d'))
qdumpHelper_QVariants_A = [
qdumpHelper_QVariant_0,
qdumpHelper_QVariant_1,
qdumpHelper_QVariant_2,
qdumpHelper_QVariant_3,
qdumpHelper_QVariant_4,
qdumpHelper_QVariant_5,
qdumpHelper_QVariant_6
]
qdumpHelper_QVariants_B = [
'QChar', # 7
'QVariantMap', # 8
'QVariantList',# 9
'QString', # 10
'QStringList', # 11
'QByteArray', # 12
'QBitArray', # 13
'QDate', # 14
'QTime', # 15
'QDateTime', # 16
'QUrl', # 17
'QLocale', # 18
'QRect', # 19
'QRectF', # 20
'QSize', # 21
'QSizeF', # 22
'QLine', # 23
'QLineF', # 24
'QPoint', # 25
'QPointF', # 26
'QRegExp', # 27
'QVariantHash',# 28
]
def qdumpHelper_QVariant_31(d, value):
# QVariant::VoidStar
d.putBetterType('%sQVariant (void *)' % d.qtNamespace())
d.putValue('0x%x' % d.extractPointer(value))
def qdumpHelper_QVariant_32(d, value):
# QVariant::Long
d.putBetterType('%sQVariant (long)' % d.qtNamespace())
if d.ptrSize() == 4:
d.putValue('%s' % d.extractInt(value))
else:
d.putValue('%s' % d.extractInt64(value)) # sic!
def qdumpHelper_QVariant_33(d, value):
# QVariant::Short
d.putBetterType('%sQVariant (short)' % d.qtNamespace())
d.putValue('%s' % d.extractShort(value))
def qdumpHelper_QVariant_34(d, value):
# QVariant::Char
d.putBetterType('%sQVariant (char)' % d.qtNamespace())
d.putValue('%s' % d.extractByte(value))
def qdumpHelper_QVariant_35(d, value):
# QVariant::ULong
d.putBetterType('%sQVariant (unsigned long)' % d.qtNamespace())
if d.ptrSize() == 4:
d.putValue('%s' % d.extractUInt(value))
else:
d.putValue('%s' % d.extractUInt64(value)) # sic!
def qdumpHelper_QVariant_36(d, value):
# QVariant::UShort
d.putBetterType('%sQVariant (unsigned short)' % d.qtNamespace())
d.putValue('%s' % d.extractUShort(value))
def qdumpHelper_QVariant_37(d, value):
# QVariant::UChar
d.putBetterType('%sQVariant (unsigned char)' % d.qtNamespace())
d.putValue('%s' % d.extractByte(value))
def qdumpHelper_QVariant_38(d, value):
# QVariant::Float
d.putBetterType('%sQVariant (float)' % d.qtNamespace())
d.putValue(value.to('f'))
qdumpHelper_QVariants_D = [
qdumpHelper_QVariant_31,
qdumpHelper_QVariant_32,
qdumpHelper_QVariant_33,
qdumpHelper_QVariant_34,
qdumpHelper_QVariant_35,
qdumpHelper_QVariant_36,
qdumpHelper_QVariant_37,
qdumpHelper_QVariant_38
]
qdumpHelper_QVariants_E = [
'QFont', # 64
'QPixmap', # 65
'QBrush', # 66
'QColor', # 67
'QPalette', # 68
'QIcon', # 69
'QImage', # 70
'QPolygon', # 71
'QRegion', # 72
'QBitmap', # 73
'QCursor', # 74
]
qdumpHelper_QVariants_F = [
# Qt 5. In Qt 4 add one.
'QKeySequence',# 75
'QPen', # 76
'QTextLength', # 77
'QTextFormat', # 78
'X',
'QTransform', # 80
'QMatrix4x4', # 81
'QVector2D', # 82
'QVector3D', # 83
'QVector4D', # 84
'QQuaternion', # 85
'QPolygonF' # 86
]
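# qdump__QVariant dispatches on the metatype id stored in the low 30 bits of the
# type word; bit 0x40000000 marks a shared payload reached through an extra
# indirection (see the isShared branches). Ids 0-6 use the simple handlers in
# table A, 7-28 the core types in B, 31-38 (Qt 5) or 128-135 (Qt 4) the extended
# scalars in D, 64-86 the Gui types in E/F, and any other id is treated as a
# user type whose name is obtained by calling QMetaType::typeName() in the
# inferior.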
def qdump__QVariant(d, value):
(data, typeStuff) = d.split('8sI', value)
variantType = typeStuff & 0x3fffffff
isShared = bool(typeStuff & 0x40000000)
# Well-known simple type.
if variantType <= 6:
qdumpHelper_QVariants_A[variantType](d, value)
d.putNumChild(0)
return None
# Extended Core type (Qt 5)
if variantType >= 31 and variantType <= 38 and d.qtVersion() >= 0x050000:
qdumpHelper_QVariants_D[variantType - 31](d, value)
d.putNumChild(0)
return None
# Extended Core type (Qt 4)
if variantType >= 128 and variantType <= 135 and d.qtVersion() < 0x050000:
if variantType == 128:
d.putBetterType('%sQVariant (void *)' % d.qtNamespace())
d.putValue('0x%x' % value.extractPointer())
else:
if variantType == 135: # Float
blob = value
else:
p = d.extractPointer(value)
blob = d.extractUInt64(p)
qdumpHelper_QVariants_D[variantType - 128](d, blob)
d.putNumChild(0)
return None
#warn('TYPE: %s' % variantType)
if variantType <= 86:
# Known Core or Gui type.
if variantType <= 28:
innert = qdumpHelper_QVariants_B[variantType - 7]
elif variantType <= 74:
innert = qdumpHelper_QVariants_E[variantType - 64]
elif d.qtVersion() < 0x050000:
innert = qdumpHelper_QVariants_F[variantType - 76]
else:
innert = qdumpHelper_QVariants_F[variantType - 75]
#data = value['d']['data']
innerType = d.qtNamespace() + innert
#warn('SHARED: %s' % isShared)
if isShared:
base1 = d.extractPointer(value)
#warn('BASE 1: %s %s' % (base1, innert))
base = d.extractPointer(base1)
#warn('SIZE 1: %s' % size)
val = d.createValue(base, innerType)
else:
#warn('DIRECT ITEM 1: %s' % innerType)
val = d.createValue(data, innerType)
val.laddress = value.laddress
d.putEmptyValue(-99)
d.putItem(val)
d.putBetterType('%sQVariant (%s)' % (d.qtNamespace(), innert))
return innert
# User types.
ns = d.qtNamespace()
d.putEmptyValue(-99)
d.putType('%sQVariant (%s)' % (ns, variantType))
d.putNumChild(1)
if d.isExpanded():
innerType = None
with Children(d):
ev = d.parseAndEvaluate
p = None
if p is None:
# Without debug info.
symbol = d.mangleName(d.qtNamespace() + 'QMetaType::typeName') + 'i'
p = ev('((const char *(*)(int))%s)(%d)' % (symbol, variantType))
#if p is None:
# p = ev('((const char *(*)(int))%sQMetaType::typeName)(%d)' % (ns, variantType))
if p is None:
# LLDB on Linux
p = ev('((const char *(*)(int))QMetaType::typeName)(%d)' % variantType)
if p is None:
d.putSpecialValue('notcallable')
return None
ptr = p.pointer()
(elided, blob) = d.encodeCArray(ptr, 1, 100)
innerType = d.hexdecode(blob)
# Prefer namespaced version.
if len(ns) > 0:
if not d.lookupNativeType(ns + innerType) is None:
innerType = ns + innerType
if isShared:
base1 = d.extractPointer(value)
base = d.extractPointer(base1)
val = d.createValue(base, innerType)
else:
val = d.createValue(data, innerType)
val.laddress = value.laddress
d.putSubItem('data', val)
if not innerType is None:
d.putBetterType('%sQVariant (%s)' % (ns, innerType))
return None
def qedit__QVector(d, value, data):
values = data.split(',')
d.call('void', value, 'resize', str(len(values)))
base, vsize, valloc = d.vectorDataHelper(d.extractPointer(value))
d.setValues(base, value.type[0].name, values)
def qform__QVector():
return arrayForms()
def qdump__QVector(d, value):
dd = d.extractPointer(value)
data, size, alloc = d.vectorDataHelper(dd)
d.check(0 <= size and size <= alloc and alloc <= 1000 * 1000 * 1000)
d.putItemCount(size)
d.putPlotData(data, size, value.type[0])
if False:
def qdump__QObjectConnectionList(d, value):
dd = d.extractPointer(value)
data, size, alloc = d.vectorDataHelper(dd)
d.check(0 <= size and size <= alloc and alloc <= 1000 * 1000 * 1000)
d.putItemCount(size)
d.putPlotData(data, size, d.createType('@QObjectPrivate::ConnectionList'))
def qdump__QVarLengthArray(d, value):
(cap, size, data) = value.split('iip')
d.check(0 <= size)
d.putItemCount(size)
d.putPlotData(data, size, value.type[0])
def qdump__QSharedPointer(d, value):
qdump_QWeakPointerHelper(d, value, False)
def qdump__QWeakPointer(d, value):
qdump_QWeakPointerHelper(d, value, True)
def qdump__QPointer(d, value):
    # Strictly we should pass value['wp'] instead of value, but since we only
    # split() the result and the (sub-)object address is the same, it makes no
    # difference and saves a few cycles.
qdump_QWeakPointerHelper(d, value, True, value.type[0])
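# QWeakPointer (and the weak pointer inside QPointer) stores the external
# refcount d-pointer before the value, whereas QSharedPointer stores the value
# first; hence the swapped split below. The weakref/strongref sanity checks
# reject obviously corrupt data before the payload is dereferenced.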
def qdump_QWeakPointerHelper(d, value, isWeak, innerType = None):
if isWeak:
(d_ptr, val) = value.split('pp')
else:
(val, d_ptr) = value.split('pp')
if d_ptr == 0 and val == 0:
d.putValue('(null)')
d.putNumChild(0)
return
if d_ptr == 0 or val == 0:
d.putValue('<invalid>')
d.putNumChild(0)
return
if d.qtVersion() >= 0x050000:
(weakref, strongref) = d.split('ii', d_ptr)
else:
(vptr, weakref, strongref) = d.split('pii', d_ptr)
d.check(strongref >= -1)
d.check(strongref <= weakref)
d.check(weakref <= 10*1000*1000)
if innerType is None:
innerType = value.type[0]
with Children(d):
short = d.putSubItem('data', d.createValue(val, innerType))
d.putIntItem('weakref', weakref)
d.putIntItem('strongref', strongref)
d.putValue(short.value, short.encoding)
def qdump__QXmlAttributes__Attribute(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
(qname, uri, localname, val) = value.split('{QString}' * 4)
d.putSubItem('qname', qname)
d.putSubItem('uri', uri)
d.putSubItem('localname', localname)
d.putSubItem('value', val)
def qdump__QXmlAttributes(d, value):
(vptr, atts) = value.split('pP')
innerType = d.createType(d.qtNamespace() + 'QXmlAttributes::Attribute', 4 * d.ptrSize())
val = d.createListItem(atts, innerType)
qdumpHelper_QList(d, val, innerType)
def qdump__QXmlStreamStringRef(d, value):
s = value['m_string']
(data, size, alloc) = d.stringData(s)
data += 2 * int(value['m_position'])
size = int(value['m_size'])
s = d.readMemory(data, 2 * size)
d.putValue(s, 'utf16')
d.putPlainChildren(value)
def qdump__QXmlStreamAttribute(d, value):
s = value['m_name']['m_string']
(data, size, alloc) = d.stringData(s)
data += 2 * int(value['m_name']['m_position'])
size = int(value['m_name']['m_size'])
s = d.readMemory(data, 2 * size)
d.putValue(s, 'utf16')
d.putPlainChildren(value)
#######################################################################
#
# V4
#
#######################################################################
def extractQmlData(d, value):
#if value.type.code == TypeCodePointer:
# value = value.dereference()
base = value.split('p')[0]
#mmdata = d.split('Q', base)[0]
#PointerMask = 0xfffffffffffffffd
#vtable = mmdata & PointerMask
#warn('QML DATA: %s' % value.stringify())
#data = value['data']
#return #data.cast(d.lookupType(value.type.name.replace('QV4::', 'QV4::Heap::')))
typeName = value.type.name.replace('QV4::', 'QV4::Heap::')
#warn('TYOE DATA: %s' % typeName)
return d.createValue(base, typeName)
def qdump__QV4__Heap__Base(d, value):
mm_data = value.extractPointer()
d.putValue('[%s]' % mm_data)
if d.isExpanded():
with Children(d):
with SubItem(d, 'vtable'):
d.putItem(d.createValue(mm_data & (~3), d.qtNamespace() + 'QV4::VTable'))
d.putBoolItem('isMarked', mm_data & 1)
d.putBoolItem('inUse', (mm_data & 2) == 0)
with SubItem(d, 'nextFree'):
d.putItem(d.createValue(mm_data & (~3), value.type))
def qdump__QV4__Heap__String(d, value):
# Note: There's also the 'Identifier' case. And the largestSubLength != 0 case.
(baseClass, textOrLeft, idOrRight, subtype, stringHash, largestSub, length, mm) \
= value.split('QppIIIIp')
textPtr = d.split('{QStringDataPtr}', textOrLeft)[0]
qdump__QStringData(d, d.createValue(textOrLeft, d.qtNamespace() + 'QStringData'))
if d.isExpanded():
with Children(d):
d.putFields(value)
def qmlPutHeapChildren(d, value):
d.putItem(extractQmlData(d, value))
def qdump__QV4__Object(d, value):
qmlPutHeapChildren(d, value)
def qdump__QV4__FunctionObject(d, value):
#qmlPutHeapChildren(d, value)
d.putEmptyValue()
if d.isExpanded():
with Children(d):
d.putFields(value)
d.putSubItem('heap', extractQmlData(d, value))
d.putCallItem('sourceLocation', '@QQmlSourceLocation',
value, 'sourceLocation')
def qdump__QV4__CompilationUnit(d, value):
qmlPutHeapChildren(d, value)
def qdump__QV4__CallContext(d, value):
qmlPutHeapChildren(d, value)
def qdump__QV4__ScriptFunction(d, value):
qmlPutHeapChildren(d, value)
def qdump__QV4__SimpleScriptFunction(d, value):
qdump__QV4__FunctionObject(d, value)
def qdump__QV4__ExecutionContext(d, value):
qmlPutHeapChildren(d, value)
def qdump__QQmlSourceLocation(d, value):
(sourceFile, line, col) = value.split('pHH')
(data, size, alloc) = d.stringData(value)
d.putValue(d.readMemory(data, 2 * size), 'utf16')
d.putField('valuesuffix', ':%s:%s' % (line, col))
d.putPlainChildren(value)
#def qdump__QV4__CallData(d, value):
# argc = value['argc'].integer()
# d.putItemCount(argc)
# if d.isExpanded():
# with Children(d):
# d.putSubItem('[this]', value['thisObject'])
# for i in range(0, argc):
# d.putSubItem(i, value['args'][i])
#
def qdump__QV4__String(d, value):
qmlPutHeapChildren(d, value)
def qdump__QV4__Identifier(d, value):
d.putStringValue(value)
d.putPlainChildren(value)
def qdump__QV4__PropertyHash(d, value):
data = value.extractPointer()
(ref, alloc, size, numBits, entries) = d.split('iiiip', data)
n = 0
innerType = d.qtNamespace() + 'QV4::Identifier'
with Children(d):
for i in range(alloc):
(identifier, index) = d.split('pI', entries + i * 2 * d.ptrSize())
if identifier != 0:
n += 1
with SubItem(d):
                    d.putItem(d.createValue(identifier, innerType))
                    d.putField('keysuffix', ' %d' % index)
d.putItemCount(n)
d.putPlainChildren(value)
def qdump__QV4__InternalClass__Transition(d, value):
identifier = d.createValue(value.extractPointer(), d.qtNamespace() + 'QV4::Identifier')
d.putStringValue(identifier)
d.putPlainChildren(value)
def qdump__QV4__InternalClassTransition(d, value):
qdump__QV4__InternalClass__Transition(d, value)
def qdump__QV4__SharedInternalClassData(d, value):
(ref, alloc, size, pad, data) = value.split('iIIip')
val = d.createValue(data, value.type[0])
with Children(d):
with SubItem(d, 'data'):
d.putItem(val)
short = d.currentValue
d.putIntItem('size', size)
d.putIntItem('alloc', alloc)
d.putIntItem('refcount', ref)
d.putValue(short.value, short.encoding)
def qdump__QV4__IdentifierTable(d, value):
(engine, alloc, size, numBits, pad, entries) = value.split('piiiip')
n = 0
innerType = d.qtNamespace() + 'QV4::Heap::String'
with Children(d):
for i in range(alloc):
identifierPtr = d.extractPointer(entries + i * d.ptrSize())
if identifierPtr != 0:
n += 1
with SubItem(d, None):
d.putItem(d.createValue(identifierPtr, innerType))
d.putItemCount(n)
d.putPlainChildren(value)
if False:
# 32 bit.
QV4_Masks_SilentNaNBit = 0x00040000
QV4_Masks_NaN_Mask = 0x7ff80000
QV4_Masks_NotDouble_Mask = 0x7ffa0000
QV4_Masks_Type_Mask = 0xffffc000
QV4_Masks_Immediate_Mask = QV4_Masks_NotDouble_Mask | 0x00004000 | QV4_Masks_SilentNaNBit
QV4_Masks_IsNullOrUndefined_Mask = QV4_Masks_Immediate_Mask | 0x08000
QV4_Masks_Tag_Shift = 32
QV4_ValueType_Undefined_Type = QV4_Masks_Immediate_Mask | 0x00000
QV4_ValueType_Null_Type = QV4_Masks_Immediate_Mask | 0x10000
QV4_ValueType_Boolean_Type = QV4_Masks_Immediate_Mask | 0x08000
QV4_ValueType_Integer_Type = QV4_Masks_Immediate_Mask | 0x18000
QV4_ValueType_Managed_Type = QV4_Masks_NotDouble_Mask | 0x00000 | QV4_Masks_SilentNaNBit
QV4_ValueType_Empty_Type = QV4_Masks_NotDouble_Mask | 0x18000 | QV4_Masks_SilentNaNBit
QV4_ConvertibleToInt = QV4_Masks_Immediate_Mask | 0x1
QV4_ValueTypeInternal_Null_Type_Internal = QV4_ValueType_Null_Type | QV4_ConvertibleToInt
QV4_ValueTypeInternal_Boolean_Type_Internal = QV4_ValueType_Boolean_Type | QV4_ConvertibleToInt
QV4_ValueTypeInternal_Integer_Type_Internal = QV4_ValueType_Integer_Type | QV4_ConvertibleToInt
def QV4_getValue(d, jsval): # (Dumper, QJSValue *jsval) -> QV4::Value *
dd = d.split('Q', jsval)[0]
if dd & 3:
return 0
return dd
def QV4_getVariant(d, jsval): # (Dumper, QJSValue *jsval) -> QVariant *
dd = d.split('Q', jsval)[0]
if dd & 1:
return dd & ~3
return 0
def QV4_valueForData(d, jsval): # (Dumper, QJSValue *jsval) -> QV4::Value *
v = QV4_getValue(d, jsval)
if v:
return v
warn('Not implemented: VARIANT')
return 0
def QV4_putObjectValue(d, objectPtr):
ns = d.qtNamespace()
base = d.extractPointer(objectPtr)
(inlineMemberOffset, inlineMemberSize, internalClass, prototype,
memberData, arrayData) = d.split('IIpppp', base)
d.putValue('PTR: 0x%x' % objectPtr)
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
d.putValue('[0x%x]' % objectPtr)
d.putType(' ');
d.putNumChild(0)
d.putIntItem('inlineMemberOffset', inlineMemberOffset)
d.putIntItem('inlineMemberSize', inlineMemberSize)
d.putIntItem('internalClass', internalClass)
d.putIntItem('prototype', prototype)
d.putPtrItem('memberData', memberData)
d.putPtrItem('arrayData', arrayData)
d.putSubItem('OBJ', d.createValue(objectPtr, ns + 'QV4::Object'))
#d.putFields(value)
def qdump__QV4_Object(d, value):
    ns = d.qtNamespace()
    # Assumption: mirror QV4_putObjectValue() above and take the value's
    # payload as the object pointer.
    objectPtr = d.extractPointer(value)
    d.putEmptyValue()
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
base = d.extractPointer(objectPtr)
(inlineMemberOffset, inlineMemberSize, internalClass, prototype,
memberData, arrayData) = d.split('IIpppp', base)
d.putValue('PTR: 0x%x' % objectPtr)
def qdump__QV4__Value(d, value):
if d.ptrSize() == 4:
qdump_32__QV4__Value(d, value)
else:
qdump_64__QV4__Value(d, value)
def qdump_32__QV4__Value(d, value):
# QV4_Masks_SilentNaNBit = 0x00040000
# QV4_Masks_NaN_Mask = 0x7ff80000
# QV4_Masks_NotDouble_Mask = 0x7ffa0000
# QV4_Masks_Type_Mask = 0xffffc000
ns = d.qtNamespace()
v = value.split('Q')[0]
tag = v >> 32
val = v & 0xffffffff
if (tag & 0x7fff2000) == 0x7fff2000: # Int
d.putValue(val)
d.putBetterType('%sQV4::Value (int32)' % ns)
elif (tag & 0x7fff4000) == 0x7fff4000: # Bool
d.putValue(val)
d.putBetterType('%sQV4::Value (bool)' % ns)
elif (tag & 0x7fff0000) == 0x7fff0000: # Null
d.putValue(val)
d.putBetterType('%sQV4::Value (null)' % ns)
elif (tag & 0x7ffa0000) != 0x7ffa0000: # Double
d.putValue(value.split('d')[0])
d.putBetterType('%sQV4::Value (double)' % ns)
elif tag == 0x7ffa0000:
if val == 0:
d.putValue('(undefined)')
d.putBetterType('%sQV4::Value (undefined)' % ns)
else:
managed = d.createValue(val, ns + 'QV4::Heap::Base')
qdump__QV4__Heap__Base(d, managed)
#d.putValue('[0x%x]' % v)
#d.putPlainChildren(value)
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
d.putValue('[0x%x]' % v)
d.putType(' ');
d.putNumChild(0)
with SubItem(d, '[val]'):
d.putValue('[0x%x]' % val)
d.putType(' ');
d.putNumChild(0)
with SubItem(d, '[tag]'):
d.putValue('[0x%x]' % tag)
d.putType(' ');
d.putNumChild(0)
#with SubItem(d, '[vtable]'):
# d.putItem(d.createValue(vtable, ns + 'QV4::VTable'))
# d.putType(' ');
# d.putNumChild(0)
d.putFields(value)
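# The 64-bit variant below decodes NaN-boxed values: doubles are stored XOR-ed
# with the NaN-encode mask, int32 and the null/undefined markers are
# distinguished by tag bits in the upper word, and everything else is treated as
# a managed heap pointer whose vtable flags select string, object or function
# handling. Two mask sets are kept because the encoding changed with
# QtDeclarative type-info version 2.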
def qdump_64__QV4__Value(d, value):
dti = d.qtDeclarativeTypeInfoVersion()
new = dti is not None and dti >= 2
if new:
QV4_NaNEncodeMask = 0xfffc000000000000
QV4_Masks_Immediate_Mask = 0x00020000 # bit 49
QV4_ValueTypeInternal_Empty_Type_Internal = QV4_Masks_Immediate_Mask | 0
QV4_ConvertibleToInt = QV4_Masks_Immediate_Mask | 0x10000 # bit 48
QV4_ValueTypeInternal_Null_Type_Internal = QV4_ConvertibleToInt | 0x08000
QV4_ValueTypeInternal_Boolean_Type_Internal = QV4_ConvertibleToInt | 0x04000
QV4_ValueTypeInternal_Integer_Type_Internal = QV4_ConvertibleToInt | 0x02000
QV4_ValueType_Undefined_Type = 0 # Dummy to make generic code below pass.
else:
QV4_NaNEncodeMask = 0xffff800000000000
QV4_Masks_Immediate_Mask = 0x00018000
QV4_IsInt32Mask = 0x0002000000000000
QV4_IsDoubleMask = 0xfffc000000000000
QV4_IsNumberMask = QV4_IsInt32Mask | QV4_IsDoubleMask
QV4_IsNullOrUndefinedMask = 0x0000800000000000
QV4_IsNullOrBooleanMask = 0x0001000000000000
QV4_Masks_NaN_Mask = 0x7ff80000
QV4_Masks_Type_Mask = 0xffff8000
QV4_Masks_IsDouble_Mask = 0xfffc0000
QV4_Masks_IsNullOrUndefined_Mask = 0x00008000
QV4_Masks_IsNullOrBoolean_Mask = 0x00010000
QV4_ValueType_Undefined_Type = QV4_Masks_IsNullOrUndefined_Mask
QV4_ValueType_Null_Type = QV4_Masks_IsNullOrUndefined_Mask \
| QV4_Masks_IsNullOrBoolean_Mask
QV4_ValueType_Boolean_Type = QV4_Masks_IsNullOrBoolean_Mask
QV4_ValueType_Integer_Type = 0x20000 | QV4_Masks_IsNullOrBoolean_Mask
QV4_ValueType_Managed_Type = 0
QV4_ValueType_Empty_Type = QV4_ValueType_Undefined_Type | 0x4000
QV4_ValueTypeInternal_Null_Type_Internal = QV4_ValueType_Null_Type
QV4_ValueTypeInternal_Boolean_Type_Internal = QV4_ValueType_Boolean_Type
QV4_ValueTypeInternal_Integer_Type_Internal = QV4_ValueType_Integer_Type
QV4_PointerMask = 0xfffffffffffffffd
QV4_Masks_Tag_Shift = 32
QV4_IsDouble_Shift = 64-14
QV4_IsNumber_Shift = 64-15
QV4_IsConvertibleToInt_Shift = 64-16
QV4_IsManaged_Shift = 64-17
v = value.split('Q')[0]
tag = v >> QV4_Masks_Tag_Shift
vtable = v & QV4_PointerMask
ns = d.qtNamespace()
if (v >> QV4_IsNumber_Shift) == 1:
d.putBetterType('%sQV4::Value (int32)' % ns)
vv = v & 0xffffffff
vv = vv if vv < 0x80000000 else -(0x100000000 - vv)
d.putBetterType('%sQV4::Value (int32)' % ns)
d.putValue('%d' % vv)
elif (v >> QV4_IsDouble_Shift):
d.putBetterType('%sQV4::Value (double)' % ns)
d.putValue('%x' % (v ^ QV4_NaNEncodeMask), 'float:8')
elif tag == QV4_ValueType_Undefined_Type and not new:
d.putBetterType('%sQV4::Value (undefined)' % ns)
d.putValue('(undefined)')
elif tag == QV4_ValueTypeInternal_Null_Type_Internal:
d.putBetterType('%sQV4::Value (null?)' % ns)
d.putValue('(null?)')
elif v == 0:
if new:
d.putBetterType('%sQV4::Value (undefined)' % ns)
d.putValue('(undefined)')
else:
d.putBetterType('%sQV4::Value (null)' % ns)
d.putValue('(null)')
#elif ((v >> QV4_IsManaged_Shift) & ~1) == 1:
# d.putBetterType('%sQV4::Value (null/undef)' % ns)
# d.putValue('(null/undef)')
#elif v & QV4_IsNullOrBooleanMask:
# d.putBetterType('%sQV4::Value (null/bool)' % ns)
# d.putValue('(null/bool)')
# d.putValue(v & 1)
else:
(parentv, flags, pad, className) = d.split('pIIp', vtable)
#vtable = value['m']['vtable']
        if flags & 2: # isString
d.putBetterType('%sQV4::Value (string)' % ns)
qdump__QV4__Heap__String(d, d.createValue(v, ns + 'QV4::Heap::String'))
#d.putStringValue(d.extractPointer(value) + 2 * d.ptrSize())
#d.putValue('ptr: 0x%x' % d.extractPointer(value))
return
elif flags & 4: # isObject
d.putBetterType('%sQV4::Value (object)' % ns)
#QV4_putObjectValue(d, d.extractPointer(value) + 2 * d.ptrSize())
arrayVTable = d.symbolAddress(ns + 'QV4::ArrayObject::static_vtbl')
#warn('ARRAY VTABLE: 0x%x' % arrayVTable)
d.putNumChild(1)
d.putItem(d.createValue(d.extractPointer(value) + 2 * d.ptrSize(), ns + 'QV4::Object'))
return
elif flags & 8: # isFunction
d.putBetterType('%sQV4::Value (function)' % ns)
d.putEmptyValue()
else:
d.putBetterType('%sQV4::Value (unknown)' % ns)
#d.putValue('[0x%x]' % v)
d.putValue('[0x%x : flag 0x%x : tag 0x%x]' % (v, flags, tag))
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
d.putValue('[0x%x]' % v)
d.putType(' ');
d.putNumChild(0)
with SubItem(d, '[vtable]'):
d.putItem(d.createValue(vtable, ns + 'QV4::VTable'))
d.putType(' ');
d.putNumChild(0)
d.putFields(value)
def qdump__QV__PropertyHashData(d, value):
(ref, alloc, size, numBits, entries) = value.split('IIIIp')
d.putItemCount(size)
if d.isExpanded():
with Children(d):
d.putFields(value)
def qdump__QV__PropertyHash(d, value):
    qdump__QV__PropertyHashData(d, d.createValue(d.extractPointer(value), value.type.name + 'Data'))
def qdump__QV4__Scoped(d, value):
innerType = value.type[0]
d.putItem(d.createValue(value.extractPointer(), innerType))
#d.putEmptyValue()
#if d.isExpanded():
# with Children(d):
# d.putSubItem('[]', d.createValue(value.extractPointer(), innerType))
# d.putFields(value)
def qdump__QV4__ScopedString(d, value):
innerType = value.type[0]
qdump__QV4__String(d, d.createValue(value.extractPointer(), innerType))
def qdump__QJSValue(d, value):
if d.ptrSize() == 4:
qdump_32__QJSValue(d, value)
else:
qdump_64__QJSValue(d, value)
def qdump_32__QJSValue(d, value):
ns = d.qtNamespace()
dd = value.split('I')[0]
d.putValue('[0x%x]' % dd)
if dd == 0:
d.putValue('(null)')
d.putType(value.type.name + ' (null)')
elif dd & 1:
variant = d.createValue(dd & ~3, ns + 'QVariant')
qdump__QVariant(d, variant)
d.putBetterType(d.currentType.value.replace('QVariant', 'QJSValue', 1))
elif dd & 3 == 0:
v4value = d.createValue(dd, ns + 'QV4::Value')
qdump_32__QV4__Value(d, v4value)
d.putBetterType(d.currentType.value.replace('QV4::Value', 'QJSValue', 1))
return
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
d.putValue('[0x%x]' % dd)
d.putType(' ');
d.putNumChild(0)
d.putFields(value)
def qdump_64__QJSValue(d, value):
ns = d.qtNamespace()
dd = value.split('Q')[0]
if dd == 0:
d.putValue('(null)')
d.putType(value.type.name + ' (null)')
elif dd & 1:
variant = d.createValue(dd & ~3, ns + 'QVariant')
qdump__QVariant(d, variant)
d.putBetterType(d.currentType.value.replace('QVariant', 'QJSValue', 1))
else:
d.putEmptyValue()
#qdump__QV4__Value(d, d.createValue(dd, ns + 'QV4::Value'))
#return
if d.isExpanded():
with Children(d):
with SubItem(d, '[raw]'):
d.putValue('[0x%x]' % dd)
d.putType(' ');
d.putNumChild(0)
d.putFields(value)
def qdump__QQmlBinding(d, value):
d.putEmptyValue()
if d.isExpanded():
with Children(d):
d.putCallItem('expressionIdentifier', '@QString',
value, 'expressionIdentifier')
d.putFields(value)
#######################################################################
#
# Webkit
#
#######################################################################
def jstagAsString(tag):
# enum { Int32Tag = 0xffffffff };
# enum { CellTag = 0xfffffffe };
# enum { TrueTag = 0xfffffffd };
# enum { FalseTag = 0xfffffffc };
# enum { NullTag = 0xfffffffb };
# enum { UndefinedTag = 0xfffffffa };
# enum { EmptyValueTag = 0xfffffff9 };
# enum { DeletedValueTag = 0xfffffff8 };
if tag == -1:
return 'Int32'
if tag == -2:
return 'Cell'
if tag == -3:
return 'True'
    if tag == -4:
        return 'False'
    if tag == -5:
        return 'Null'
    if tag == -6:
        return 'Undefined'
    if tag == -7:
        return 'Empty'
    if tag == -8:
        return 'Deleted'
return 'Unknown'
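# A minimal, hypothetical helper (not part of the original dumper) illustrating how the
# unsigned 32-bit enum values listed above arrive here as signed tags: FalseTag
# (0xfffffffc) reads as -4 once reinterpreted as a signed 32-bit integer.
def _example_jstag_from_raw(raw=0xfffffffc):
    # Reinterpret the unsigned 32-bit enum value as a signed integer, then name it.
    tag = raw - 0x100000000 if raw >= 0x80000000 else raw
    return jstagAsString(tag)  # -> 'False' for the default argument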
def qdump__QTJSC__JSValue(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
tag = value['u']['asBits']['tag']
payload = value['u']['asBits']['payload']
#d.putIntItem('tag', tag)
with SubItem(d, 'tag'):
d.putValue(jstagAsString(int(tag)))
d.putNoType()
d.putNumChild(0)
d.putIntItem('payload', int(payload))
d.putFields(value['u'])
if tag == -2:
cellType = d.lookupType('QTJSC::JSCell').pointer()
d.putSubItem('cell', payload.cast(cellType))
try:
# FIXME: This might not always be a variant.
delegateType = d.lookupType(d.qtNamespace() + 'QScript::QVariantDelegate').pointer()
delegate = scriptObject['d']['delegate'].cast(delegateType)
#d.putSubItem('delegate', delegate)
variant = delegate['m_value']
d.putSubItem('variant', variant)
except:
pass
def qdump__QScriptValue(d, value):
# structure:
# engine QScriptEnginePrivate
# jscValue QTJSC::JSValue
# next QScriptValuePrivate *
# numberValue 5.5987310416280426e-270 myns::qsreal
# prev QScriptValuePrivate *
# ref QBasicAtomicInt
# stringValue QString
# type QScriptValuePrivate::Type: { JavaScriptCore, Number, String }
#d.putEmptyValue()
dd = value['d_ptr']['d']
ns = d.qtNamespace()
if dd.pointer() == 0:
d.putValue('(invalid)')
d.putNumChild(0)
return
if int(dd['type']) == 1: # Number
d.putValue(dd['numberValue'])
d.putType('%sQScriptValue (Number)' % ns)
d.putNumChild(0)
return
if int(dd['type']) == 2: # String
d.putStringValue(dd['stringValue'])
d.putType('%sQScriptValue (String)' % ns)
return
d.putType('%sQScriptValue (JSCoreValue)' % ns)
x = dd['jscValue']['u']
tag = x['asBits']['tag']
payload = x['asBits']['payload']
#isValid = int(x['asBits']['tag']) != -6 # Empty
#isCell = int(x['asBits']['tag']) == -2
#warn('IS CELL: %s ' % isCell)
#isObject = False
#className = 'UNKNOWN NAME'
#if isCell:
# # isCell() && asCell()->isObject();
# # in cell: m_structure->typeInfo().type() == ObjectType;
# cellType = d.lookupType('QTJSC::JSCell').pointer()
# cell = payload.cast(cellType).dereference()
# dtype = 'NO DYNAMIC TYPE'
# try:
# dtype = cell.dynamic_type
# except:
# pass
# warn('DYNAMIC TYPE: %s' % dtype)
# warn('STATUC %s' % cell.type)
# type = cell['m_structure']['m_typeInfo']['m_type']
# isObject = int(type) == 7 # ObjectType;
# className = 'UNKNOWN NAME'
#warn('IS OBJECT: %s ' % isObject)
#inline bool JSCell::inherits(const ClassInfo* info) const
#for (const ClassInfo* ci = classInfo(); ci; ci = ci->parentClass) {
# if (ci == info)
# return true;
#return false;
try:
# This might already fail for 'native' payloads.
scriptObjectType = d.lookupType(ns + 'QScriptObject').pointer()
scriptObject = payload.cast(scriptObjectType)
# FIXME: This might not always be a variant.
delegateType = d.lookupType(ns + 'QScript::QVariantDelegate').pointer()
delegate = scriptObject['d']['delegate'].cast(delegateType)
#d.putSubItem('delegate', delegate)
variant = delegate['m_value']
#d.putSubItem('variant', variant)
t = qdump__QVariant(d, variant)
# Override the 'QVariant (foo)' output
d.putBetterType('%sQScriptValue (%s)' % (ns, t))
if t != 'JSCoreValue':
return
except:
pass
# This is a 'native' JSCore type for e.g. QDateTime.
d.putValue('<native>')
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putSubItem('jscValue', dd['jscValue'])
def qdump__QQmlAccessorProperties__Properties(d, value):
size = int(value['count'])
d.putItemCount(size)
if d.isExpanded():
d.putArrayData(value['properties'], size)
#
# QJson
#
def qdumpHelper_qle_cutBits(value, offset, length):
return (value >> offset) & ((1 << length) - 1)
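# A minimal, hypothetical sketch (not used by the dumpers) of what the helper above does:
# it shifts the value right by `offset` bits and keeps the lowest `length` bits.
def _example_cut_bits():
    # 0b11101000 shifted right by 5 leaves 0b111; masking to 3 bits keeps 7.
    return qdumpHelper_qle_cutBits(0b11101000, 5, 3)  # -> 7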
def qdump__QJsonPrivate__qle_bitfield(d, value):
offset = value.type[0]
length = value.type[1]
val = value['val'].integer()
d.putValue('%s' % qdumpHelper_qle_cutBits(val, offset, length))
d.putNumChild(0)
def qdumpHelper_qle_signedbitfield_value(d, value):
offset = value.type[0]
length = value.type[1]
val = value['val'].integer()
val = (val >> offset) & ((1 << length) - 1)
if val >= (1 << (length - 1)):
val -= (1 << (length - 1))
return val
def qdump__QJsonPrivate__qle_signedbitfield(d, value):
d.putValue('%s' % qdumpHelper_qle_signedbitfield_value(d, value))
d.putNumChild(0)
def qdump__QJsonPrivate__q_littleendian(d, value):
d.putValue('%s' % value['val'].integer())
d.putNumChild(0)
def qdumpHelper_QJsonValue(d, data, base, pv):
"""
Parameters are the parameters to the
QJsonValue(QJsonPrivate::Data *data, QJsonPrivate::Base *base,
const QJsonPrivate::Value& pv)
constructor. We 'inline' the construction here.
data is passed as pointer integer
base is passed as pointer integer
pv is passed as 32 bit integer.
"""
d.checkIntType(data)
d.checkIntType(base)
d.checkIntType(pv)
t = qdumpHelper_qle_cutBits(pv, 0, 3)
v = qdumpHelper_qle_cutBits(pv, 5, 27)
latinOrIntValue = qdumpHelper_qle_cutBits(pv, 3, 1)
if t == 0:
d.putType('QJsonValue (Null)')
d.putValue('Null')
d.putNumChild(0)
return
if t == 1:
d.putType('QJsonValue (Bool)')
d.putValue('true' if v else 'false')
d.putNumChild(0)
return
if t == 2:
d.putType('QJsonValue (Number)')
if latinOrIntValue:
w = toInteger(v)
if w >= 0x4000000:
w -= 0x8000000
d.putValue(w)
else:
data = base + v
f = d.split('d', data)[0]
d.putValue(str(f))
d.putNumChild(0)
return
if t == 3:
d.putType('QJsonValue (String)')
data = base + v;
if latinOrIntValue:
length = d.extractUShort(data)
d.putValue(d.readMemory(data + 2, length), 'latin1')
else:
length = d.extractUInt(data)
d.putValue(d.readMemory(data + 4, length * 2), 'utf16')
d.putNumChild(0)
return
if t == 4:
d.putType('QJsonValue (Array)')
qdumpHelper_QJsonArray(d, data, base + v)
return
if t == 5:
d.putType('QJsonValue (Object)')
qdumpHelper_QJsonObject(d, data, base + v)
d.putNumChild(0)
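# A minimal, hypothetical sketch (values invented for illustration) of the bit layout
# used above: type in bits 0-2, the latin/int flag in bit 3, and the value (or the
# string/array/object offset) in bits 5-31.
def _example_unpack_qjson_value(pv=(1 << 0) | (1 << 5)):
    t = qdumpHelper_qle_cutBits(pv, 0, 3)             # 1 -> Bool
    latin_or_int = qdumpHelper_qle_cutBits(pv, 3, 1)  # 0
    v = qdumpHelper_qle_cutBits(pv, 5, 27)            # 1 -> true
    return t, latin_or_int, v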
def qdumpHelper_QJsonArray(d, data, array):
"""
Parameters are the parameters to the
QJsonArray(QJsonPrivate::Data *data, QJsonPrivate::Array *array)
constructor. We 'inline' the construction here.
array is passed as integer pointer to the QJsonPrivate::Base object.
"""
if data:
# The 'length' part of the _dummy member:
n = qdumpHelper_qle_cutBits(d.extractUInt(array + 4), 1, 31)
else:
n = 0
d.putItemCount(n)
if d.isExpanded():
with Children(d, maxNumChild=1000):
table = array + d.extractUInt(array + 8)
for i in range(n):
with SubItem(d, i):
qdumpHelper_QJsonValue(d, data, array, d.extractUInt(table + 4 * i))
def qdumpHelper_QJsonObject(d, data, obj):
"""
Parameters are the parameters to the
QJsonObject(QJsonPrivate::Data *data, QJsonPrivate::Object *object);
constructor. We "inline" the construction here.
obj is passed as integer pointer to the QJsonPrivate::Base object.
"""
if data:
# The 'length' part of the _dummy member:
n = qdumpHelper_qle_cutBits(d.extractUInt(obj + 4), 1, 31)
else:
n = 0
d.putItemCount(n)
if d.isExpanded():
with Children(d, maxNumChild=1000):
table = obj + d.extractUInt(obj + 8)
for i in range(n):
with SubItem(d, i):
entryPtr = table + 4 * i # entryAt(i)
entryStart = obj + d.extractUInt(entryPtr) # Entry::value
keyStart = entryStart + 4 # sizeof(QJsonPrivate::Entry) == 4
val = d.extractInt(entryStart)
key = d.extractInt(keyStart)
isLatinKey = qdumpHelper_qle_cutBits(val, 4, 1)
if isLatinKey:
keyLength = d.extractUShort(keyStart)
d.putField('key', d.readMemory(keyStart + 2, keyLength))
d.putField('keyencoded', 'latin1')
else:
keyLength = d.extractUInt(keyStart)
d.putField('key', d.readMemory(keyStart + 4, keyLength))
d.putField('keyencoded', 'utf16')
qdumpHelper_QJsonValue(d, data, obj, val)
def qdump__QJsonValue(d, value):
(data, dd, t) = value.split('QpI')
if t == 0:
d.putType('QJsonValue (Null)')
d.putValue('Null')
d.putNumChild(0)
return
if t == 1:
d.putType('QJsonValue (Bool)')
v = value.split('b')
d.putValue('true' if v else 'false')
d.putNumChild(0)
return
if t == 2:
d.putType('QJsonValue (Number)')
d.putValue(value.split('d'))
d.putNumChild(0)
return
if t == 3:
d.putType('QJsonValue (String)')
elided, base = d.encodeStringHelper(data, d.displayStringLimit)
d.putValue(base, 'utf16', elided=elided)
d.putNumChild(0)
return
if t == 4:
d.putType('QJsonValue (Array)')
qdumpHelper_QJsonArray(d, dd, data)
return
if t == 5:
d.putType('QJsonValue (Object)')
qdumpHelper_QJsonObject(d, dd, data)
return
d.putType('QJsonValue (Undefined)')
d.putEmptyValue()
d.putNumChild(0)
def qdump__QJsonArray(d, value):
qdumpHelper_QJsonArray(d, value['d'].pointer(), value['a'].pointer())
def qdump__QJsonObject(d, value):
qdumpHelper_QJsonObject(d, value['d'].pointer(), value['o'].pointer())
def qdump__QSqlResultPrivate(d, value):
# QSqlResult *q_ptr;
# QPointer<QSqlDriver> sqldriver;
# int idx;
# QString sql;
# bool active;
# bool isSel;
# QSqlError error;
# bool forwardOnly;
# QSql::NumericalPrecisionPolicy precisionPolicy;
# int bindCount;
# QSqlResult::BindingSyntax binds;
# QString executedQuery;
# QHash<int, QSql::ParamType> types;
# QVector<QVariant> values;
# QHash<QString, QList<int> > indexes;
# QVector<QHolder> holders
vptr, qptr, sqldriver1, sqldriver2, idx, pad, sql, active, isSel, pad, \
error1, error2, error3, \
forwardOnly, pad, precisionPolicy, bindCount, \
binds, executedQuery, types, values, indexes, holders = \
value.split('ppppi@{QString}bb@pppb@iiii{QString}ppp')
d.putStringValue(sql)
d.putPlainChildren(value)
def qdump__QSqlField(d, value):
val, dptr = value.split('{QVariant}p')
d.putNumChild(1)
qdump__QVariant(d, val)
d.putBetterType(d.currentType.value.replace('QVariant', 'QSqlField'))
d.putPlainChildren(value)
def qdump__QLazilyAllocated(d, value):
p = value.extractPointer()
if p == 0:
d.putValue("(null)")
d.putNumChild(0)
else:
d.putItem(d.createValue(p, value.type[0]))
d.putBetterType(value.type)
def qdump__qfloat16(d, value):
h = value.split('H')[0]
# Stole^H^H^HHeavily inspired by J.F. Sebastian at
# http://math.stackexchange.com/questions/1128204/how-to-convert-
# from-floating-point-binary-to-decimal-in-half-precision16-bits
sign = h >> 15
exp = (h >> 10) & 0b011111
fraction = h & (2**10 - 1)
if exp == 0:
if fraction == 0:
res = -0.0 if sign else 0.0
else:
res = (-1)**sign * fraction / 2**10 * 2**(-14) # subnormal
elif exp == 0b11111:
res = ('-inf' if sign else 'inf') if fraction == 0 else 'nan'
else:
res = (-1)**sign * (1 + 1. * fraction / 2**10) * 2**(exp - 15)
d.putValue(res)
d.putNumChild(1)
d.putPlainChildren(value)
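# A minimal, hypothetical worked example (not called by the dumper) of the same
# half-precision decoding for a normal number: 0x3555 is approximately one third.
def _example_decode_qfloat16(h=0x3555):
    sign = h >> 15                      # 0
    exp = (h >> 10) & 0b011111          # 13
    fraction = h & (2**10 - 1)          # 341
    return (-1)**sign * (1 + 1. * fraction / 2**10) * 2**(exp - 15)  # ~0.333251953125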
|
gpl-3.0
| 5,603,281,900,807,769,000
| 33.923337
| 103
| 0.568351
| false
| 3.293144
| false
| false
| false
|
dmpayton/django-fbi
|
django_fbi/views.py
|
1
|
1651
|
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template import RequestContext, Template
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django_fbi.app import apps
from django_fbi.backends import get_backend
from django_fbi.models import FacebookAccount, FacebookApp
from django_fbi.signals import facebook_deauthorize
FBI_BACKEND = getattr(settings, 'FBI_BACKEND', 'django_fbi.backends.DefaultBackend')
def channel(request):
return HttpResponse('<script src="//connect.facebook.net/en_US/all.js"></script>')
def connect(request):
facebook_backend = get_backend(FBI_BACKEND)
return facebook_backend(request).connect_view()
connect = never_cache(connect)
def deauthorize(request):
facebook_backend = get_backend(FBI_BACKEND)
return facebook_backend(request).deauthorize_view()
deauthorize = csrf_exempt(deauthorize)
deauthorize = never_cache(deauthorize)
def view_app(request, slug, page):
try:
## Check the registry to see if we have Python app.
app = apps[slug]
        return getattr(app, '%s_view' % page)(request)
    except (KeyError, NotImplementedError):
## Nothing registered, check the database.
app = get_object_or_404(FacebookApp, namespace=slug)
context = RequestContext(request, {'app': app})
page_template = getattr(app, '%s_template' % page)
if page_template:
return render_to_response(page_template, context)
page_content = getattr(app, '%s_content' % page)
return HttpResponse(Template(page_content).render(context))
|
mit
| 6,748,672,571,937,231,000
| 40.275
| 86
| 0.727438
| false
| 3.786697
| false
| false
| false
|
dotKom/onlineweb4
|
utils/helpers.py
|
1
|
1794
|
# -*- coding: utf8 -*-
import json
from datetime import date, datetime
import pytz
from django.conf import settings
from django.db import models
from django.db.models.query import QuerySet
from django.utils.timezone import make_aware
class JsonHelper(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime("%d.%m.%Y %H.%M")
elif isinstance(obj, date):
return obj.strftime("%d.%m.%Y")
elif isinstance(obj, models.Model):
return obj.serializable_object()
elif isinstance(obj, QuerySet):
return list(obj)
return json.JSONEncoder.default(self, obj)
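# A minimal usage sketch (hypothetical data, not part of the original module): JsonHelper
# lets json.dumps() serialise dates, datetimes, model instances and querysets that the
# stock encoder would reject.
def _example_json_helper():
    return json.dumps({'today': date(2014, 1, 31)}, cls=JsonHelper)  # '{"today": "31.01.2014"}'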
def humanize_size(size, suffix='B'):
"""
Converts an integer of bytes to a properly scaled human readable
string.
Example:
>>> humanize_size(15298253)
'14.6MB'
:param size: The size of the object in bytes as an integer
:return: A string of the formatted file size
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(size) < 1024.0:
return '%.1f%s%s' % (size, unit, suffix)
size /= 1024.0
    return '%.1f%s%s' % (size, 'Y', suffix)
def timezoneaware(dt, current_tz=settings.TIME_ZONE, is_dst=False):
"""
Transforms a potentially naive datetime into a timezone aware datetime,
    by utilizing the time zone setting from settings.py
:param dt: A naive datetime instance.
:param is_dst: Boolean: Are we currently under daylight savings time (summertime)?
:return: A timezone-aware datetime
"""
    tz = pytz.timezone(current_tz) if isinstance(current_tz, str) else current_tz
try:
aware_dt = make_aware(dt, timezone=tz)
except (pytz.NonExistentTimeError, pytz.AmbiguousTimeError):
aware_dt = tz.localize(dt, is_dst=is_dst)
return aware_dt
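# A minimal usage sketch (hypothetical values, assumes Django settings are configured):
# attach the project time zone to a naive datetime before storing or comparing it.
def _example_timezoneaware():
    return timezoneaware(datetime(2014, 1, 31, 12, 0))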
|
mit
| -845,939,318,202,271,400
| 26.6
| 86
| 0.636009
| false
| 3.683778
| false
| false
| false
|
scott-maddox/openbandparams
|
src/openbandparams/examples/advanced/GaInAsSb_on_GaSb/Plot_Strained_Band_Offset_vs_Composition_of_Quaternary3.py
|
1
|
2475
|
#
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of openbandparams.
#
# openbandparams is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# openbandparams is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with openbandparams. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# Make sure we import the local openbandparams version
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..')))
from openbandparams import *
import matplotlib.pyplot as plt
import numpy
# Type 3 Quaternary
alloy = GaInAsSb
# calculate the data
T = 300 # K
N = 100
xs = numpy.linspace(0, 1, N)
ys = numpy.linspace(0, 1, N)
X, Y = numpy.meshgrid(xs, ys)
Z = numpy.empty(shape=(N, N), dtype=numpy.double)
W = numpy.empty(shape=(N, N), dtype=numpy.double)
for i in xrange(N):
for j in xrange(N):
strained = alloy(x=X[i, j], y=Y[i, j]).strained_001(GaSb)
strain = strained.strain_out_of_plane(T=T)
if not (0. <= strain <= 0.03):
Z[i, j] = numpy.nan
W[i, j] = numpy.nan
else:
Z[i, j] = strained.VBO_hh(T=T) - GaSb.VBO()
W[i, j] = GaSb.CBO() - strained.CBO(T=T)
# plot it
fig = plt.figure()
CS = plt.contour(1-X, 1-Y, Z, 14, colors='r')
plt.clabel(CS, inline=True, fontsize=10)
CS2 = plt.contour(1-X, 1-Y, W, 12, colors='b')
plt.clabel(CS2, inline=True, fontsize=10)
plt.title('$%s/GaSb$ from 0 to 3%% strain (T = %.0f K)' % (alloy.latex(), T))
plt.xlabel('%s fraction' % alloy.elements[1])
plt.ylabel('%s fraction' % alloy.elements[3])
plt.plot([numpy.nan], [numpy.nan], 'b-', label='Conduction Band Offset')
plt.plot([numpy.nan], [numpy.nan], 'r-', label='Valence Band Offset')
plt.legend(loc='lower left')
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
output_filename = sys.argv[1]
plt.savefig(output_filename)
else:
plt.show()
|
agpl-3.0
| 8,786,756,771,074,003,000
| 33.873239
| 77
| 0.628283
| false
| 3.025672
| false
| false
| false
|
gbiggs/rtcshell
|
rtcshell/rtls.py
|
1
|
13317
|
#!/usr/bin/env python
# -*- Python -*-
# -*- coding: utf-8 -*-
'''rtcshell
Copyright (C) 2009-2010
Geoffrey Biggs
RT-Synthesis Research Group
Intelligent Systems Research Institute,
National Institute of Advanced Industrial Science and Technology (AIST),
Japan
All rights reserved.
Licensed under the Eclipse Public License -v 1.0 (EPL)
http://www.opensource.org/licenses/eclipse-1.0.txt
File: rtls.py
Implementation of the command to list naming contexts.
'''
# $Source$
from optparse import OptionParser, OptionError
import os
from rtctree.exceptions import RtcTreeError
from rtctree.tree import create_rtctree, InvalidServiceError, \
FailedToNarrowRootNamingError, \
NonRootPathError
from rtctree.path import parse_path
from rtctree.utils import build_attr_string, colour_supported, \
get_num_columns_and_rows, get_terminal_size
import sys
from rtcshell import RTSH_PATH_USAGE, RTSH_VERSION
from rtcshell.path import cmd_path_to_full_path
def get_node_long_lines(nodes, use_colour=True):
info_strings = []
state_width = 0
total_width = 0
in_width = 0
out_width = 0
svc_width = 0
for node in nodes:
if node.is_directory:
if state_width == 0:
state_width = 1
if total_width == 0:
total_width = 1
if in_width == 0:
in_width = 1
if out_width == 0:
out_width = 1
if svc_width == 0:
svc_width = 1
name = build_attr_string(['bold', 'blue'],
supported=use_colour) + \
node.name + build_attr_string(['reset'],
supported=use_colour)
info_strings.append((('-', 0), ('-', 0), ('-', 0),
('-', 0), ('-', 0), name))
elif node.is_manager:
# Managers are not handled yet
if state_width == 0:
state_width = 1
if total_width == 0:
total_width = 1
if in_width == 0:
in_width = 1
if out_width == 0:
out_width = 1
if svc_width == 0:
svc_width = 1
name = build_attr_string(['bold', 'green'],
supported=use_colour) + \
node.name + build_attr_string(['reset'],
supported=use_colour)
info_strings.append((('-', 0), ('-', 0), ('-', 0),
('-', 0), ('-', 0), name))
elif node.is_component:
state = node.state
state_string = node.plain_state_string
if len(state_string) > state_width:
state_width = len(state_string)
state_string = (node.get_state_string(add_colour=use_colour),
len(node.get_state_string(add_colour=use_colour)) - \
len(state_string))
num_ports = len(node.ports)
num_connected = len(node.connected_ports)
total_string = '{0}/{1}'.format(num_ports, num_connected)
if len(total_string) > total_width:
total_width = len(total_string)
coloured_string = build_attr_string('bold',
supported=use_colour) + \
str(num_ports) + \
build_attr_string('reset',
supported=use_colour) + '/' + \
str(num_connected)
total_string = (coloured_string, len(coloured_string) - \
len(total_string))
num_ports = len(node.inports)
num_connected = len(node.connected_inports)
in_string = '{0}/{1}'.format(num_ports, num_connected)
if len(in_string) > in_width:
in_width = len(in_string)
coloured_string = build_attr_string('bold',
supported=use_colour) + \
str(num_ports) + \
build_attr_string('reset',
supported=use_colour) + '/' + \
str(num_connected)
in_string = (coloured_string, len(coloured_string) - \
len(in_string))
num_ports = len(node.outports)
num_connected = len(node.connected_outports)
out_string = '{0}/{1}'.format(num_ports, num_connected)
if len(out_string) > out_width:
out_width = len(out_string)
coloured_string = build_attr_string('bold',
supported=use_colour) + \
str(num_ports) + \
build_attr_string('reset',
supported=use_colour) + '/' + \
str(num_connected)
out_string = (coloured_string, len(coloured_string) - \
len(out_string))
num_ports = len(node.svcports)
num_connected = len(node.connected_svcports)
svc_string = '{0}/{1}'.format(num_ports, num_connected)
if len(svc_string) > svc_width:
svc_width = len(svc_string)
coloured_string = build_attr_string('bold',
supported=use_colour) + \
str(num_ports) + \
build_attr_string('reset',
supported=use_colour) + '/' + \
str(num_connected)
svc_string = (coloured_string, len(coloured_string) - \
len(svc_string))
info_strings.append((state_string, total_string, in_string,
out_string, svc_string, node.name))
else:
# Other types are unknowns
if state_width == 0:
state_width = 1
if total_width == 0:
total_width = 1
if in_width == 0:
in_width = 1
if out_width == 0:
out_width = 1
if svc_width == 0:
svc_width = 1
name = build_attr_string(['faint', 'white'],
supported=use_colour) + \
node.name + build_attr_string(['reset'],
supported=use_colour)
info_strings.append((('-', 0), ('-', 0), ('-', 0),
('-', 0), ('-', 0), name))
state_width += 2
total_width += 2
in_width += 2
out_width += 2
svc_width += 2
result = []
for string in info_strings:
result.append('{0}{1}{2}{3}{4}{5}'.format(
string[0][0].ljust(state_width + string[0][1]),
string[1][0].ljust(total_width + string[1][1]),
string[2][0].ljust(in_width + string[2][1]),
string[3][0].ljust(out_width + string[3][1]),
string[4][0].ljust(svc_width + string[4][1]),
string[5]))
return result
def format_items_list(items):
gap = ' '
term_rows, term_cols = get_terminal_size()
nrows, ncols, col_widths = get_num_columns_and_rows([len(ii[1]) \
for ii in items], len(gap), term_cols)
rows = [items[s:s + ncols] for s in range(0, len(items), ncols)]
lines = []
for r in rows:
new_line = ''
for ii, c in enumerate(r):
new_line += '{0:{1}}'.format(c[0], col_widths[ii] + \
(len(c[0]) - len(c[1]))) + gap
lines.append(new_line.rstrip())
return lines
def list_directory(dir_node, long=False):
listing = dir_node.children
use_colour = colour_supported(sys.stdout)
if long:
lines = get_node_long_lines(listing, use_colour=use_colour)
return lines
else:
items = []
for entry in listing:
if entry.is_directory:
items.append((build_attr_string(['bold', 'blue'],
supported=use_colour) + \
entry.name + '/' + \
build_attr_string(['reset'],
supported=use_colour),
entry.name))
elif entry.is_component:
items.append((entry.name, entry.name))
elif entry.is_manager:
items.append((build_attr_string(['bold', 'green'],
supported=use_colour) + \
entry.name + \
build_attr_string(['reset'],
supported=use_colour),
entry.name))
else:
items.append((build_attr_string(['faint', 'white'],
supported=use_colour) + \
entry.name + \
build_attr_string(['reset'],
supported=use_colour),
entry.name))
return format_items_list(items)
def list_target(cmd_path, full_path, options, tree=None):
path, port = parse_path(full_path)
if port:
# Can't list a port
print >>sys.stderr, '{0}: Cannot access {1}: No such directory or \
object.'.format(sys.argv[0], cmd_path)
return 1
trailing_slash = False
if not path[-1]:
# There was a trailing slash
trailing_slash = True
path = path[:-1]
if not tree:
tree = create_rtctree(paths=path)
if not tree:
return 1
if not tree.has_path(path):
print >>sys.stderr, '{0}: Cannot access {1}: No such directory or \
object.'.format(sys.argv[0], cmd_path)
return 1
if tree.is_component(path):
# Path points to a single component: print it like 'ls <file>'.
if trailing_slash:
# If there was a trailing slash, complain that a component is not a
# directory.
print >>sys.stderr, '{0}: cannot access {1}: Not a \
directory.'.format(sys.argv[0], cmd_path)
return 1
if options.long:
lines = get_node_long_lines([tree.get_node(path)],
sys.stdout.isatty())
for l in lines:
print l
else:
print path[-1]
elif tree.is_directory(path):
# If recursing, need to list this directory and all its children
if options.recurse:
recurse_root = tree.get_node(path)
recurse_root_path = recurse_root.full_path
def get_name(node, args):
if node.full_path.startswith(recurse_root_path):
result = node.full_path[len(recurse_root_path):]
else:
result = node.full_path
return result.lstrip('/')
dir_names = ['.'] + recurse_root.iterate(get_name,
args=options.long, filter=['is_directory'])[1:]
listings = recurse_root.iterate(list_directory,
args=options.long, filter=['is_directory'])
for dir, listing in zip(dir_names, listings):
if dir == '.':
print '.:'
else:
print './' + dir + ':'
for l in listing:
print l
print
else:
dir_node = tree.get_node(path)
lines = list_directory(dir_node, options.long)
for l in lines:
print l
else:
print >>sys.stderr, '{0}: cannot access {1}: Unknown object \
type.'.format(sys.argv[0], cmd_path)
return 1
return 0
def main(argv=None, tree=None):
usage = '''Usage: %prog [options] [path]
List a name server, directory, manager or component.
Equivalent to the POSIX 'ls' command.
The long display shows the following information in columns:
State
Total number of ports/Total connected
Input ports/Inputs connected
Output ports/Outputs connected
Service ports/Service connected
Name
''' + RTSH_PATH_USAGE
version = RTSH_VERSION
parser = OptionParser(usage=usage, version=version)
parser.add_option('-l', dest='long', action='store_true', default=False,
help='Use a long listing format.')
parser.add_option('-d', '--debug', dest='debug', action='store_true',
default=False, help='Print debugging information. \
[Default: %default]')
parser.add_option('-R', '--recurse', dest='recurse', action='store_true',
default=False, help='List recursively. [Default: %default]')
if argv:
sys.argv = [sys.argv[0]] + argv
try:
options, args = parser.parse_args()
except OptionError, e:
print 'OptionError:', e
return 1
if not args:
cmd_path = ''
elif len(args) == 1:
cmd_path = args[0]
else:
print >>sys.stderr, usage
return 1
full_path = cmd_path_to_full_path(cmd_path)
return list_target(cmd_path, full_path, options, tree)
# vim: tw=79
|
epl-1.0
| 7,676,000,197,927,443,000
| 36.198324
| 79
| 0.490501
| false
| 4.146015
| false
| false
| false
|
nodebox/nodebox-opengl
|
examples/07-filter/05-render.py
|
1
|
2168
|
# Add the upper directory (where the nodebox module is) to the search path.
import os, sys; sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics import *
# The render() command executes a function with drawing commands
# in an offscreen (i.e. hidden) canvas and returns an Image object.
# This is useful if you want to apply filters to text, ellipses, etc.
def hello():
fill(1, 0, 0, 0.5) # Transparent red.
ellipse(120, 120, 200, 200)
fill(0, 1, 0, 0.5) # Transparent green.
ellipse(170, 120, 200, 200)
fill(0, 0, 1, 0.5) # Transparent blue.
ellipse(145, 160, 200, 200)
fill(0)
font("Droid Serif")
text("hello", x=0, y=90, fontsize=70, width=300, align=CENTER)
# We call this a "procedural" image, because it is entirely created in code.
# Procedural images can be useful in many ways:
# - applying effects to text,
# - caching a complex composition that is not frequently updated (for speed),
# - creating on-the-fly textures or shapes that are different every time,
# - using NodeBox from the command line without opening an application window.
img = render(function=hello, width=300, height=300)
# Note that we make the width and height of the offscreen canvas
# a little bit larger than the actual composition.
# This creates a transparent border, so effects don't get cut off
# at the edge of the rendered image.
# Images can be saved to file, even without starting canvas.run().
# To try it out, uncomment the following line:
#img.save("hello.png")
def draw(canvas):
canvas.clear()
# Apply a blur filter to the procedural image and draw it.
image(blur(img, scale=canvas.mouse.relative_x), 20, 100)
# Compare to the same shapes drawn directly to the canvas.
# You may notice that the rendered image has jagged edges...
# For now, there is nothing to be done about that - a soft blur can help.
translate(300,100)
fill(1, 0, 0, 0.5)
ellipse(120, 120, 200, 200)
fill(0, 1, 0, 0.5)
ellipse(170, 120, 200, 200)
fill(0, 0, 1, 0.5)
ellipse(145, 160, 200, 200)
# Start the application:
canvas.fps = 60
canvas.size = 600, 500
canvas.run(draw)
|
bsd-3-clause
| 8,009,919,124,711,394,000
| 36.37931
| 78
| 0.686808
| false
| 3.325153
| false
| false
| false
|
ace02000/pyload
|
module/plugins/accounts/SimplydebridCom.py
|
1
|
1696
|
# -*- coding: utf-8 -*-
import time
from module.plugins.internal.Account import Account
class SimplydebridCom(Account):
__name__ = "SimplydebridCom"
__type__ = "account"
__version__ = "0.15"
__status__ = "testing"
__config__ = [("mh_mode" , "all;listed;unlisted", "Filter hosters to use" , "all"),
("mh_list" , "str" , "Hoster list (comma separated)", "" ),
("mh_interval", "int" , "Reload interval in minutes" , 60 )]
__description__ = """Simply-Debrid.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("Kagenoshin", "kagenoshin@gmx.ch")]
def grab_hosters(self, user, password, data):
html = self.load("http://simply-debrid.com/api.php", get={'list': 1})
return [x.strip() for x in html.rstrip(';').replace("\"", "").split(";")]
def grab_info(self, user, password, data):
res = self.load("http://simply-debrid.com/api.php",
get={'login': 2,
'u' : user,
'p' : password})
data = [x.strip() for x in res.split(";")]
if str(data[0]) != "1":
return {'premium': False}
else:
return {'trafficleft': -1, 'validuntil': time.mktime(time.strptime(str(data[2]), "%d/%m/%Y"))}
def signin(self, user, password, data):
res = self.load("https://simply-debrid.com/api.php",
get={'login': 1,
'u' : user,
'p' : password})
if res != "02: loggin success":
self.fail_login()
|
gpl-3.0
| 2,483,385,987,593,713,700
| 35.869565
| 106
| 0.470519
| false
| 3.518672
| false
| false
| false
|
aliciawyy/CompInvest
|
load/load_data.py
|
1
|
1779
|
"""
This is the general interface to load data, either we want to
load the data from internet through panda or load local data through
QSTK for tests.
@author Alice Wang
"""
import pandas.io.data as web
from load_local_data import load_local_data_from_yahoo
def load_stock_close_price(start_date, end_date, ls_symbols, source='yahoo'):
"""
    @param start_date: start date of loading
    @param end_date: end date of loading
    @param ls_symbols: list of symbols
    @param source: source, to load from 'google', 'yahoo' or 'local'
@return: The close prices of given symbols
"""
if source == 'google':
close_key = 'Close'
elif source == 'yahoo':
close_key = 'Adj Close'
elif source == 'local':
close_key = 'close'
all_stock_data = load_all_stock_data(start_date, end_date, ls_symbols, source)
stock_close_prices = all_stock_data[close_key]
stock_close_prices = stock_close_prices.fillna(method='ffill')
stock_close_prices = stock_close_prices.fillna(method='bfill')
stock_close_prices = stock_close_prices.fillna(1.0)
return stock_close_prices
def load_all_stock_data(start_date, end_date, ls_symbols, source='yahoo'):
"""
@param start_date: start date of loading
@param end_date: end date of loading
@param ls_symbols: list of symbols
@param source: source, to load from 'google', 'yahoo' or 'local'
"""
acceptable_sources = frozenset(['google', 'yahoo', 'local'])
if source not in acceptable_sources:
raise ValueError('The given source %s is not in acceptable sources %s' % (
source, str(acceptable_sources)))
if source == 'local':
all_stock_data = load_local_data_from_yahoo(start_date, end_date, ls_symbols)
else:
all_stock_data = web.DataReader(ls_symbols, source, start=start_date, end=end_date)
return all_stock_data
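# A minimal usage sketch (hypothetical symbols and dates, not part of the original
# module): fetch adjusted close prices from the default 'yahoo' source.
def _example_load_close_prices():
    return load_stock_close_price('2014-01-01', '2014-06-30', ['AAPL', 'GOOG'])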
|
mit
| 1,172,195,368,799,243,300
| 32.566038
| 91
| 0.66498
| false
| 3.467836
| false
| false
| false
|
edisonlz/fruit
|
web_project/base/site-packages/redis_model/models/dattributes.py
|
1
|
28317
|
# -*- coding: UTF-8 -*-
import os,sys
import datetime
import time
from redis_client import RedisClient
import types
import logging
# Load configuration
import setting
from setting import logger
try:
from functools import wraps, update_wrapper
except ImportError:
from django.utils.functional import wraps, update_wrapper # Python 2.3, 2.4 fallback.
##########################Util Lib#############################
def queryset_to_dict(qs, key='pk'):
"""
Given a queryset will transform it into a dictionary based on ``key``.
param:
qs:queryset
key:string default is 'pk'
return:
dict
"""
return dict((getattr(u, key), u) for u in qs)
def distinct(l):
"""
Given an iterable will return a list of all distinct values.
param:
l:an iterable
return:
the list
"""
return list(set(l))
def attach_OneToOne(objects, model, field):
"""
Shortcut method which handles a pythonic LEFT OUTER JOIN.
    ``attach_OneToOne(posts, UserInfo, 'userinfo')``
param:
objects:object of list
model: object
field:string
"""
try:
qs = model.objects.filter(pk__in=distinct(getattr(o, "pk") for o in objects))
queryset = queryset_to_dict(qs)
for o in objects:
setattr(o, '_%s_cache' % (field), queryset.get(getattr(o, "pk")))
#print getattr(o, '_%s_cache' % (field))
#print o.userinfo
except Exception,e:
print e
def attach_foreignkey(objects, field, qs=None):
"""
Shortcut method which handles a pythonic LEFT OUTER JOIN.
``attach_foreignkey(posts, Post.thread)``
param:
objects:object of list
field:string
qs:query set default is None
"""
try:
t1 = time.time()
field = field.field
if qs is None:
qs = field.rel.to.objects.all()
qs = qs.filter(pk__in=distinct(getattr(o, field.column) for o in objects))
#if select_related:
# qs = qs.select_related(*select_related)
queryset = queryset_to_dict(qs)
if queryset:
for o in objects:
setattr(o, '_%s_cache' % (field.name), queryset.get(getattr(o, field.column)))
#print "attach_foreignkey use %s s " % (time.time() - t1)
except Exception,e:
print e
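# A minimal usage sketch (hypothetical Django models, shown as a comment only): prefetch
# each post's thread in a single query instead of one query per post, after which
# post.thread reads from the cache populated by attach_foreignkey.
#
#   posts = list(Post.objects.all()[:50])
#   attach_foreignkey(posts, Post.thread)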
##########################Util Lib#############################
def find_include(ref_klass,pks,kwargs):
"""
    search for the related objects of the given primary keys
    param:
        ref_klass:related class
pks:primary key
**kwargs:
order_by_score: True or False
include_select_related_model:True or False
include:True or False
select_related:True or False
"""
if not pks:
return []
order_by_score = kwargs.get("order_by_score",False)
include_select_related_model = kwargs.get("include_select_related_model")
    # select_related() is enabled by default
model = kwargs.get("include")
if model:
#1.fitler objects
#print "model_name %s:" % model.__name__
#print ref_klass.__name__,ref_klass.objects
#mobjs = ref_klass.objects.filter(id__in=pks).order_by('-pk')
n = datetime.datetime.now()
if order_by_score:
ids = ",".join(pks)
if ref_klass.__name__.lower() != "user":
sql = "SELECT * FROM %s where id in (%s) and status in(0,1) order by FIELD(id, %s)" % (ref_klass._meta.db_table,ids,ids)
else:
sql = "SELECT * FROM %s where id in (%s) order by FIELD(id, %s)" % (ref_klass._meta.db_table,ids,ids)
mobjs = ref_klass.objects.raw(sql)
else:
mobjs = ref_klass.objects.filter(id__in=pks)
logging.debug(" %s include use: %s" % (ref_klass,datetime.datetime.now() - n))
n = datetime.datetime.now()
#2.fitler relate objects
relate_ids = set()
for obj in mobjs:
v = getattr(obj,"%s_id" % model.__name__.lower())
if v:
relate_ids.add(v)
#print "relate_ids %s:" % relate_ids
        #3.0 resolve the related IDs
if relate_ids:
robjs = model.objects.filter(id__in=tuple(relate_ids))
#print "relate_ids len %s:" % len(robjs)
rdic = {}
for r in robjs:
rdic[r.id] = r
if include_select_related_model:
#print "deal userinfo"
attach_OneToOne(robjs,include_select_related_model,include_select_related_model.__name__.lower())
#3.set relate objects
for obj in mobjs:
setattr(obj,model.__name__.lower(),rdic.get(getattr(obj,"%s_id" % model.__name__.lower())))
logging.debug(" %s relate add use: %s" % (ref_klass,datetime.datetime.now() - n))
        #4. return the related objects
return mobjs
elif kwargs.get("select_related",False):
return ref_klass.objects.select_related(depth=1).filter(id__in=pks)
else:
if order_by_score:
ids = ",".join(pks)
if ref_klass.__name__.lower() != "user":
sql = "SELECT * FROM %s where id in (%s) and status in (0,1) order by FIELD(id, %s)" % (ref_klass._meta.db_table,ids,ids)
else:
sql = "SELECT * FROM %s where id in (%s) order by FIELD(id, %s)" % (ref_klass._meta.db_table,ids,ids)
data = []
for d in ref_klass.objects.raw(sql):
data.append(d)
return data
else:
data = ref_klass.objects.filter(id__in=pks)
return data
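# A minimal usage sketch (hypothetical model classes, shown as a comment only): resolve a
# list of primary keys back into Post objects, preserving the stored order and attaching
# each post's related User in bulk.
#
#   posts = find_include(Post, ['3', '1', '2'], {'order_by_score': True, 'include': User})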
class DAttribute(object):
def __init__(self):
"""
        Initialize the base object, reference, and description.
"""
#Base Object
self.bo = None
self.ref = None
self.descrpition = ""
def change_log(self,oper,obj_id,baseobj,pipe=None,score=None):
"""
save the relation of Reference
list|sortset:insert:user_posts:user_id:post_id
list|sortset:delete:user_posts:user_id:post_id
param:
oper: the operation type is string
obj_id: id of object type is integer
baseobj: base object
pipe: redis pipe default is None
score: use rank
"""
        # is data synchronisation enabled?
if not setting.DATA_SYNC:
return
        # initialise the Redis connection (or reuse the supplied pipe)
dp = pipe or RedisClient.getInstance().redis
        # save the change_log entry
        # format: operator : basetype_reftype-plural : base object ID : referenced object ID
basetype = str(baseobj.__class__.__name__).lower()
ref = self.ref.lower()
if basetype == ref:
ref = self.name.lower()
if oper.startswith("sortset"):
val = "%(oper)s:_:%(model_type)s_%(relate_type)ss:_:%(id)s:_:%(rid)s:_:%(score)s" % {"oper":oper,"model_type": basetype,"relate_type": ref,"id":baseobj.id,"rid" : obj_id ,"score":score}
else:
val = "%(oper)s:_:%(model_type)s_%(relate_type)ss:_:%(id)s:_:%(rid)s" % {"oper":oper,"model_type": basetype,"relate_type": ref,"id":baseobj.id,"rid" : obj_id}
logger.info("sync: " + val)
        # push the record onto the Redis list queue
dp.lpush("change_log",val)
@property
def ref_klass(self):
"""
Reference the object
return:
the object of self's Reference
"""
from django.db import models
if self.ref:
_known_models = {}
for klass in models.Model.__subclasses__():
if hasattr(klass,"objects"):
_known_models[klass.__name__] = klass
for sub in klass.__subclasses__():
if hasattr(sub,"objects"):
_known_models[sub.__name__] = sub
for ssub in sub .__subclasses__():
if hasattr(ssub,"objects"):
_known_models[ssub.__name__] = ssub
return _known_models.get(self.ref, None)
"""
    Attribute object.
"""
def set(self,instance,val):
"""
set the object's name value
param:
instance:the name type is string
val: the value type is string
"""
setattr(instance,self.name,val)
def __set__(self,instance,val):
"""
set the object's name value
param:
instance:the name type is string
val: the value type is string
"""
setattr(instance,"_"+self.name,val)
def acceptable_types(self):
"""
        Return the acceptable value type (basestring in Python 2).
return:
string
"""
return basestring
def validate(self,instance):
"""
        Validate that required data is present on the instance.
param:
instance:object
"""
if self.required:
if not self:
instance._errors.append("%s require" % self)
########################################### Start Oper Decorater#######################################
def operKey(obj,field):
"""
operate Key
param:
obj:object
field:string
return:
string
"""
return "%s:id:%s:%s" % (obj.__class__.__name__,obj.id, field)
def operSet(fn):
"""
    Decorator for write operations: builds the Redis key from the base object
    and passes the destination object's id to the wrapped method.
"""
def _new(self, *args, **kws):
try:
baseobj = args[0]
obj = args[1]
            # check validity of the input
if not obj:
logger.error("please input dest object")
raise StandardError("please input dest object")
if hasattr(obj,"id") or hasattr(obj,"_id"):
#key = "user:id:1:posts"
key = operKey(baseobj,self.name) #"%s:id:%s:%s" % (baseobj.__class__.__name__,baseobj.id, self.name)
kws["obj"] = obj
kws["baseobj"] = baseobj
fn(self,key,obj.id, **kws)
else:
logger.error("please object is new not have object.id")
raise StandardError("please object is new not have object.id")
except Exception,e:
logger.error(e)
return False
return True
    # wrap the function
return wraps(fn)(_new)
def operGet(fn):
"""
    Decorator for read operations: builds the Redis key from the object
    and delegates to the wrapped method.
"""
def _new(self, *args, **kws):
try:
obj = args[0]
#print obj.id
if hasattr(obj,"id") or hasattr(obj,"_id"):
                # if the object has an id, only the id is stored
#key = "user:id:1:posts"
key = operKey(obj,self.name) #"%s:id:%s:%s" % (obj.__class__.__name__,obj.id, self.name)
args = args[1:]
kws["obj"] = obj
return fn(self,key, *args, **kws)
else:
logger.error("please object is new not have object.id")
raise StandardError("please object is new not have object.id")
except Exception,e:
logger.error(e)
return None
    # wrap the function
return wraps(fn)(_new)
########################################### End Oper Decorater#######################################
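# A minimal usage sketch (hypothetical instances, shown as a comment only): the operSet /
# operGet decorators translate an attribute access on a model instance into a Redis key
# of the form "<Class>:id:<pk>:<field>" before running the wrapped method, e.g.
#
#   operKey(user, 'posts')   # -> 'User:id:1:posts' for a User with id 1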
class DListField(DAttribute):
def __init__(self,ref=None,required=False,name = None):
"""
initialize
param:
ref:object default is None
required:True or false default is False
name:string
"""
super(DAttribute,self).__init__()
self.ref = ref
self.index = False
self.required = required
self.name = name
""" 添加List 方法 """
@operSet
def lpush(self,key,value,**kwargs):
"""
LPUSH key value Append an element to the head of the List value at key
param;
key:string
value:string
**kwargs: a dict
obj:object
baseobj:base object
return:
True or False
"""
#print "listfield lpush ",key,",",value
try:
if setting.Debug:
n = datetime.datetime.now()
pipe = RedisClient.getInstance().redis.pipeline()
pipe.lpush(key,value)
self.change_log("list:insert",kwargs["obj"].id,kwargs["baseobj"],pipe)
pipe.execute()
if setting.Debug:
logger.info(" lpush key: %s,use : %s" % (key,datetime.datetime.now() - n))
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
@operSet
def rpush(self,key,value,**kwargs):
"""
        push the value onto the tail (right end) of the Redis list at key
param;
key:string
value:string
**kwargs: a dict
obj:object
baseobj:base object
return:
True or False
"""
#Save
#print "listfield rpush ",key,",",value
try:
pipe = RedisClient.getInstance().redis.pipeline()
pipe.rpush(key,value)
self.change_log("list:insert",kwargs["obj"].id,kwargs["baseobj"],pipe)
pipe.execute()
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
@operGet
def lpop(self,key,**kwargs):
"""
LPOP key Return and remove (atomically) the first element of the List at key
param;
key:string
**kwargs: a dict
obj:object
return:
object
"""
# LPOP key Return and remove (atomically) the first element of the List at key
#print "lpop key",key
pk = RedisClient.getInstance().redis.lpop(key)
self.change_log("list:delete",pk,kwargs["obj"])
objs = self.ref_klass.objects.filter(id=pk)
if objs:
return objs[0]
return None
@operGet
def rpop(self,key,**kwargs):
"""
        RPOP key Return and remove (atomically) the last element of the List at key
param;
key:string
**kwargs: a dict
obj:object
return:
object
"""
#print "rpop key",key
pk = RedisClient.getInstance().redis.rpop(key)
self.change_log("list:delete",pk,kwargs["obj"])
objs = self.ref_klass.objects.filter(id=pk)
if objs:
return objs[0]
return None
@operGet
def llen(self,key,**kwargs):
"""
LLEN key Return the length of the List value at key
param;
key:string
**kwargs: a dict
return:
integer of length
"""
#print "len key",key
return RedisClient.getInstance().redis.llen(key)
@operGet
def lrange(self,key,start=0,end=10,**kwargs):
"""
LRANGE key start end Return a range of elements from the List at key
param:
key:string
start:integer default is 0
end:integer default is 10
**kwargs:dict
return:
the data in list
"""
if setting.Debug:
n = datetime.datetime.now()
pks = RedisClient.getInstance().redis.lrange(key,start,end)
if setting.Debug:
logger.info("lrange key: %s,start: %s, end: %s ,use : %s" % (key,start,end,datetime.datetime.now() - n))
        # return the collection of related objects
return find_include(self.ref_klass,pks,kwargs)
#return self.ref_klass.objects.filter(pk__in = pks)
## @operGet
## def ltrim(self,key,start=0,end=10):
    ##  this is not supported by the synchronisation mechanism
## # LTRIM key start end Trim the list at key to the specified range of elements
## return RedisClient.getInstance().redis.ltrim(key,start,end)
@operSet
def lrem(self,key,id,count=1,**kwargs):
"""
LREM key count value Remove the first-N, last-N, or all the elements matching value from the List at key
param:
key:string
count:integer default is 1
id:integer
**kwargs:dict
baseobj:base object
return:
True or False
"""
#print "rem key",key
#print "rem value",id
try:
pipe = RedisClient.getInstance().redis.pipeline()
pipe.lrem(key,id,count)
self.change_log("list:delete",id,kwargs["baseobj"])
pipe.execute()
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
@operGet
def delete(self,key,pipe,**kwargs):
"""
        delete the whole list stored at this key
param:
key: string
pipe: redis pipe
return:
True or false
"""
        db = pipe or RedisClient.getInstance().redis
return db.delete(key)
class DSetField(DAttribute):
    # constant definitions
redis = RedisClient.getInstance().redis
def __init__(self,ref=None,required=False,name=None):
"""
initialize reference object name index and required
param:
ref:reference object
required:True or False
name:string
"""
super(DAttribute,self).__init__()
self.ref = ref
self.name = name
self.index = False
self.required = required
@operSet
def sadd(self,key,member,**kwargs):
"""
SADD key member Add the specified member to the Set value at key
param:
key:string
member:string
**kwargs:include obj and baseobj
obj:the object
baseobj: base object
return:
True or False
"""
try:
if setting.Debug:
n = datetime.datetime.now()
pipe = DSetField.redis.pipeline()
pipe.sadd(key,member)
self.change_log("set:insert",kwargs["obj"].id,kwargs["baseobj"],pipe)
pipe.execute()
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
#RedisClient.getInstance().redis.sadd(key,member)
@operGet
def spop(self,key,**kwargs):
"""
SPOP key Remove and return (pop) a random element from the Set value at key
param:
key:string
**kwargs:include obj
obj:the object
return:
object
"""
# SPOP key Remove and return (pop) a random element from the Set value at key
pk = DSetField.redis.spop(key)
self.change_log("set:delete",pk,kwargs["obj"])
#print '#'*10
#print pk
#print self.ref_klass
#print '#'*10
objs = self.ref_klass.objects.filter(pk=pk)
if objs:
return objs[0]
return None
@operSet
def srem(self,key,member,**kwargs):
"""
SREM key member Remove the specified member from the Set value at key
param:
key:string
member:string
**kwargs:include baseobj
baseobj: base object
return:
True or False
"""
#SREM key member Remove the specified member from the Set value at key
try:
pipe = DSetField.redis.pipeline()
pipe.srem(key,member)
self.change_log("list:delete",member,kwargs["baseobj"])
pipe.execute()
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
#RedisClient.getInstance().redis.srem(key,member)
@operGet
def scard(self,key,**kwargs):
"""
SCARD key Return the number of elements (the cardinality) of the Set at key
param:
key:string
**kwargs:dict
return:
count of set by key
"""
return DSetField.redis.scard(key)
@operGet
def sismember(self,key,member_id,**kwargs):
# SISMEMBER key member Test if the specified value is a member of the Set at key
return DSetField.redis.sismember(key,member_id)
@operGet
def smembers(self,key,**kwargs):
"""
SMEMBERS key Return all the members of the Set value at key
param:
key:string
**kwargs:dict
return:
objects of list
"""
# SMEMBERS key Return all the members of the Set value at key
if setting.Debug:
n = datetime.datetime.now()
pks = DSetField.redis.smembers(key)
if kwargs.get("only_ids",False):
return pks
return self.ref_klass.objects.filter(pk__in = pks)
@operGet
def delete(self,key,pipe,**kwargs):
"""
delete the value of key
param;
key:string
pipe:redis
**kwargs:dict
return:
True or False
"""
        db = pipe or DSetField.redis
return db.delete(key)
class DSortSetField(DAttribute):
    # constant definitions
redis = RedisClient.getInstance().redis
def __init__(self,ref=None,required=False,index = False,name=None,limit=500):
"""
initialize name index reference object limit
param:
ref:reference object
required:True or False
index:True or False
name:string default is None
            limit:integer default is 500
"""
super(DAttribute,self).__init__()
self.name = name
self.index = index
self.ref = ref
self.required = required
        # cap the maximum size of the sorted set; 0 means no limit
self.limit = limit
@operSet
def zadd(self,key ,member ,score,**kwargs):
"""
add the member into the sorted set by score
        if the member already exists, its score is updated
param:
key:string
member:string
score:rank integer
**kwargs:include obj and baseobj
obj:object
baseobj:base object
return:
True or False
"""
try:
if setting.Debug:
n = datetime.datetime.now()
pipe = DSortSetField.redis.pipeline()
pipe.zadd(key ,member ,score)
self.change_log("sortset:insert",kwargs["obj"].id,kwargs["baseobj"],pipe,score)
pipe.execute()
            #Start: trim entries beyond the configured limit
if self.limit > 0:
zcard = DSortSetField.redis.zcard(key)
#print "zcard",zcard
if zcard > self.limit:
#print "* " * 20
#print "Start 删除超过LIMIT的"
#print "rem %s " % key
delete_to = zcard - self.limit
DSortSetField.redis.zremrangebyrank(key,0,delete_to)
#End
return True
except Exception,e:
pipe.reset()
logger.error(e)
return False
#return RedisClient.getInstance().redis.zadd(key ,member ,score)
@operGet
def zrank(self, key ,member_id,**kwargs):
"""
        get the index (rank) of the member in the sorted set;
        the lowest score comes first
param:
key:string
member_id:integer
**kwargs:dict
return:
integer
"""
return DSortSetField.redis.zrank( key , member_id)
@operGet
def zrevrank( self,key , member_id,**kwargs):
"""
        get the index (rank) of the member in the sorted set;
        the highest score comes first
param:
key:string
member_id:integer
**kwargs:dict
return:
integer
"""
return DSortSetField.redis.zrevrank( key ,member_id)
@operGet
def zrange(self, key , start=0, end=10,**kwargs):
"""
        get the members of the sorted set between start and end;
        the lowest score comes first
param:
key:string
start:integer
end:integer
**kwargs:dict
return:
members of list
"""
pks = DSortSetField.redis.zrange( key ,start, end) or []
if kwargs.get("only_ids",False):
return pks
else:
return find_include(self.ref_klass,pks,kwargs)
@operGet
def zrevrange(self, key ,start=0, end=10,**kwargs):
"""
        get the members of the sorted set between start and end;
        the highest score comes first
param:
key:string
member_id:integer
**kwargs:dict
return:
integer
"""
if setting.Debug:
n = datetime.datetime.now()
withscores = kwargs.get("withscores",False)
#t = time.time()
data = DSortSetField.redis.zrevrange( key ,start, end,withscores = withscores) or []
#print "zrevrange use:" ,time.time() - t
        # scores are returned along with the members
if withscores:
pks = []
scores = {}
for d in data:
pks.append(d[0])
scores[d[0]] = d[1]
else:
pks = data
#print "withscores use:" ,time.time() - t
if kwargs.get("only_ids",False):
return pks
else:
mobjs = find_include(self.ref_klass,tuple(pks),kwargs)
#print "find_include use:" ,time.time() - t
            # attach each score to its object as the rd_score attribute
if withscores and mobjs:
m_raws = []
for obj in mobjs:
setattr(obj,"rd_score",scores[str(obj.pk)])
m_raws.append(obj)
mobjs = m_raws
return mobjs
@operGet
def zrangebyscore(self, key ,min, max,**kwargs):
"""
get the the member in sorted set between min and max
param:
key:string
min:integer
max:integer
**kwargs:dict
return:
members of list
"""
pks = DSortSetField.redis.zrangebyscore( key ,min, max) or []
return self.ref_klass.objects.filter(pk__in = pks)
@operGet
def zscore(self, key ,member,**kwargs):
"""
get the score of member
param:
key:string
member_id:integer
**kwargs:dict
return:
score
"""
return DSortSetField.redis.zscore( key ,member.id)
@operGet
def zcard(self, key,**kwargs ):
"""
        get the cardinality (number of members) of the sorted set
param:
key:string
**kwarg:dict
return:
count of list
"""
return DSortSetField.redis.zcard( key )
@operSet
def zrem(self, key,member_id,**kwargs):
"""
delete the member in sorted set
param:
key:string
member_id:integer
**kwargs:dict
return:
True or False
"""
try:
DSortSetField.redis.zrem( key,member_id)
return True
except Exception,e:
logger.error(e)
return False
@operGet
def zremrangebyrank(self,key,min_rank=0,max_rank=1,**kwargs):
"""
        trim the sorted set by rank, removing members between min_rank and max_rank
        (used to keep the set at a bounded size)
param:
key:string
min_rank:integer default is 0
max_rank:integer default is 1
**kwargs:dict
retrun:
True or False
"""
try:
DSortSetField.redis.zremrangebyrank(key,min_rank,max_rank)
return True
except Exception,e:
logger.error(e)
return False
|
apache-2.0
| 6,926,544,624,200,830,000
| 29.264579
| 197
| 0.511115
| false
| 4.033535
| false
| false
| false
|
bfalacerda/strands_executive
|
task_executor/scripts/mdp_task_executor.py
|
1
|
63607
|
#!/usr/bin/env python
from __future__ import with_statement
import rospy
from Queue import Queue, Empty
from strands_executive_msgs.msg import Task, ExecutionStatus, DurationMatrix, DurationList, ExecutePolicyAction, ExecutePolicyFeedback, ExecutePolicyGoal, MdpStateVar, StringIntPair, StringTriple, MdpAction, MdpActionOutcome, MdpDomainSpec, TaskEvent
from strands_executive_msgs.srv import GetGuaranteesForCoSafeTask, GetGuaranteesForCoSafeTaskRequest, AddCoSafeTasks, DemandCoSafeTask, GetBlacklistedNodes
from task_executor.base_executor import BaseTaskExecutor
from threading import Thread, Condition
from task_executor.execution_schedule import ExecutionSchedule
from operator import attrgetter
from math import floor
import threading
import actionlib
from task_executor.SortedCollection import SortedCollection
from task_executor.utils import rostime_to_python, rostime_close, get_start_node_ids, ros_duration_to_string, ros_time_to_string, max_duration
from dateutil.tz import tzlocal
from copy import copy, deepcopy
from actionlib_msgs.msg import GoalStatus
from rosgraph_msgs.msg import Clock
ZERO = rospy.Duration(0)
class MDPTask(object):
"""
    Class to store a task together with its MDP state variable and action.
"""
def __init__(self, task, state_var, action, is_ltl = False, is_on_demand = False, is_interruptible = True):
self.task = task
self.state_var = state_var
self.action = action
self.is_ltl = is_ltl
self.is_mdp_spec = False
self.mdp_spec = None
self.is_on_demand = is_on_demand
self.is_interruptible = is_interruptible
def _set_mdp_spec(self, mdp_spec):
self.mdp_spec = mdp_spec
self.is_mdp_spec = True
self.is_ltl = False
class MDPTaskExecutor(BaseTaskExecutor):
"""
Executor which receives Task objects and converts them into MdpActions and manages their execution.
This distinguishes between three different types of tasks:
a) On demand tasks, which should be executed immediately.
b) Time-critical tasks, which should be executed as close to their start time as possible
    c) Normal tasks, which should preferably (but not necessarily) be executed within their time window
On demand tasks are added by the demand task service. The other types are added by the add task service. Time critical tasks are identified by having the same start and end time.
This executor respects task priorities and interruptibility in as far as tasks which declare themselves as uninterruptible will not be interrupted by a same or lower priority on-demand task, and no uninterruptible task will be cancelled due to a timeout.
The clear schedule service cancels all execution (regardless of uninterruptibility state) removes all tasks from the executor.
The executor publishes a schedule which is an ordering over tasks indicating the approximate order they will be considered for execution.
Normal tasks are sent to MDP execution in batches. These batches are limited to a configurable size (rosparam ~mdp_batch_size). On-demand and time critical tasks always have a batch size of one.
"""
def __init__(self):
# init node first, must be done before call to super init for service advertising to work
rospy.init_node("task_executor", log_level=rospy.INFO)
if rospy.get_param('use_sim_time', False):
rospy.loginfo('Using sim time, waiting for time update')
rospy.wait_for_message('/clock', Clock)
# init superclasses
super( MDPTaskExecutor, self ).__init__()
# collection of MDPTasks sorted by deadline
self.normal_tasks = SortedCollection(key=(lambda t: t.task.end_before))
self.time_critical_tasks = SortedCollection(key=(lambda t: t.task.execution_time))
# how late can tasks be expected to be before they're dropped at planning time
self.allowable_lateness = rospy.Duration(rospy.get_param("~allowable_lateness", 300))
self.state_lock = threading.Lock()
self.mdp_exec_client = None
self.set_active_batch([])
self.to_cancel = set()
# is a on-demand task active
self.on_demand_active = False
# only ever allow one batch in the execution queue. If this restriction is removed then demanding won't work immediately
self.mdp_exec_queue = Queue(maxsize = 1)
# Whether or not the normal tasks need to be checked
self.recheck_normal_tasks = False
# how much time should we try to fill with tasks. this is the default and will be extended if necessary
self.execution_window = rospy.Duration(1200)
# and the max number of tasks to fit into this window due to MDP scaling issues
self.batch_limit = 5
self.expected_completion_time = rospy.Time()
self.mdp_exec_thread = Thread(target=self.mdp_exec)
self.mdp_exec_thread.start()
# topic on which current schedule is broadcast
self.schedule_publisher = rospy.Publisher('current_schedule', ExecutionStatus, latch = True, queue_size = 1)
self.all_tasks_schedule_publisher = rospy.Publisher('task_executor/all_tasks', ExecutionStatus, latch = True, queue_size = 1)
self.update_schedule_condition = Condition()
self.schedule_publish_thread = Thread(target=self.publish_schedule)
self.schedule_publish_thread.start()
self.use_combined_sort_criteria = rospy.get_param('~combined_sort', False)
self.cancel_at_window_end = rospy.get_param('~close_windows', False)
if self.use_combined_sort_criteria:
rospy.loginfo('Using combined sort criteria')
else:
rospy.loginfo('Using separate sort criteria')
self.advertise_services()
self.tz = tzlocal()
def add_co_safe_tasks_ros_srv(self, req):
"""
        Adds one or more co-safe tasks into the task execution framework.
"""
try:
self.service_lock.acquire()
now = rospy.get_rostime()
task_ids = []
tasks = []
task_spec_triples = []
for mdp_task in req.mdp_tasks:
task = Task()
task.task_id = self.get_next_id()
task_ids.append(task.task_id)
task.start_after = mdp_task.start_after
task.end_before = mdp_task.end_before
task.priority = mdp_task.priority
task.action = mdp_task.mdp_spec.ltl_task
if task.start_after.secs == 0:
rospy.logwarn('Task %s did not have start_after set' % (task.action))
task.start_after = now
if task.end_before.secs == 0:
rospy.logwarn('Task %s did not have end_before set, using start_after' % (task.action))
task.end_before = task.start_after
tasks.append(task)
task_spec_triples.append((task, mdp_task.mdp_spec, mdp_task.is_interruptible))
self.add_specs(task_spec_triples)
self.log_task_events(tasks, TaskEvent.ADDED, rospy.get_rostime())
return [task_ids]
finally:
self.service_lock.release()
add_co_safe_tasks_ros_srv.type=AddCoSafeTasks
def demand_co_safe_task_ros_srv(self, req):
"""
        Demand a task from the execution framework.
"""
try:
self.service_lock.acquire()
now = rospy.get_rostime()
if not self.are_active_tasks_interruptible():
return [False, 0, self.active_task_completes_by - now]
# A task needs to be created for internal monitoring
task = Task()
task.task_id = self.get_next_id()
task.start_after = req.start_after
task.end_before = req.end_before
task.action = req.domain_spec.ltl_task
# give the task some sensible defaults
if task.start_after.secs == 0:
rospy.loginfo('Demanded task %s did not have start_after set, using now' % (task.action))
task.start_after = now
if task.end_before.secs == 0:
rospy.loginfo('Demand task %s did not have end_before set, using start_after' % (task.action))
# make this appear as a time-critical task
task.end_before = now
task.execution_time = now
# stop anything else
if len(self.active_tasks) > 0:
self.pause_execution()
self.executing = False
self.cancel_active_task()
# and inform implementation to let it take action
self.spec_demanded(task, req.domain_spec)
if not self.executing:
self.executing = True
self.start_execution()
self.log_task_event(task, TaskEvent.DEMANDED, rospy.get_rostime())
return [True, task.task_id, rospy.Duration(0)]
finally:
self.service_lock.release()
demand_co_safe_task_ros_srv.type=DemandCoSafeTask
def _extend_formalua_with_exec_flag(self, formula, state_var_name):
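        # splice the conjunct '& (X <state_var_name>=1)' into the formula just
        # before its final closing parenthesis, so the extra state variable is
        # set to 1 when the rest of the formula is satisfied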
insert_after = len(formula) - 1
for i in range(len(formula) - 1, 0, -1):
if formula[i] == ')':
insert_after = i
elif formula[i] == '(':
break
return formula[:insert_after] + ' & (X ' + state_var_name + '=1)' + formula[insert_after:]
def _create_travel_mdp_task(self, waypoint):
""" Creates an MDP task for just reacing these waypoints
"""
state_var = MdpStateVar()
action = MdpAction()
task = Task(action='(F "%s")' % waypoint)
return MDPTask(task, state_var, action, is_ltl = True)
def _convert_spec_to_mdp_action(self, task, mdp_spec, is_ltl = False, is_interruptible = True):
"""
Converts an already formed MdpDomainSpec into our internal representation that's now a bit redundant.
"""
mdp_task = MDPTask(task, None, None, is_ltl = is_ltl, is_interruptible = is_interruptible)
mdp_task._set_mdp_spec(mdp_spec)
return mdp_task
def _convert_task_to_mdp_action(self, task):
""" Converts a Task to a MdpAction.
        returns an MDPTask wrapping the task, its state variable and its action
"""
is_ltl = False
# if this is the case then we're passed an LTL formula
if ' ' in task.action:
# action_name = 'n'+ str(task.task_id) + '_ltl_task'
# state_var_name = 'executed_' + action_name
state_var = MdpStateVar()
outcome = MdpActionOutcome()
action = MdpAction()
# task.action = self._extend_formalua_with_exec_flag(task.action, state_var_name)
# state_var = MdpStateVar(name = state_var_name,
# init_val = 0, min_range = 0,
# max_range = 1)
# outcome = MdpActionOutcome(probability = 1.0,
# post_conds = [StringIntPair(string_data = state_var_name, int_data = 1)],
# duration_probs = [1.0],
# durations = [0])
# action = MdpAction(name=action_name,
# pre_conds=[StringIntPair(string_data=state_var_name, int_data=0)],
# outcomes=[outcome])
is_ltl = True
else:
action_name = 'n'+ str(task.task_id) + '_' + task.action + '_at_' + task.start_node_id.replace(' | ', '_or_')
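            # e.g. task 12 with action 'check_door' at 'WayPoint3' yields the
            # action name 'n12_check_door_at_WayPoint3'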
# make sure there is nothing to make PRISM cry
action_name = action_name.replace('/','_')
state_var_name = 'executed_' + action_name
state_var = MdpStateVar(name = state_var_name,
init_val = 0, min_range = 0,
max_range = 1)
outcome=MdpActionOutcome(probability = 1.0,
post_conds = [StringIntPair(string_data = state_var_name, int_data = 1)],
duration_probs = [1.0],
durations = [task.expected_duration.to_sec()])
action = MdpAction(name=action_name,
action_server=task.action,
pre_conds=[StringIntPair(string_data=state_var_name, int_data=0)],
outcomes=[outcome])
if len(task.start_node_id) > 0:
for wp in get_start_node_ids(task):
action.waypoints.append(wp)
action.arguments = task.arguments
# print state_var
# print action
return MDPTask(task, state_var, action, is_ltl = is_ltl)
def add_tasks(self, tasks):
""" Called with new tasks for the executor """
with self.state_lock:
for task in tasks:
mdp_task = self._convert_task_to_mdp_action(task)
if task.start_after == task.end_before:
self.time_critical_tasks.insert(mdp_task)
else:
self.normal_tasks.insert(mdp_task)
self.republish_schedule()
self.recheck_normal_tasks = True
def add_specs(self, task_spec_triples):
""" Called with new mdp_specs for the executor """
with self.state_lock:
for task, mdp_spec, is_interruptible in task_spec_triples:
mdp_task = self._convert_spec_to_mdp_action(task, mdp_spec, is_interruptible = is_interruptible)
if task.start_after == task.end_before:
self.time_critical_tasks.insert(mdp_task)
else:
self.normal_tasks.insert(mdp_task)
self.republish_schedule()
self.recheck_normal_tasks = True
def spec_demanded(self, task, mdp_spec):
with self.state_lock:
prior_execution_state = self.executing
# this cleans up the current execution and sets self.executing to false
self.pause_execution()
# todo: potential race condition -- what happens if someone calls start/pause execution here
with self.state_lock:
# convert the demanded task into an mdp task for policy execution
demanded_mdp_task = self._convert_spec_to_mdp_action(task, mdp_spec)
demanded_mdp_task.is_on_demand = True
# and queue it up for execution
mdp_goal = self._mdp_single_task_to_goal(demanded_mdp_task)
# put blocks until the queue is empty, so we guarantee that the queue is empty while we're under lock
tasks = [demanded_mdp_task]
self.mdp_exec_queue.put((mdp_goal, tasks, self._get_guarantees_for_batch(tasks)[1]))
rospy.loginfo('Queued up demanded task: %s' % (demanded_mdp_task.task.action))
self.executing = prior_execution_state
def goal_status_to_task_status(self, goal_status):
if goal_status == GoalStatus.PREEMPTED:
return TaskEvent.TASK_PREEMPTED
elif goal_status == GoalStatus.SUCCEEDED:
return TaskEvent.TASK_SUCCEEDED
elif goal_status == GoalStatus.ACTIVE:
return TaskEvent.TASK_FAILED
else:
if goal_status != GoalStatus.ABORTED:
rospy.logwarn('Unknown conversion to TaskStatus for %s' % GoalStatus.to_string(goal_status))
return TaskEvent.TASK_FAILED
def mdp_exec_feedback(self, feedback):
"""
Called during execution with feedback from policy execution.
"""
with self.state_lock:
# print("Got Feedback: " + str(feedback))
rospy.loginfo('%s received feedback %s, %s' % (feedback.executed_action, GoalStatus.to_string(feedback.execution_status), feedback.expected_time.to_sec()))
self.expected_completion_time = self._expected_duration_to_completion_time(feedback.expected_time)
# if feedback.execution_status >= GoalStatus.PREEMPTED:
            # we don't need to check this status as we only receive this feedback in the terminal states of the mdp, so this action is done regardless
            # todo: PREEMPTED means the action started but was cancelled during the execution of the action server
            # todo: ACTIVE means the action started but was cancelled during the execution of the action server but didn't preempt
            # todo: should we allow these to be re-added to the execution queue? currently preemption signals that a task has been permanently removed
# todo: if added back to normal tasks it will almost certainly be re-executed immediately as it's at the current location, causing a loop
now = rospy.get_rostime()
if feedback.executed_action != '' and self.remove_active_task(feedback.executed_action, self.goal_status_to_task_status(feedback.execution_status)):
# update the time critical tasks based on current location
self._update_time_critical_tasks(now)
self.republish_schedule()
def remove_active_task(self, action_name, task_status):
"""
Remove the indicated task from the active batch. This is based on the (valid) assumption that the action name uniquely identifies the task.
"""
for i in range(len(self.active_batch)):
mdp_task = self.active_batch[i]
if mdp_task.action is not None and mdp_task.action.name == action_name:
del self.active_batch[i]
del self.active_tasks[i]
log_string = 'Removing completed active task: %s. %s remaining in active batch' % (action_name, len(self.active_batch))
rospy.loginfo(log_string)
self.log_task_event(mdp_task.task, task_status, rospy.get_rostime(), description = log_string)
return True
# rospy.logwarn('Could not find %s in active batch' % action_name)
return False
def _check_for_late_normal_tasks(self, now):
"""
Removes any normal tasks which are too late to start execution
"""
dropped = False
while len(self.normal_tasks) > 0:
# look at the next normal task
next_normal_task = self.normal_tasks[0]
# drop the task if there's not enough time for expected duration to occur before the window closes
# this ignores the navigation time for this task, making task dropping more permissive than it should be. this is ok for now.
if now > (next_normal_task.task.end_before - next_normal_task.task.expected_duration):
log_string = 'Dropping queued normal task %s at %s as time window closed at %s ' % (next_normal_task.task.action, rostime_to_python(now), rostime_to_python(next_normal_task.task.end_before))
rospy.loginfo(log_string)
self.normal_tasks = SortedCollection(self.normal_tasks[1:], key=(lambda t: t.task.end_before))
self.log_task_event(next_normal_task.task, TaskEvent.DROPPED, now, description = log_string)
dropped = True
else:
break
return dropped
def _check_for_late_time_critical_tasks(self, now):
"""
Removes any time-critical tasks which are too late to start execution
"""
dropped = False
while len(self.time_critical_tasks) > 0:
next_time_critical_task = self.time_critical_tasks[0]
until_next_critical_task = next_time_critical_task.task.execution_time - now
if until_next_critical_task < (ZERO - self.allowable_lateness):
                log_string = 'Dropping time-critical task %s as %ss remaining is not enough time for execution' % (next_time_critical_task.action.name, until_next_critical_task.to_sec())
rospy.loginfo(log_string)
self.time_critical_tasks = SortedCollection(self.time_critical_tasks[1:], key=(lambda t: t.task.execution_time))
self.log_task_event(next_time_critical_task.task, TaskEvent.DROPPED, now, description = log_string)
dropped = True
else:
break
return dropped
def _get_blacklisted_nodes(self):
"""
Gets blacklisted nodes from service. If service does not exist, returns an empty list.
"""
try:
get_blacklisted_nodes = rospy.ServiceProxy('task_executor/get_blacklisted_nodes', GetBlacklistedNodes)
resp = get_blacklisted_nodes()
return resp.nodes
except rospy.ServiceException, e:
print "Service call failed: %s"%e
return []
def _mdp_single_task_to_goal(self, mdp_task):
mdp_spec = self._mdp_tasks_to_spec([mdp_task])
return ExecutePolicyGoal(spec = mdp_spec)
def _mdp_tasks_to_spec(self, mdp_tasks):
"""
Take a collection of MDPTask objects and produce an MdpDomainSpec from them.
"""
mdp_spec = MdpDomainSpec()
ltl_tasks = []
non_ltl_tasks = []
for mdp_task in mdp_tasks:
if mdp_task.is_ltl:
ltl_tasks.append(mdp_task)
elif mdp_task.is_mdp_spec:
ltl_tasks.append(mdp_task)
mdp_spec.vars.extend(mdp_task.mdp_spec.vars)
mdp_spec.actions.extend(mdp_task.mdp_spec.actions)
else:
non_ltl_tasks.append(mdp_task)
mdp_spec.vars.append(mdp_task.state_var)
mdp_spec.actions.append(mdp_task.action)
mdp_spec.ltl_task = ''
task_prefix = 'F '
# prevent the policy from visiting blacklisted nodes
# short-term fix is to have (!X U Y) & (!X U Z),
# but longer term is Bruno adding G !X so we can have global invariants
blacklist = self._get_blacklisted_nodes()
if len(blacklist) > 0:
task_prefix = '(!\"%s\"' % blacklist[0]
for bn in blacklist[1:]:
task_prefix += ' & !\"%s\"' % bn
task_prefix += ') U '
if len(non_ltl_tasks) > 0:
for mdp_task in non_ltl_tasks:
mdp_spec.ltl_task += '(%s %s=1) & ' % (task_prefix, mdp_task.state_var.name)
mdp_spec.ltl_task = mdp_spec.ltl_task[:-3]
# mdp_spec.ltl_task += '))'
if len(ltl_tasks) > 0:
mdp_spec.ltl_task += ' & '
if len(ltl_tasks) > 0:
for ltl_task in ltl_tasks:
if ltl_task.is_mdp_spec:
mdp_spec.ltl_task += ltl_task.mdp_spec.ltl_task
mdp_spec.ltl_task += ' & '
else:
mdp_spec.ltl_task += ltl_task.task.action
mdp_spec.ltl_task += ' & '
mdp_spec.ltl_task = mdp_spec.ltl_task[:-3]
return mdp_spec
def _drop_out_of_time_tasks(self, now):
"""
        Drop any normal or time-critical tasks whose time windows have been exceeded.
"""
dropped = self._check_for_late_time_critical_tasks(now)
dropped = dropped or self._check_for_late_normal_tasks(now)
return dropped
def _get_guarantees_for_batch(self, task_batch, estimates_service = None, initial_waypoint = None, epoch = None):
if epoch is None:
epoch = rospy.get_rostime()
if initial_waypoint is None:
initial_waypoint = self.get_topological_node()
if estimates_service is None:
estimates_service = rospy.ServiceProxy('mdp_plan_exec/get_guarantees_for_co_safe_task', GetGuaranteesForCoSafeTask)
estimates_service.wait_for_service()
spec = self._mdp_tasks_to_spec(task_batch)
request = GetGuaranteesForCoSafeTaskRequest(spec = spec, initial_waypoint = initial_waypoint, epoch = epoch)
service_response = estimates_service(request)
return (spec, service_response)
def _choose_new_active_batch(self, task_check_limit, now, execution_window):
"""
Choose the tasks to execute next.
task_check_limit says how far along the normal task list to go to look at possible tasks
"""
# evaluated_at_least_one_task, new_active_batch, new_active_spec, new_active_guarantees = self._choose_new_active_batch()
mdp_estimates = rospy.ServiceProxy('mdp_plan_exec/get_guarantees_for_co_safe_task', GetGuaranteesForCoSafeTask)
mdp_estimates.wait_for_service()
last_successful_spec = None
possibles_with_guarantees_in_time = []
possibles_with_guarantees = []
        # now for each single task, get independent guarantees
for mdp_task in self.normal_tasks[:task_check_limit]:
try:
(mdp_spec, guarantees) = self._get_guarantees_for_batch([mdp_task], estimates_service = mdp_estimates, epoch = now)
                # only reason about combining tasks that have their windows open and are achievable on their own
#
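                # estimated navigation time: expected policy time minus the task's own max duration, floored at zero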
nav_time = max_duration(guarantees.expected_time - mdp_task.task.max_duration, ZERO)
if False:
print 'timing details'
print ros_time_to_string(now)
print ros_time_to_string(mdp_task.task.start_after)
print ros_duration_to_string(guarantees.expected_time)
print ros_duration_to_string(mdp_task.task.max_duration)
print "Start by: %s" % ros_time_to_string(mdp_task.task.start_after - nav_time)
if now > (mdp_task.task.start_after - nav_time):
if guarantees.probability > 0 and guarantees.expected_time <= execution_window:
possibles_with_guarantees_in_time.append((mdp_task, mdp_spec, guarantees))
# keep all guarantees anyway, as we might need to report one if we can't find a task to execute
possibles_with_guarantees.append((mdp_task, mdp_spec, guarantees))
except Exception, e:
rospy.logwarn('Ignoring task due to: %s' % e)
self.normal_tasks.remove(mdp_task)
if self.use_combined_sort_criteria:
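            # single scalar sort criterion: reward = (priority * success probability) / expected execution time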
def task_reward(task_tuple):
# sanity check for zero-time case
if task_tuple[2].expected_time.secs > 0:
expected_time = task_tuple[2].expected_time.to_sec()
else:
expected_time = 1.0
# sanity check for zero priority case
if task_tuple[0].task.priority == 0:
rospy.logwarn('Priority is used for sorting but task %s had a priority of 0' % (task_tuple[0].task.action))
priority = 1.0
else:
priority = task_tuple[0].task.priority
return (priority*task_tuple[2].probability)/expected_time
possibles_with_guarantees_in_time = sorted(possibles_with_guarantees_in_time, key=lambda x: task_reward(x), reverse=True)
for possible in possibles_with_guarantees_in_time:
rospy.loginfo('%s, with reward %.2f, will take %.2f secs with prio %s and prob %.4f ending before %s' % (possible[0].task.action, task_reward(possible), possible[2].expected_time.to_sec(), possible[0].task.priority, possible[2].probability, rostime_to_python(possible[0].task.end_before)))
else:
# sort the list of possibles by probability of success, with highest prob at start
            # sort is stable, so a sequence of sorts will work, starting with the lowest-priority criterion
possibles_with_guarantees_in_time = sorted(possibles_with_guarantees_in_time, key=lambda x: x[0].task.end_before)
possibles_with_guarantees_in_time = sorted(possibles_with_guarantees_in_time, key=lambda x: x[2].probability, reverse=True)
possibles_with_guarantees_in_time = sorted(possibles_with_guarantees_in_time, key=lambda x: x[0].task.priority, reverse=True)
for possible in possibles_with_guarantees_in_time:
rospy.loginfo('%s will take %.2f secs with prio %s and prob %.4f ending before %s' % (possible[0].task.action, possible[2].expected_time.to_sec(), possible[0].task.priority, possible[2].probability, rostime_to_python(possible[0].task.end_before)))
# if at least one task fits into the executable time window
if len(possibles_with_guarantees_in_time) > 0:
# keep the most probable
new_active_batch = [possibles_with_guarantees_in_time[0][0]]
last_successful_spec = (possibles_with_guarantees_in_time[0][1], possibles_with_guarantees_in_time[0][2])
# remove the most probable from the list of possibles
possibles_with_guarantees_in_time = possibles_with_guarantees_in_time[1:]
# limit the tasks inspected by the batch limit... we are skipping tasks, so just using the batch limit isn't enough
for possible in possibles_with_guarantees_in_time:
if len(new_active_batch) == self.batch_limit:
break
mdp_task = possible[0]
mdp_tasks_to_check = copy(new_active_batch)
mdp_tasks_to_check.append(mdp_task)
(mdp_spec, guarantees) = self._get_guarantees_for_batch(mdp_tasks_to_check, estimates_service = mdp_estimates, epoch = now)
if guarantees.expected_time > execution_window:
rospy.loginfo('Too long policy duration for %s: %s' % (mdp_spec.ltl_task, guarantees.expected_time.to_sec()))
else:
rospy.loginfo('Acceptable policy duration for %s: %s' % (mdp_spec.ltl_task, guarantees.expected_time.to_sec()))
last_successful_spec = (mdp_spec, guarantees)
new_active_batch.append(mdp_task)
return True, new_active_batch, last_successful_spec[0], last_successful_spec[1]
# if we get here then at least one task can be executed now, but doesn't fit into the execution window on its own
elif len(possibles_with_guarantees) > 0:
return True, [], possibles_with_guarantees[0][1], possibles_with_guarantees[0][2]
# if we get here either there are no tasks or none have passed start_after
else:
return False, [], None, None
def _update_time_critical_tasks(self, now):
"""
Update the execution time of each time critical task based on current location.
"""
# todo: we don't need to always check, only when location has changed... but let's optimise later
# how far in the future to update tasks
only_check_in_the_next = self.execution_window * 2
check_before = now + only_check_in_the_next
estimates_service = rospy.ServiceProxy('mdp_plan_exec/get_guarantees_for_co_safe_task', GetGuaranteesForCoSafeTask)
estimates_service.wait_for_service()
new_time_critical_tasks = SortedCollection(key=(lambda t: t.task.execution_time))
for mdp_task in self.time_critical_tasks:
try:
if mdp_task.task.execution_time.secs == 0 or mdp_task.task.start_after < check_before:
spec, guarantees = self._get_guarantees_for_batch([self._create_travel_mdp_task(mdp_task.task.start_node_id)], estimates_service = estimates_service, epoch = now)
                    # take the predicted time directly; alternatively, factor in the probability,
# see below.
expected_navigation_time = rospy.Duration(guarantees.expected_time.secs)
# prevents an underestimate due to this being the expected time to failure
# expected_navigation_time = rospy.Duration(guarantees.expected_time.secs / guarantees.probability)
rospy.loginfo('Expected navigation time for time-critical task: %s' % expected_navigation_time.secs)
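                    # start navigating early enough that we expect to arrive at start_after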
mdp_task.task.execution_time = mdp_task.task.start_after - expected_navigation_time
new_time_critical_tasks.insert(mdp_task)
except Exception, e:
rospy.logwarn('Dropping time-critical task due to: %s' % e)
self.time_critical_tasks.remove(mdp_task)
self.log_task_event(mdp_task.task, TaskEvent.DROPPED, now, description = 'Error on guarantee call. Probably due to incorrect waypoint.')
self.republish_schedule()
self.time_critical_tasks = new_time_critical_tasks
# for mdp_task in self.time_critical_tasks:
# print mdp_task.action.name, 'at', rostime_to_python(mdp_task.task.execution_time), 'for', rostime_to_python(mdp_task.task.start_after)
def _should_start_next_time_critical_task(self, now):
if len(self.time_critical_tasks) > 0:
# if we're over the start time, it's good to go... lateness is handled in _check_for_late_time_critical_tasks
return now > self.time_critical_tasks[0].task.execution_time
else:
return False
def _next_execution_batch(self):
"""
        Called when nothing is executing and another batch of tasks is required for execution.
"""
# todo: make the locking more fine-grained. Currently execution cannot be paused during this method, but the calls to the mdp services can take a long time
with self.state_lock:
now = rospy.get_rostime()
# todo: this ignores what happens when the robot is moving, so need to check during execution too.
self._update_time_critical_tasks(now)
if self._drop_out_of_time_tasks(now):
self.republish_schedule()
execution_window = self.execution_window
# now see how much time is available until the next time critical task
if len(self.time_critical_tasks) > 0:
next_time_critical_task = self.time_critical_tasks[0]
until_next_critical_task = next_time_critical_task.task.execution_time - now
rospy.loginfo('Time until next time-critical task: %.2f secs' % until_next_critical_task.to_sec())
if until_next_critical_task < execution_window:
execution_window = until_next_critical_task
# if we're close to a time critical task, then do that
if self._should_start_next_time_critical_task(now):
new_active_batch = [next_time_critical_task]
self.time_critical_tasks = SortedCollection(self.time_critical_tasks[1:], key=(lambda t: t.task.execution_time))
mdp_goal = self._mdp_single_task_to_goal(next_time_critical_task)
rospy.loginfo('Executing time-critical task: %s. Start time was %s for execution at %s. Time is now %s' % (mdp_goal.spec.ltl_task, rostime_to_python(next_time_critical_task.task.execution_time), rostime_to_python(next_time_critical_task.task.start_after), rostime_to_python(now)))
self.mdp_exec_queue.put((mdp_goal, new_active_batch, self._get_guarantees_for_batch(new_active_batch, epoch = now)[1]))
# else see what we can squeeze into available time
elif self.recheck_normal_tasks:
rospy.loginfo('Checking for normal tasks to fit into available time: %.2f secs' % execution_window.to_sec())
# create mdp task batch to fit into available time
#
# this checks expected time after adding each task to the batch
if len(self.normal_tasks) == 0:
rospy.loginfo('No normal tasks remaining')
self.recheck_normal_tasks = False
else:
task_check_limit = 2 * self.batch_limit
evaluated_at_least_one_task, new_active_batch, new_active_spec, new_active_guarantees = self._choose_new_active_batch(task_check_limit, now, execution_window)
# if we found tasks to fit into the time available
if len(new_active_batch) > 0:
new_normal_tasks = self.normal_tasks[task_check_limit:]
for mdp_task in self.normal_tasks[:task_check_limit]:
if mdp_task not in new_active_batch:
new_normal_tasks.append(mdp_task)
self.normal_tasks = SortedCollection(new_normal_tasks, key=(lambda t: t.task.end_before))
mdp_goal = ExecutePolicyGoal(spec = new_active_spec)
rospy.loginfo('Executing normal batch: %s' % mdp_goal.spec.ltl_task)
self.mdp_exec_queue.put((mdp_goal, new_active_batch, new_active_guarantees))
# if we couldn't fit a batch in, but there were normal tasks available
elif evaluated_at_least_one_task:
# if the first available task won't fit into the available execution time window, and this is the max possible, then increase the window size accordingly
if execution_window == self.execution_window and new_active_guarantees.expected_time > self.execution_window:
# for now just increase to the expected time of last tested policy
self.execution_window = new_active_guarantees.expected_time
                            rospy.loginfo('Extending default execution window to %s' % self.execution_window.to_sec())
# if we get here then we can't fit the first available task into the time before the first time-critical task
else:
# the basic thing here is not to recheck the normal tasks until after the next time-critical execution or until new normal tasks are added (which could be potentially earlier/shorter)
self.recheck_normal_tasks = False
# todo: we could also try some optimisation to fit in a task other than the first available normal one
else:
                        # if we get here we have normal tasks, but none of them were available for execution. this probably means
# that they're for the future
# we can't set recheck_normal_tasks to False as this is the only way the time is rechecked
rospy.loginfo('Next task available for execution in at most %.2f secs' % (self.normal_tasks[0].task.start_after - now).to_sec())
# pass
else:
rospy.logdebug('No need to recheck normal tasks')
def _expected_duration_to_completion_time(self, expected_duration):
"""
Take a guarantees struct and determine when the execution should complete by
"""
if expected_duration.secs < 0:
            rospy.logwarn('Expected duration was less than 0, giving a default of 5 minutes')
expected_duration = rospy.Duration(5 * 60)
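        # expected completion = now + expected duration + a 60 second grace period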
expected_completion_time = rospy.get_rostime() + expected_duration + rospy.Duration(60)
if self.cancel_at_window_end:
for mdp_task in self.active_batch:
# only curtail tasks to window for non-time critical tasks
if mdp_task.task.start_after != mdp_task.task.end_before and mdp_task.task.end_before < expected_completion_time:
# rospy.logwarn('Curtailing execution with end of task window')
expected_completion_time = mdp_task.task.end_before
return expected_completion_time
def are_active_tasks_interruptible(self):
for mdp_task in self.active_batch:
if not mdp_task.is_interruptible:
return False
return super(MDPTaskExecutor, self).are_active_tasks_interruptible()
def _wait_for_policy_execution(self):
"""
Wait until policy execution is complete or until we reach expected_completion_time at which point policy execution is preempted.
"""
poll_time = rospy.Duration(5)
overtime = rospy.Duration(0)
# after an hour of overtime, give up
overtime_threshold = rospy.Duration(60 * 60)
log_count = 0
while not self.mdp_exec_client.wait_for_result(poll_time) and not rospy.is_shutdown():
# locking here as the feedback callback can change self.expected_completion_time
with self.state_lock:
now = rospy.get_rostime()
remaining_secs = (self.expected_completion_time - now).to_sec()
if remaining_secs < 0:
if self.are_active_tasks_interruptible():
rospy.logwarn('Policy execution did not complete in expected time, preempting')
self.mdp_exec_client.cancel_all_goals()
# give the policy execution some time to clean up
complete = self.mdp_exec_client.wait_for_result(rospy.Duration(70))
if not complete:
rospy.logwarn('Policy execution did not service preempt request in a reasonable time')
return GoalStatus.ACTIVE
else:
return GoalStatus.PREEMPTED
else:
rospy.logwarn('Policy execution did not complete in expected time, but is non-interruptible, so waiting. Overtime: %ss' % ros_duration_to_string(overtime))
overtime += poll_time
if overtime > overtime_threshold:
                            rospy.logwarn('Policy execution has exceeded the overtime threshold; all execution flags ignored, preempting regardless')
self.mdp_exec_client.cancel_all_goals()
# give the policy execution some time to clean up
complete = self.mdp_exec_client.wait_for_result(rospy.Duration(70))
if not complete:
rospy.logwarn('Policy execution did not service preempt request in a reasonable time')
return GoalStatus.ACTIVE
else:
return GoalStatus.RECALLED
else:
if log_count % 3 == 0:
rospy.loginfo('Another %.2f seconds until expected policy completion' % remaining_secs)
log_count += 1
with self.state_lock:
# check whether we're due to start a time-critical task that we'd otherwise miss
if self._should_start_next_time_critical_task(now):
if self.on_demand_active:
rospy.logwarn('Ignoring the start of a time-critical task due to an on-demand task')
else:
rospy.logwarn('We should be executing a time-critical task now, so cancelling execution')
self.mdp_exec_client.cancel_all_goals()
complete = self.mdp_exec_client.wait_for_result(rospy.Duration(70))
if not complete:
rospy.logwarn('Policy execution did not service preempt request in a reasonable time')
return GoalStatus.ACTIVE
else:
return GoalStatus.PREEMPTED
return self.mdp_exec_client.get_state()
def mdp_exec(self):
"""
This is the main loop of the executor. It checks for the next goal to execute.
If there's nothing to execute then it calls _next_execution_batch to check for available tasks.
"""
while not rospy.is_shutdown():
# all encompassing try/catch to make sure this loop does not go down
try:
# try/catch for empty queue
try:
# keep looping until paused or an Empty is thrown
while self.executing and not rospy.is_shutdown():
(mdp_goal, new_active_batch, guarantees) = self.mdp_exec_queue.get(timeout = 1)
sent_goal = False
with self.state_lock:
# always set active batch, but we can correct it later if we don't actually send the goal
self.set_active_batch(deepcopy(new_active_batch))
self.republish_schedule()
# execution status could have changed while acquiring the lock
if self.executing:
self.mdp_exec_client = actionlib.SimpleActionClient('mdp_plan_exec/execute_policy', ExecutePolicyAction)
self.mdp_exec_client.wait_for_server()
# last chance! -- if there was a change during wait
if self.executing:
self.log_task_events((m.task for m in self.active_batch), TaskEvent.TASK_STARTED, rospy.get_rostime(), description = mdp_goal.spec.ltl_task)
self.mdp_exec_client.send_goal(mdp_goal, feedback_cb = self.mdp_exec_feedback)
# this is when we expect navigation to complete by
self.expected_completion_time = self._expected_duration_to_completion_time(guarantees.expected_time)
rospy.loginfo('Sent goal for %s' % mdp_goal.spec.ltl_task)
self.republish_schedule()
for m in self.active_batch:
self.on_demand_active = self.on_demand_active or m.is_on_demand
if self.on_demand_active:
rospy.loginfo('This is an on-demand task')
sent_goal = True
else:
self.mdp_exec_client = None
# indicate that all processing on the task removed from the queue is complete
# this allows join() to work correctly
self.mdp_exec_queue.task_done()
if sent_goal:
final_status = self._wait_for_policy_execution()
with self.state_lock:
# these are left after execution
# remove those tasks which were part of the cancelled set
# print self.to_cancel
active_tasks = []
cancelled_tasks = []
for m in self.active_batch:
# print m.task.task_id
if m.task.task_id in self.to_cancel:
# print 'cancelled'
cancelled_tasks.append(m)
else:
# print 'active'
active_tasks.append(m)
self.active_batch = active_tasks
self.to_cancel = []
# print cancelled_tasks
# print self.active_batch
if len(cancelled_tasks) > 0:
log_string = 'Dropped %s task(s) after execution due to cancellation' % len(cancelled_tasks)
rospy.loginfo(log_string)
self.log_task_events((m.task for m in cancelled_tasks), TaskEvent.DROPPED, rospy.get_rostime(), description = log_string)
remaining_active = len(self.active_batch)
self.on_demand_active = False
# policy execution finished everything
#if final_status == GoalStatus.SUCCEEDED or final_status == GoalStatus.PREEMPTED:
                                if True:  # This way tasks aren't dropped when navigation failures occur. TODO see whether the stuff under the else statement is needed for some cases.
self.deactivate_active_batch(goal_status = final_status)
# here we may have cancelled an overrunning policy or had some other problem
else:
log_string = 'Policy execution exited with status %s, dropping remaining active tasks' % GoalStatus.to_string(final_status)
rospy.loginfo(log_string)
self.log_task_events((m.task for m in self.active_batch), TaskEvent.DROPPED, rospy.get_rostime(), description = log_string)
# todo: is dropping really necessary here? the tasks themselves were not aborted, just policy execution
self.set_active_batch([])
# make sure this can't be used now execution is complete
self.mdp_exec_client = None
# whatever happened or was executed, we should now recheck the available normal tasks
self.recheck_normal_tasks = True
else:
with self.state_lock:
self.deactivate_active_batch(goal_status = GoalStatus.RECALLED, save_all = True)
self.republish_schedule()
except Empty, e:
pass
# state of execution could have changed since the last check
if self.executing:
self._next_execution_batch()
else:
rospy.sleep(1)
except Exception, e:
rospy.logwarn('Caught exception in the mdp_exec loop: %s' % e)
rospy.sleep(1)
# makes publishing thread check for exit
self.republish_schedule()
def set_active_batch(self, batch):
"""
Set the active batch of tasks. Also updates self.active_tasks in the base class
"""
self.active_batch = copy(batch)
self.active_tasks = [m.task for m in self.active_batch]
def start_execution(self):
""" Called when overall execution should (re)start """
rospy.loginfo('(Re-)starting execution')
def deactivate_active_batch(self, goal_status, save_all = False, description = ''):
"""
        Takes the tasks from the active batch and returns them to the appropriate lists for later consideration.
"""
active_count = len(self.active_batch)
now = rospy.get_rostime()
log_string = 'De-activating remaining %s tasks after execution finished with status %s.' % (active_count, GoalStatus.to_string(goal_status))
if active_count > 0:
if save_all:
log_string += ' Saving all back to task list.'
for mdp_task in self.active_batch:
if mdp_task.task.start_after == mdp_task.task.end_before:
self.time_critical_tasks.insert(mdp_task)
else:
self.normal_tasks.insert(mdp_task)
else:
# for each task remaining in the active batch, put it back into the right list
do_not_reactivate_later = []
reactivate_later = []
for mdp_task in self.active_batch:
# we can't monitor the execution of these tasks, so we always assume they're done when deactivated
if mdp_task.is_ltl or mdp_task.is_mdp_spec or mdp_task.is_on_demand:
do_not_reactivate_later.append(mdp_task)
else:
reactivate_later.append(mdp_task)
self.log_task_events((m.task for m in do_not_reactivate_later), self.goal_status_to_task_status(goal_status), now, description = log_string + ' Cannot be reactivated later.')
self.log_task_events((m.task for m in reactivate_later), TaskEvent.TASK_STOPPED, now, description = log_string + ' Saved task to reactivate later')
for mdp_task in reactivate_later:
if mdp_task.task.start_after == mdp_task.task.end_before:
self.time_critical_tasks.insert(mdp_task)
else:
self.normal_tasks.insert(mdp_task)
# empty the active batch. this might mean some feedback misses the update
# the consequence is that the task was completed but we preempted before receiving the update,
# this means the task will be executed again, but there's no easy way around this
self.set_active_batch([])
rospy.loginfo(log_string)
return active_count
def pause_execution(self):
""" Called when overall execution should pause. This is called *before* self.executing is set to False. """
# make sure the queue for execution is empty
self.mdp_exec_queue.join()
with self.state_lock:
self._pause_execution_internal()
# wait for active batch to be empty before return
while not rospy.is_shutdown():
with self.state_lock:
if self.active_batch == []:
return
# print 'waiting for active batch to become empty'
rospy.sleep(0.5)
def _pause_execution_internal(self):
"""
Does the work of pausing execution, without the lock.
"""
# this is done by the super class *after* pause_execution completes, but we need to make sure that it is done before this lock is released to make sure execution does not continue after policy execution preemption
self.executing = False
# If the client is not None then there is execution going on. the active batch could be empty if we've just caught the tail end of execution
#
# Also there could be tasks in the active batch without an action client existing. as the client is created slightly later
if self.mdp_exec_client is not None:
# preempt the action server
self.mdp_exec_client.cancel_all_goals()
rospy.loginfo('Cancelling policy execution')
else:
rospy.loginfo('No policy execution active when pausing')
def task_demanded(self, demanded_task, currently_active_task):
""" Called when a task is demanded. self.active_task is the demanded task (and is being executed) and previously_active_task was the task that was being executed (which could be None) """
with self.state_lock:
prior_execution_state = self.executing
# this cleans up the current execution and sets self.executing to false
self.pause_execution()
# todo: potential race condition -- what happens if someone calls start/pause execution here
with self.state_lock:
# convert the demanded task into an mdp task for policy execution
demanded_mdp_task = self._convert_task_to_mdp_action(demanded_task)
demanded_mdp_task.is_on_demand = True
# and queue it up for execution
mdp_goal = self._mdp_single_task_to_goal(demanded_mdp_task)
# put blocks until the queue is empty, so we guarantee that the queue is empty while we're under lock
tasks = [demanded_mdp_task]
self.mdp_exec_queue.put((mdp_goal, tasks, self._get_guarantees_for_batch(tasks)[1]))
rospy.loginfo('Queued up demanded task: %s' % (demanded_mdp_task.action.name))
self.executing = prior_execution_state
def cancel_active_task(self):
"""
Called to cancel the task which is currently executing.
If something is being executed we handle this by simply pausing and restarting execution.
pause_execution is often called before this. (this is always the case currently)
"""
if self.executing:
# save the current executing tasks to drop later
with self.state_lock:
self.to_cancel = set([m.task.task_id for m in self.active_batch])
self.pause_execution()
with self.state_lock:
self.executing = True
def cancel_task(self, task_id):
""" Called when a request is received to cancel a task. The currently executing one is checked elsewhere. """
rospy.logwarn('Cancelling individual tasks is not yet implemented')
return False
def clear_schedule(self):
""" Called to clear all tasks from schedule, with the exception of the currently executing one. """
with self.state_lock:
prior_execution_state = self.executing
# this cleans up the current execution and sets self.executing to false
self.pause_execution()
# (try to) make sure the queues are empty (there's a chance that between the join and next state_lock that something could be added).
self.mdp_exec_queue.join()
with self.state_lock:
now = rospy.get_rostime()
self.log_task_events((m.task for m in self.normal_tasks), TaskEvent.DROPPED, now, description = 'Schedule was cleared')
self.normal_tasks.clear()
self.log_task_events((m.task for m in self.time_critical_tasks), TaskEvent.DROPPED, now, description = 'Schedule was cleared')
self.time_critical_tasks.clear()
self.executing = prior_execution_state
self.republish_schedule()
rospy.loginfo('All tasks cleared')
def republish_schedule(self):
"""
Notify schedule-publishing thread to update and publish schedule
"""
self.update_schedule_condition.acquire()
self.update_schedule_condition.notify()
self.update_schedule_condition.release()
def publish_schedule(self):
"""
        Loops continuously, publishing the upcoming tasks to be executed.
        It is challenging to produce a list of the tasks that will be executed and when from this, so the compromise is that
ExecutionStatus contains the active batch with their execution_times set to now, all time-critical tasks and the next self.batch_limit normal tasks with their start time set to the end time of the current active batch.
"""
while not rospy.is_shutdown():
# all encompassing try/catch to make sure this loop does not go down
try:
# copy all relevant entries under lock
# we're taking a deepcopy as we might mess around with the times a bit
with self.state_lock:
expected_completion_time = deepcopy(self.expected_completion_time)
active_batch = deepcopy(self.active_batch)
normal_tasks = deepcopy(self.normal_tasks)
time_critical_tasks = deepcopy(self.time_critical_tasks)
now = rospy.get_rostime()
# todo: fill this value better
expected_end_of_batch = rospy.get_rostime() + rospy.Duration(120)
# start from the time_cr
schedule = ExecutionStatus(currently_executing = len(active_batch) > 0)
all_tasks = ExecutionStatus(currently_executing = len(active_batch) > 0)
schedule.header.stamp = now
all_tasks.header.stamp = now
for m in active_batch:
m.task.execution_time = now
schedule.execution_queue.append(m.task)
all_tasks.execution_queue.append(m.task)
# schedule.execution_queue += [m.task for m in time_critical_tasks]
all_tasks.execution_queue += [m.task for m in time_critical_tasks]
all_tasks.execution_queue += [m.task for m in normal_tasks]
all_tasks.execution_queue = sorted(all_tasks.execution_queue, key=lambda x: x.start_after)
all_tasks.execution_queue = sorted(all_tasks.execution_queue, key=lambda x: x.priority)
self.schedule_publisher.publish(schedule)
self.all_tasks_schedule_publisher.publish(all_tasks)
self.update_schedule_condition.acquire()
self.update_schedule_condition.wait()
self.update_schedule_condition.release()
except Exception, e:
rospy.logwarn('Caught exception in publish_schedule loop: %s' % e)
rospy.sleep(1)
if __name__ == '__main__':
executor = MDPTaskExecutor()
rospy.spin()
# create a schedule class which handles blocking until execution and manages the various changes
|
mit
| -8,600,928,490,053,119,000
| 46.717179
| 306
| 0.572468
| false
| 4.256642
| false
| false
| false
|
AaronRegan/ObjectTracker
|
hog.py
|
1
|
3796
|
# import the necessary packages
from __future__ import print_function
from non_max_suppression import non_max_suppression
from myqueue import myqueue
from frames import frames
from object import Object
import numpy as np
import argparse
import datetime
import imutils
import cv2
import time
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
if args.get("video", None) is None:
camera = cv2.VideoCapture(0)
# otherwise, we are reading from a video file
else:
print("[INFO] starting video file thread...")
camera = myqueue(args["video"]).start()
time.sleep(1.0)
i = 0
centerX = 0
centerY = 0
objList = []
meas = []
pred = []
mp = np.array((2, 1), np.float32) # measurement
tp = np.zeros((2, 1), np.float32) # tracked / prediction
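# constant-velocity Kalman filter: 4-D state (x, y, dx, dy), measurements are 2-D pixel positions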
kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.03
def onPed(x, y):
global mp, meas
mp = np.array([[np.float32(x)], [np.float32(y)]])
meas.append((x, y))
def kalPredict(mp):
global tp, pred
kalman.correct(mp)
tp = kalman.predict()
pred.append((int(tp[0]), int(tp[1])))
def paint(tp, xA, yA, xB, yB):
global frame, pred
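    # draw the predicted bounding box, centred on the Kalman estimate and sized
    # to match the current detection (xA, yA, xB, yB)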
# cv2.circle(frame, ((tp[0]), (tp[1])), 3, (0, 0, 255), -1)
cv2.rectangle(frame, ((tp[0]) - ((xB - xA) / 2), (tp[1]) + (yB - yA) / 2),
(((tp[0]) + ((xB - xA) / 2)), ((tp[1]) - (yB - yA) / 2)), (0, 0, 255), 2)
fps = frames().start()
# initialize the HOG descriptor/person detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
# loop over the frames of the video
while camera.more():
frame = camera.read()
frame = imutils.resize(frame, width=600)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# start = datetime.datetime.now()
# detect people in the image
(rects, weights) = hog.detectMultiScale(frame, winStride=(8, 8),
padding=(32, 32), scale=1.05)
# print("[INFO] detection took: {}".format(
#(datetime.datetime.now() - start).total_seconds()))
# apply non-maxima suppression to the bounding boxes using a
# fairly large overlap threshold to try to maintain overlapping
# boxes that are still people
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
# draw the final bounding boxes
for (xA, yA, xB, yB) in pick:
i = i+1
cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
centerX = (xB + xA) / 2
centerY = (yB + yA) / 2
obj = Object(centerX, centerY, i)
objList.append(obj)
onPed(centerX, centerY)
kalPredict(mp)
paint(tp, xA, yA, xB, yB)
cv2.putText(frame, "Queue Size: {}".format(camera.Q.qsize()),
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
peds_found = "Found " + str(len(pick)) + " Pedestrians"
cv2.putText(frame, peds_found, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
# show the output images
cv2.imshow("HOG", frame)
cv2.waitKey(1)
fps.update()
k = cv2.waitKey(27) & 0xff
if k == 27:
break
fps.stop()
for objects in range(len(objList) - 1):
print(str(objList[objects]))
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
camera.stop()
|
mit
| -1,399,075,531,900,514,000
| 32.59292
| 110
| 0.606164
| false
| 2.826508
| false
| false
| false
|
ReconCell/smacha
|
smacha/test/smacha_test_examples/random_outcomes.py
|
1
|
6029
|
#!/usr/bin/env python
import smach
import random
class RandomOutcomeState(smach.State):
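    # Generic state that runs its registered callbacks and then returns whatever
    # outcome a callback has written into userdata.outcome.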
def __init__(self, input_keys = ['outcome'], output_keys = ['outcome'], callbacks = {}, outcomes=['succeeded']):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=outcomes)
self._cbs = []
if callbacks:
for cb in sorted(callbacks):
if cb in globals():
self._cbs.append(globals()[cb])
elif cb in locals():
self._cbs.append(locals()[cb])
elif cb in dir(self):
self._cbs.append(getattr(self, cb))
self._cb_input_keys = []
self._cb_output_keys = []
self._cb_outcomes = []
for cb in self._cbs:
if cb and smach.has_smach_interface(cb):
self._cb_input_keys.append(cb.get_registered_input_keys())
self._cb_output_keys.append(cb.get_registered_output_keys())
self._cb_outcomes.append(cb.get_registered_outcomes())
self.register_input_keys(self._cb_input_keys[-1])
self.register_output_keys(self._cb_output_keys[-1])
self.register_outcomes(self._cb_outcomes[-1])
def execute(self, userdata):
# Call callbacks
for (cb, ik, ok) in zip(self._cbs,
self._cb_input_keys,
self._cb_output_keys):
# Call callback with limited userdata
try:
cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
except:
cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
return userdata.outcome
class CallbacksState(smach.State):
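    # Generic state that runs its registered callbacks and always returns 'succeeded'.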
def __init__(self, input_keys=[], output_keys=[], callbacks=[]):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded'])
self._cbs = []
if callbacks:
for cb in sorted(callbacks):
if cb in globals():
self._cbs.append(globals()[cb])
elif cb in locals():
self._cbs.append(locals()[cb])
elif cb in dir(self):
self._cbs.append(getattr(self, cb))
self._cb_input_keys = []
self._cb_output_keys = []
self._cb_outcomes = []
for cb in self._cbs:
if cb and smach.has_smach_interface(cb):
self._cb_input_keys.append(cb.get_registered_input_keys())
self._cb_output_keys.append(cb.get_registered_output_keys())
self._cb_outcomes.append(cb.get_registered_outcomes())
self.register_input_keys(self._cb_input_keys[-1])
self.register_output_keys(self._cb_output_keys[-1])
self.register_outcomes(self._cb_outcomes[-1])
def execute(self, userdata):
# Call callbacks
for (cb, ik, ok) in zip(self._cbs,
self._cb_input_keys,
self._cb_output_keys):
# Call callback with limited userdata
try:
cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
except:
cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
return 'succeeded'
@smach.cb_interface(input_keys=[],
output_keys=['outcome'],
outcomes=['foo_0', 'foo_1', 'foo_2'])
def outcome_randomize_lambda_cb(self, userdata):
lambda_cb = lambda ud: random.choice(list(self._outcomes))
userdata.outcome = lambda_cb(userdata)
return 'succeeded'
RandomOutcomeState.outcome_randomize_lambda_cb = outcome_randomize_lambda_cb
@smach.cb_interface(input_keys=[],
output_keys=['outcome'],
outcomes=[])
def outcome_foo_0_lambda_cb(self, userdata):
lambda_cb = lambda ud: random.choice(list(self._outcomes))
userdata.outcome = lambda_cb(userdata)
return 'succeeded'
CallbacksState.outcome_foo_0_lambda_cb = outcome_foo_0_lambda_cb
@smach.cb_interface(input_keys=[],
output_keys=['outcome'],
outcomes=[])
def outcome_foo_1_lambda_cb(self, userdata):
lambda_cb = lambda ud: random.choice(list(self._outcomes))
userdata.outcome = lambda_cb(userdata)
return 'succeeded'
CallbacksState.outcome_foo_1_lambda_cb = outcome_foo_1_lambda_cb
@smach.cb_interface(input_keys=[],
output_keys=['outcome'],
outcomes=[])
def outcome_foo_2_lambda_cb(self, userdata):
lambda_cb = lambda ud: random.choice(list(self._outcomes))
userdata.outcome = lambda_cb(userdata)
return 'succeeded'
CallbacksState.outcome_foo_2_lambda_cb = outcome_foo_2_lambda_cb
def main():
sm = smach.StateMachine(outcomes=['final_outcome'])
with sm:
smach.StateMachine.add('RANDOMIZE',
RandomOutcomeState(callbacks = ['outcome_randomize_lambda_cb'], outcomes=['foo_0', 'foo_1', 'foo_2']),
transitions={'foo_0':'FOO_0',
'foo_1':'FOO_1',
'foo_2':'FOO_2'})
smach.StateMachine.add('FOO_0',
CallbacksState(callbacks = ['outcome_foo_0_lambda_cb']),
transitions={'succeeded':'RANDOMIZE'})
smach.StateMachine.add('FOO_1',
CallbacksState(callbacks = ['outcome_foo_1_lambda_cb']),
transitions={'succeeded':'RANDOMIZE'})
smach.StateMachine.add('FOO_2',
CallbacksState(callbacks = ['outcome_foo_2_lambda_cb']),
transitions={'succeeded':'final_outcome'})
outcome = sm.execute()
if __name__ == '__main__':
main()
|
bsd-3-clause
| -3,053,937,580,244,111,000
| 35.107784
| 141
| 0.532427
| false
| 3.937949
| false
| false
| false
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.1/Lib/distutils/command/build_clib.py
|
1
|
8098
|
"""distutils.command.build_clib
Implements the Distutils 'build_clib' command, to build a C/C++ library
that is included in the module distribution and needed by an extension
module."""
__revision__ = "$Id: build_clib.py 72379 2009-05-06 07:26:24Z tarek.ziade $"
# XXX this module has *lots* of code ripped-off quite transparently from
# build_ext.py -- not surprisingly really, as the work required to build
# a static library from a collection of C source files is not really all
# that different from what's required to build a shared object file from
# a collection of C source files. Nevertheless, I haven't done the
# necessary refactoring to account for the overlap in code between the
# two modules, mainly because a number of subtle details changed in the
# cut 'n paste. Sigh.
import os
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler
from distutils import log
def show_compilers():
from distutils.ccompiler import show_compilers
show_compilers()
class build_clib(Command):
description = "build C/C++ libraries used by Python extensions"
user_options = [
('build-clib', 'b',
"directory to build C/C++ libraries to"),
('build-temp', 't',
"directory to put temporary build by-products"),
('debug', 'g',
"compile with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.build_clib = None
self.build_temp = None
# List of libraries to build
self.libraries = None
# Compilation options for all libraries
self.include_dirs = None
self.define = None
self.undef = None
self.debug = None
self.force = 0
self.compiler = None
def finalize_options(self):
# This might be confusing: both build-clib and build-temp default
# to build-temp as defined by the "build" command. This is because
# I think that C libraries are really just temporary build
# by-products, at least from the point of view of building Python
# extensions -- but I want to keep my options open.
self.set_undefined_options('build',
('build_temp', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
self.libraries = self.distribution.libraries
if self.libraries:
self.check_library_list(self.libraries)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# XXX same as for build_ext -- what about 'self.define' and
# 'self.undef' ?
def run(self):
if not self.libraries:
return
# Yech -- this is cut 'n pasted from build_ext.py!
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
self.build_libraries(self.libraries)
def check_library_list(self, libraries):
"""Ensure that the list of libraries is valid.
`library` is presumably provided as a command option 'libraries'.
This method checks that it is a list of 2-tuples, where the tuples
are (library_name, build_info_dict).
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(libraries, list):
raise DistutilsSetupError(
"'libraries' option must be a list of tuples")
for lib in libraries:
            if not isinstance(lib, tuple) or len(lib) != 2:
                raise DistutilsSetupError(
                    "each element of 'libraries' must be a 2-tuple")
name, build_info = lib
if not isinstance(name, str):
raise DistutilsSetupError(
"first element of each tuple in 'libraries' "
"must be a string (the library name)")
if '/' in name or (os.sep != '/' and os.sep in name):
raise DistutilsSetupError("bad library name '%s': "
"may not contain directory separators" % lib[0])
if not isinstance(build_info, dict):
raise DistutilsSetupError(
"second element of each tuple in 'libraries' "
"must be a dictionary (build info)")
def get_library_names(self):
# Assume the library list is valid -- 'check_library_list()' is
# called from 'finalize_options()', so it should be!
if not self.libraries:
return None
lib_names = []
for (lib_name, build_info) in self.libraries:
lib_names.append(lib_name)
return lib_names
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
for (lib_name, build_info) in self.libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
filenames.extend(sources)
return filenames
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
sources = list(sources)
log.info("building '%s' library", lib_name)
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
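# Hedged example (editor's addition): the 'libraries' value this command
# consumes is a list of (name, build_info) tuples, as enforced by
# check_library_list() above. The library name, macros, and paths below are
# illustrative placeholders, not values used anywhere in distutils.
def _example_libraries_option():
    """Return a sample value for the 'libraries' keyword of setup()."""
    return [
        ("sample", {
            "sources": ["src/sample.c"],   # required: list of C/C++ sources
            "macros": [("NDEBUG", "1")],   # optional (name, value) macros
            "include_dirs": ["include"],   # optional include directories
        }),
    ]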
|
mit
| -2,797,674,694,629,372,000
| 37.379147
| 76
| 0.568782
| false
| 4.614245
| false
| false
| false
|
illagrenan/django-make-app
|
django_make_app/renderers.py
|
1
|
1282
|
# -*- encoding: utf-8 -*-
# ! python2
from __future__ import (absolute_import, division, print_function, unicode_literals)
from jinja2 import FileSystemLoader, Environment
class TemplateRenderer(object):
def __init__(self, templates_directory, template_name, item):
"""
:type templates_directory: os.path
:type template_name: unicode
:type item: dict
"""
self._templates_directory = templates_directory
self.template_name = template_name
self.item = item
def _render_from_template(self, template_name, **kwargs):
loader = FileSystemLoader(self._templates_directory)
env = Environment(loader=loader)
template = env.get_template(template_name)
render = template.render(**kwargs)
render = render.replace("[[", "{{")
render = render.replace("]]", "}}")
render = render.replace("[%", "{%")
render = render.replace("%]", "%}")
return render
def render(self, context):
"""
:type context: dict
"""
if "_model" in self.item:
context.update({
"current_model": self.item.get('_model')
})
return self._render_from_template(self.template_name, **context)
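# Hedged usage sketch (editor's addition): how TemplateRenderer is typically
# driven. The directory, template name, and item dict below are invented
# placeholders; real values come from the django-make-app generator.
#
#     renderer = TemplateRenderer("app_template", "models.py.html",
#                                 {"_model": {"name": "Article"}})
#     output = renderer.render({"app_name": "blog"})
#
# The [[ ]] / [% %] delimiters in the source templates are rewritten to
# {{ }} / {% %} above, so the generated file can itself contain Django
# template syntax without being expanded during generation.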
|
mit
| -4,592,002,837,051,133,000
| 27.488889
| 84
| 0.583463
| false
| 4.259136
| false
| false
| false
|
clarete/curdling
|
curdling/services/base.py
|
1
|
3236
|
from __future__ import absolute_import, print_function, unicode_literals
from ..signal import Signal, SignalEmitter
from ..util import logger
from distlib.compat import queue
import sys
import threading
import time
import traceback
# See `Service._worker()`. This is the sentinel that gently stops the iterator
# over there.
SENTINEL = (None, {})
# Number of threads that a service will spawn by default.
DEFAULT_CONCURRENCY = 2
class Service(SignalEmitter):
def __init__(self, size=DEFAULT_CONCURRENCY, **args):
super(Service, self).__init__()
self.size = size
self.env = args.get('env')
self.conf = args.pop('conf', {})
self.index = args.pop('index', None)
self.logger = logger(__name__)
# Components to implement the thread pool
self._queue = queue.Queue()
self.pool = []
# Declaring signals
self.started = Signal()
self.finished = Signal()
self.failed = Signal()
def queue(self, requester, **data):
self.logger.debug('%s.queue(from="%s", data="%s")', self.name, requester, data)
self._queue.put((requester, data))
return self
def start(self):
self.logger.debug('%s.start()', self.name)
for _ in range(self.size):
worker = threading.Thread(target=self._worker)
worker.daemon = True
worker.start()
self.pool.append(worker)
return self
def join(self):
# We need to separate loops cause we can't actually tell which thread
# got each sentinel
for worker in self.pool:
self._queue.put(SENTINEL)
for worker in self.pool:
worker.join()
        self.pool = []
def handle(self, requester, sender_data):
raise NotImplementedError(
"The service subclass should override this method")
def __call__(self, requester, **kwargs):
return self.handle(requester, kwargs)
# -- Private API --
def _worker(self):
name = '{0}[{1}]'.format(self.name, threading.current_thread().name)
        # If the service consumer invokes `.queue(None)` it causes the
        # worker to die elegantly by matching the following sentinel:
for requester, sender_data in iter(self._queue.get, SENTINEL):
self.logger.debug('%s.run(data="%s")', name, sender_data)
try:
self.emit('started', self.name, **sender_data)
result = self(requester, **sender_data) or {}
self._queue.task_done()
except BaseException:
fname, lineno, fn, text = traceback.extract_tb(sys.exc_info()[2])[0]
self.logger.exception(
'%s.run(from="%s", data="%s") failed:\n'
'%s:%d (%s) %s',
name, requester, sender_data,
fname, lineno, fn, text,
)
sender_data.update(exception=sys.exc_info()[1])
self.emit('failed', self.name, **sender_data)
else:
self.logger.debug('%s.run(data="%s"): %s', name, sender_data, result)
self.emit('finished', self.name, **result)
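# Hedged sketch (editor's addition): a minimal Service subclass. The class
# name and the work done in handle() are illustrative; concrete services
# override handle() the same way and receive the queued data as a dict.
#
#     class EchoService(Service):
#         def handle(self, requester, sender_data):
#             # the returned dict is **-expanded into the 'finished' signal
#             return {'echo': sender_data}
#
#     service = EchoService(size=2).start()
#     service.queue('tests', value=42)
#     service.join()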
|
gpl-3.0
| -2,457,431,602,473,947,000
| 33.425532
| 87
| 0.57293
| false
| 4.055138
| false
| false
| false
|
pazagra/catkin_ws
|
src/RGBDHand/src/Planeees.py
|
1
|
4011
|
import roslib
import rospy
import sys
import timeit
import os
import Image_Process
import cv2
import cv2.cv as cv
import numpy as np
import BoW
import Analysis
import Segmentation
from sklearn import cluster
import Descriptors
class Learn:
def __init__(self):
path = "/media/iglu/Data/Dataset/DatasetIglu/Dataset_united/Ana_point/"
i = 0
Seg = Segmentation.Segmentation()
f = open(path+"List.txt",'r')
for line in f:
print "Starting Training"
start_time = timeit.default_timer()
# code you want to evaluate
Time = line
file1 = next(f).rstrip('\n')
file2 = next(f).rstrip('\n')
Label = next(f).rstrip('\n')
RGB = cv2.imread(path+"RGB/"+file1) #[:-4]+"_RGB.jpg"
Depth = cv2.imread(path+"Depth/"+file2) #[:-4]+"_Depth.png"
Mesh = []
for i in xrange(0, Depth.shape[0]):
for j in xrange(0, Depth.shape[1]):
Mesh.append((i, j, Depth[i][j][0]))
            kmeans = cluster.KMeans(n_clusters=8).fit(Mesh)
            print kmeans.n_clusters
# Depthv = self.Inpaint(Depth)
# Seg.CreateCloud(Depth)
# L = Seg.PlaneSegmentation()
# image = np.zeros(RGB.shape, RGB.dtype)
# depth2 = np.zeros(Depth.shape, Depth.dtype)
# for data in L:
# image[int(data[0]),int(data[1])] = RGB[int(data[0]),int(data[1])]
# depth2[int(data[0]), int(data[1])] = Depth[int(data[0]), int(data[1])]
# print Depth[int(data[0]), int(data[1])]
# Seg.CreateCloud(depth2)
# L = Seg.PlaneSegmentation()
# image2 = np.zeros(image.shape, image.dtype)
# depth3 = np.zeros(depth2.shape, depth2.dtype)
# for data in L:
# image2[int(data[0]),int(data[1])] = image[int(data[0]),int(data[1])]
# depth3[int(data[0]), int(data[1])] = depth2[int(data[0]), int(data[1])]
# print Depth[int(data[0]), int(data[1])]
elapsed = timeit.default_timer() - start_time
print "Tiempo: " + elapsed.__str__()
cv2.imshow("RGB", RGB )
cv2.imshow("Depthv", depth2)
cv2.waitKey(0) & 0xFF
cv2.destroyAllWindows()
# print "Enviado "+file+" "+i.__str__()
# if i >150:
# break
def Inpaintv1(self,Depth):
Depth_Small = Depth
Temp2 = Depth
x1 = int(Depth.shape[0] * 0.2)
x2 = int(Depth.shape[1] * 0.2)
x3 = Depth.shape[2]
cv2.resize(Depth, (x1, x2), Depth_Small)
Temp = Depth_Small
mask = (Depth_Small == 0)
zeros = np.zeros(Depth_Small.shape, Depth_Small.dtype)
ones = np.ones(Depth_Small.shape, Depth_Small.dtype)
ones *= 255
maskk = np.where(mask == True, ones, zeros)
maskk = maskk[:, :, 0]
cv2.inpaint(Depth_Small, maskk, 10.0, cv2.INPAINT_TELEA, Temp)
cv2.resize(Temp, (Depth.shape[0], Depth.shape[1]), Temp2)
return Temp2
def Inpaint(self,Depth):
Depth_Small = Depth
Temp2 = Depth
Temp = Depth_Small
mask = (Depth_Small == 0)
zeros = np.zeros(Depth_Small.shape, Depth_Small.dtype)
ones = np.ones(Depth_Small.shape, Depth_Small.dtype)
ones *= 255
maskk = np.where(mask == True, ones, zeros)
maskk = maskk[:, :, 0]
cv2.inpaint(Depth_Small, maskk, 30.0, cv2.INPAINT_TELEA, Temp)
Temp2 = Temp
return Temp2
def main(args):
# Learn()
oll = np.zeros(25)
    labels = ['glass','Nesquik cereales','fork','bowl','Milk Box','Coke','plate','Heinz ketchup','Apple','lime','orange','Green tea box (Lipton)','cofee mug','Special K','water bottle','banana','Bote Pringles','pitcher','kleenex box','Spoon','Diet Coke','Mayonaise Heinz','Instant noodles','knife','lemon']
if __name__ == '__main__':
main(sys.argv)
|
gpl-3.0
| 117,786,366,276,852,400
| 37.576923
| 304
| 0.540763
| false
| 3.158268
| false
| false
| false
|
carlosperate/LightUpPi-Alarm
|
LightUpHardware/test/HardwareSwitch_test.py
|
1
|
4048
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Unit test for the HardwareSwitch module.
#
# Copyright (c) 2015 carlosperate https://github.com/carlosperate/
#
# Licensed under The MIT License (MIT), a copy can be found in the LICENSE file
#
# These tests require the Wemo Switch to be on the network at the defined IP
# address.
#
from __future__ import unicode_literals, absolute_import
import io
import mock
import unittest
from time import sleep
try:
import LightUpHardware.HardwareSwitch as HardwareSwitch
from LightUpHardware.pywemoswitch.WemoSwitch import WemoSwitch
except ImportError:
import os
import sys
file_dir = os.path.dirname(os.path.realpath(__file__))
package_dir = os.path.dirname(os.path.dirname(file_dir))
sys.path.insert(0, package_dir)
import LightUpHardware.HardwareSwitch as HardwareSwitch
from LightUpHardware.pywemoswitch.WemoSwitch import WemoSwitch
class HardwareSwitchTestCase(unittest.TestCase):
"""
Tests for HardwareSwitch functions.
    These tests require the Wemo Switch to be on the network at the defined IP
address.
"""
#
# Helper methods
#
def assert_stderr(self, test_srderr, equal=False):
""" Checks the stderr error string and resets it for next test. """
if equal is True:
self.assertEqual(test_srderr.getvalue(), '')
else:
self.assertNotEqual(test_srderr.getvalue(), '')
test_srderr.truncate(0)
test_srderr.write('')
self.assertEqual(test_srderr.getvalue(), '')
#
# Tests
#
def test__get_switch(self):
"""
Tests if an error is set when a switch cannot be connected. Due to the
connection timeout this test can take several seconds to complete.
"""
# We capture stderr to check for invalid input IP
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
            # An invalid switch address causes an error to be printed
switch = HardwareSwitch._get_switch('127.0.0.1')
self.assert_stderr(test_srderr)
self.assertIsNone(switch)
# Test that the default IP returns a connected switch instance
switch = HardwareSwitch._get_switch()
self.assertEqual(type(switch), WemoSwitch)
def test_switch_on_off(self):
"""
Tests the switch Turns ON and OFF with the default input and a given
switch.
"""
state = HardwareSwitch.switch_on()
self.assertTrue(state)
sleep(1)
state = HardwareSwitch.switch_off()
self.assertFalse(state)
switch = HardwareSwitch._get_switch()
state = HardwareSwitch.switch_on(switch)
self.assertTrue(state)
sleep(1)
state = HardwareSwitch.switch_off(switch)
self.assertFalse(state)
def test_safe_on(self):
""" Tests the default switch Turns ON only if already ON. """
switch = HardwareSwitch._get_switch()
switch_is_on = switch.get_state()
if switch_is_on is True:
switch.turn_off()
switch_is_on = switch.get_state()
self.assertFalse(switch_is_on)
HardwareSwitch.safe_on()
switch_is_on = switch.get_state()
self.assertTrue(switch_is_on)
        # We capture stderr to check for switch already ON when called and
# mock the turn off method to check if it was called
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
with mock.patch('LightUpHardware.pywemoswitch.WemoSwitch') as \
mock_switch:
self.assert_stderr(test_srderr, True)
HardwareSwitch.safe_on()
self.assertEqual(mock_switch.turn_off.call_count, 0)
self.assert_stderr(test_srderr)
switch_is_on = switch.get_state()
self.assertTrue(switch_is_on)
# to clean up, turn the switch off
sleep(1)
switch.turn_off()
if __name__ == '__main__':
unittest.main()
|
mit
| -2,191,243,379,043,929,300
| 33.305085
| 79
| 0.63414
| false
| 3.949268
| true
| false
| false
|
shub0/algorithm-data-structure
|
python/BST_iterator.py
|
1
|
1180
|
#! /usr/bin/python
'''
Implement an iterator over a binary search tree (BST). Your iterator will be initialized with the root node of a BST.
Calling next() will return the next smallest number in the BST.
Note: next() and hasNext() should run in average O(1) time and uses O(h) memory, where h is the height of the tree.
'''
from node_struct import TreeNode
class BSTIterator:
# @param root, a binary search tree's root node
def __init__(self, root):
self.root = root
self.path = list()
next_node = root
while next_node:
self.path.append(next_node)
next_node = next_node.left
# @return a boolean, whether we have a next smallest number
def hasNext(self):
return len(self.path) > 0
# @return an integer, the next smallest number
def next(self):
res = self.path.pop()
if res.right:
next_node = res.right
while next_node:
self.path.append(next_node)
next_node = next_node.left
return res.val
# Your BSTIterator will be called like this:
# i, v = BSTIterator(root), []
# while i.hasNext(): v.append(i.next())
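# Hedged example (editor's addition): exercising the iterator on a tiny tree.
# It assumes node_struct.TreeNode can be built as TreeNode(val) and exposes
# the val/left/right attributes this class already relies on.
#
#     root = TreeNode(2)
#     root.left, root.right = TreeNode(1), TreeNode(3)
#     i, v = BSTIterator(root), []
#     while i.hasNext(): v.append(i.next())
#     # v == [1, 2, 3]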
|
bsd-3-clause
| 5,504,190,055,761,006,000
| 30.052632
| 117
| 0.620339
| false
| 3.710692
| false
| false
| false
|
Loudr/pale
|
pale/endpoint.py
|
1
|
25122
|
# -*- coding: utf-8 -*-
import datetime
import json
import logging
import sys
import threading
import arrow
from pale import config as pale_config
from pale.arguments import BaseArgument
from pale.fields import ResourceField, ListField, ResourceListField
from pale.errors import APIError, ArgumentError, AuthenticationError
from pale.meta import MetaHasFields
from pale.resource import NoContentResource, Resource, DebugResource
from pale.response import PaleRaisedResponse
_tls = threading.local()
def get_current_context():
"""Return the context associated with the current request."""
return _tls.current_context
def set_current_context(context):
"""Set the context associated with the current request."""
_tls.current_context = context
class PaleDefaultJSONEncoder(json.JSONEncoder):
"""The default JSON Encoder for Pale.
The main difference between this and Python's default JSON encoder
is that this encoder attempts to serialize datetimes to ISO format,
and tries to call a `to_dict` method on the passed in object before
giving up.
"""
def default(self, obj):
"""Default JSON encoding."""
try:
if isinstance(obj, datetime.datetime):
# do the datetime thing, or
encoded = arrow.get(obj).isoformat()
else:
# try the normal encoder
encoded = json.JSONEncoder.default(self, obj)
except TypeError as e:
# if that fails, check for the to_dict method,
if hasattr(obj, 'to_dict') and callable(obj.to_dict):
# and use it!
encoded = obj.to_dict()
else:
raise e
return encoded
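# Hedged illustration (editor's addition) of the fallback order implemented in
# default() above: datetimes are serialized to ISO 8601 via arrow, and objects
# exposing a callable to_dict() are serialized through it. Thing is a made-up
# class, not part of pale.
#
#     class Thing(object):
#         def to_dict(self):
#             return {'id': 1}
#
#     PaleDefaultJSONEncoder().encode(
#         {'when': datetime.datetime(2015, 1, 1), 'what': Thing()})
#     # -> '{"what": {"id": 1}, "when": "2015-01-01T00:00:00+00:00"}'
#     # (key order may vary)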
class Endpoint(object):
"""Base-class for implemented Endpoints."""
__metaclass__ = MetaHasFields
_response_class = None
_json_serializer = PaleDefaultJSONEncoder()
_default_cache = 'no-cache'
@classmethod
def _fix_up_fields(cls):
"""Add names to all of the Endpoint's Arguments.
This method will get called on class declaration because of
Endpoint's metaclass. The functionality is based on Google's NDB
implementation."""
cls._arguments = dict()
if cls.__module__ == __name__: # skip the classes in this file
return
for name in set(dir(cls)):
attr = getattr(cls, name, None)
if isinstance(attr, BaseArgument):
if name.startswith('_'):
raise TypeError("Endpoint argument %s cannot begin with "
"an underscore, as these attributes are reserved "
"for instance variables of the endpoint object, "
"rather than for arguments to your HTTP Endpoint."
% name)
attr._fix_up(cls, name)
cls._arguments[attr.name] = attr
def _set_response_class(self, response_class):
"""Set the response class for this endpoint.
This is usually only called by the Pale adapter,
and intended to be called with the Response object
of the HTTP layer that you're using.
"""
self._response_class = response_class
@classmethod
def _set_json_serializer(cls, serializer):
cls._json_serializer = serializer
@classmethod
def _metadata(cls, *args, **kwargs):
return dict(**kwargs)
def _handle(self, context):
"""The meat of the API logic.
This method is intended to be overridden by subclasses,
and should perform the core logic of the API method in question.
"""
pass
def _finally(self):
"""Executed after the success, or failure, of _execute()."""
pass
def _execute(self, request, **kwargs):
"""The top-level execute function for the endpoint.
This method is intended to remain as-is, and not be overridden.
It gets called by your HTTP framework's route handler, and performs
the following actions to process the request:
``authenticate_request``
Validate the Bearer token, populate the ``current_user``, and make
sure that the token covers the scope needed to call the requested
method.
*
*
``parse arguments``
The argument parser is responsible for:
- First, coercing and patching any parameters that might require
it due to versioning (i.e. the caller is using an old API
version that supports `index` as a parameter for pagination,
but the current version uses the name `offset`)
- Second, iterating through the endpoint's supported arguments
and validating that the params passed in comply with the
endpoint's requirements
- Third, populating the `context.args` array with the validated
arguments
If any of the arguments are invalid, then the Argument parser will
raise an ArgumentError that bubbles up to the `try/catch` block of
the execute method.
*
*
``before handler``
The before_handlers are specified by the Endpoint definition, and
are intended to supporty DRY-ing up your codebase. Have a set of
Endpoints that all need to grab an object from the ORM based on the
same parameter? Make them inherit from an Endpoint subclass that
performs that task in a before_handler!
*
*
``handle``
The core logic of your API endpoint, as implemented by you in your
Endpoint subclass. The API Framework expects ``handle`` to return
a dictionary specifying the response object and the JSON key that
it should hang off of, or a tuple of a dictionary and an HTTP status
code.
*
*
``after_handler``
Like the before_handlers, the ``after_handlers`` happen after the
handle method, and allow the endpoint developer to re-use code for
post-processing data from an endpoint.
*
*
``render response``
Like the argument parser, the response renderer is responsible for
a few things:
- First, it converts the ORM objects into JSON-serializable
Python dictionaries using the Resource objects defined by the
API implementation,
            - Second, it does any version parameter coercion, renaming and
reformatting the edge version of the response to match the
version requested by the API caller,
- and Third, it serializes the Python dictionary into the response
format requested by the API caller (right now, we only support
JSON responses, but it'd be reasonble to support something like
HTML or XML or whatever in the future).
The rendered JSON text is then returned as the response that should
be sent by your HTTP framework's routing handler.
*
*
``_after_response_handler``
The `_after_response_handlers` are specified by the Endpoint
definition, and enable manipulation of the response object before it
is returned to the client, but after the response is rendered.
Because these are instancemethods, they may share instance data
from `self` specified in the endpoint's `_handle` method.
``_finalize_content``
The `_finalize_content` method is overridden by the Endpoint and is called
after the response is rendered into a serializable result.
This method is called with two arguments, the context and the rendered content,
and expected to return updated rendered content.
For in-place modification of dicts, this method will still be expected
to return the given argument.
``_allow_cors``
This value is set to enable CORs for a given endpoint.
When set to a string it supplies an explicit value to
'Access-Control-Allow-Origin'.
Set to True, this will allow access from *all* domains;
Access-Control-Allow-Origin = "*"
"""
try:
self._create_context(request)
self._authenticate()
context = get_current_context()
self._parse_args()
if hasattr(self, '_before_handlers') and \
isinstance(self._before_handlers, (list, tuple)):
for handler in self._before_handlers:
handler(context)
context.handler_result = self._handle(context)
if hasattr(self, '_after_handlers') and \
isinstance(self._after_handlers, (list, tuple)):
for handler in self._after_handlers:
handler(context)
self._render()
response = context.response
# After calling ._render(), the response is ready to go, so we
# shouldn't need to handle any other exceptions beyond this point.
except AuthenticationError as e:
if hasattr(e, 'message') and e.message is not None:
message = e.message
else:
message = "You don't have permission to do that."
err = APIError.Forbidden(message)
response = self._response_class(*err.response)
response.headers["Content-Type"] = 'application/json'
except ArgumentError as e:
err = APIError.UnprocessableEntity(e.message)
response = self._response_class(*err.response)
response.headers["Content-Type"] = 'application/json'
except APIError as e:
response = self._response_class(*e.response)
response.headers["Content-Type"] = 'application/json'
except PaleRaisedResponse as r:
response = self._response_class(*r.response)
response.headers["Content-Type"] = 'application/json'
except Exception as e:
logging.exception("Failed to handle Pale Endpoint %s: %r", self.__class__.__name__,
e)
err = APIError.Exception(repr(e))
response = self._response_class(*err.response)
response.headers["Content-Type"] = 'application/json'
allow_cors = getattr(self, "_allow_cors", None)
if allow_cors is True:
response.headers['Access-Control-Allow-Origin'] = '*'
elif isinstance(allow_cors, basestring):
response.headers['Access-Control-Allow-Origin'] = allow_cors
context.response = response
try:
if hasattr(self, '_after_response_handlers') and \
isinstance(self._after_response_handlers, (list, tuple)):
for handler in self._after_response_handlers:
handler(context, response)
except Exception as e:
logging.exception(
"Failed to process _after_response_handlers for Endpoint %s",
self.__class__.__name__)
raise
return response
def _create_context(self, request):
if pale_config.create_context is None:
raise ValueError((
"\n\nPale does not appear to be configured, as there is no "
"context creator currently set!\n\n"))
context = pale_config.create_context(self, request)
set_current_context(context)
def _authenticate(self):
if pale_config.authenticate_context is None:
raise ValueError((
"\n\nPale does not appear to be configured, as there is no "
"context authenticator currently set!\n\n"))
pale_config.authenticate_context(get_current_context())
def _patch_args(self):
# do something like:
# version = context.api_version
# coersion_dict = self.grab_version_coersion_info_from_impl(version)
# self.patched_args = self.coerce(self._raw_args, coersion_dict)
# but for now, just push the raw args through
context = get_current_context()
context.patched_args = context._raw_args
def _parse_args(self):
context = get_current_context()
self._patch_args()
parsed_args = dict()
if self._arguments is not None:
if not isinstance(self._arguments, dict):
raise ValueError("""Your API implementation is broken. This
endpoint's `arguments` value is a `%s` when it should be a dict
instead. Please see the Pale documentation for information on
                    how to fix the problem.""" % (type(self._arguments), ))
for arg_name, arg_obj in self._arguments.iteritems():
patched_value = context.patched_args.get(arg_name, None)
# HTTP libraries are crap, so we expect `patched_value` to
# be a list, which we strip out if the length is 1 and if the
# validator doesn't expect a list
if patched_value is not None and \
isinstance(patched_value, list) and \
len(patched_value) == 1 and \
list not in arg_obj.allowed_types:
patched_value = patched_value[0]
# validate will return the validated (and thus valid) value on
# success, or raise an ArgumentError if the value is invalid
validated_value = arg_obj.validate(patched_value, arg_name)
if validated_value is not None:
parsed_args[arg_name] = validated_value
context.args = parsed_args
def _parse_handler_result(self, result):
"""Parses the item(s) returned by your handler implementation.
Handlers may return a single item (payload), or a tuple that gets
passed to the Response class __init__ method of your HTTP layer.
_parse_handler_result separates the payload from the rest the tuple,
as well as providing the tuple so that it can be re-composed after
the payload has been run through the `_returns` Resource's renderer.
"""
if isinstance(result, (list, tuple)):
payload = result[0]
list_result = list(result)
else:
payload = result
list_result = [""]
return payload, list_result
def _render(self):
# first, serialize the Python objects in the response_dict into a dict
context = get_current_context()
rendered_content = dict()
unrendered_content, response_init_list = self._parse_handler_result(
context.handler_result)
if hasattr(unrendered_content, 'iteritems'):
for k, v in unrendered_content.iteritems():
# usually there should only be one key and value here
dict_val = self._returns._render_serializable(v, context)
# this is where object versioning should be implemented, but
# one outstanding question with it is, should this be the
# responsibility of the Resource object, or of the endpoint?
# coerced_dict_val = self.returns.versionify(dict_val,
# context.api_version)
rendered_content[k] = dict_val
else:
# maybe it's a nonetype or a simple string?
rendered_content = self._returns._render_serializable(
unrendered_content, context)
try:
if hasattr(self, '_finalize_content'):
rendered_content = self._finalize_content(context, rendered_content)
except:
logging.exception("Failed to complete %s._finalize_content",
self.__class__.__name__)
raise
# now build the response
if rendered_content is None and \
isinstance(self._returns, NoContentResource):
json_content = ''
else:
json_content = self._json_serializer.encode(rendered_content)
response_init_list[0] = json_content
response_init_tuple = tuple(response_init_list)
if self._response_class is None:
raise ValueError("""Error with Pale configuration. Attempted to
parse a handler result without a response class set on the endpoint.
This is probably an issue with the pale HTTP adapter you're using,
since that is where the response class is usually set.""")
context.response = self._response_class(*response_init_tuple)
# patch up cache-control
updated_cache_ctrl_from_endpoint = False
if len(response_init_tuple) > 2:
# headers is the 3rd arg for both flask and webapp2
headers = response_init_tuple[2]
cache_ctrl = headers.get('Cache-Control')
if cache_ctrl is not None:
context.response.headers['Cache-Control'] = cache_ctrl
updated_cache_ctrl_from_endpoint = True
if not updated_cache_ctrl_from_endpoint:
context.response.headers['Cache-Control'] = \
self._default_cache
# Add default json response type.
if len(json_content):
context.response.headers["Content-Type"] = 'application/json'
else:
del context.response.content_type
del context.response.content_length
            status_code = getattr(context.response, "status_int", None) or context.response.status_code
            if status_code == 200:  # 200 OK with no body becomes 204 No Content
                context.response.status = '204 No Content'
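# Hedged sketch (editor's addition): the minimal shape of a concrete Endpoint.
# Only the hooks shown (_http_method, _returns, class-level arguments, and
# _handle) come from the machinery above; WidgetResource, StringArgument and
# lookup_widget are hypothetical names used purely for illustration.
#
#     class GetWidgetEndpoint(Endpoint):
#         _http_method = "GET"
#         _returns = WidgetResource()                 # hypothetical Resource
#
#         widget_id = StringArgument(required=True)   # hypothetical argument
#
#         def _handle(self, context):
#             widget = lookup_widget(context.args['widget_id'])
#             return {'widget': widget}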
class ResourcePatch(object):
"""Represents a resource patch which is to be applied
to a given dictionary or object."""
def __init__(self, patch, resource, ignore_missing_fields=False):
self.patch = patch
self.resource = resource
self.ignore_missing_fields = ignore_missing_fields
def get_field_from_resource(self, field):
if isinstance(self.resource, DebugResource):
# no fields defined in a DebugResource
return None
try:
return self.resource._fields[field]
except KeyError:
if not self.ignore_missing_fields:
raise APIError.BadRequest(
"Field '%s' is not expected." % field)
return None
def get_resource_from_field(self, field):
assert isinstance(field, ResourceField)
return field.resource_type()
def cast_value(self, field, value):
if isinstance(field, ResourceListField):
if not isinstance(value, dict):
raise APIError.BadRequest(
"Expected nested object in list for %s" % field)
try:
resource = field.resource_type()
if isinstance(resource, DebugResource):
return value.copy()
new_object = dict()
for k,v in value.iteritems():
if not k in resource._fields and self.ignore_missing_fields:
new_object[k] = v
continue
_field = resource._fields[k]
if _field.property_name is not None:
k = _field.property_name
new_object[k] = self.cast_value(_field, v)
if not getattr(resource, "_underlying_model", None):
return new_object
return resource._underlying_model(**new_object)
except Exception:
logging.exception(
"Failed to cast value to _underlying_model of resource_type: %s" %
getattr(field, 'resource_type', None))
raise
# TODO: Use field to cast field back into a value,
# if possible.
return value
def apply_to_dict(self, dt):
for k,v in self.patch.iteritems():
field = self.get_field_from_resource(k)
if field is None:
dt[k] = v
continue
elif isinstance(v, dict):
# Recursive application.
resource = self.get_resource_from_field(field)
patch = ResourcePatch(v, resource,
ignore_missing_fields=self.ignore_missing_fields)
patch.apply_to_dict(dt[k])
elif isinstance(v, list):
if (not isinstance(field, ResourceListField) and
not isinstance(field, ListField)):
raise APIError.BadRequest(
"List not expected for field '%s'" % k)
new_list = []
for itm in v:
new_list.append(self.cast_value(field, itm))
dt[k] = new_list
else:
# Cast value and store
dt[k] = self.cast_value(field, v)
def apply_to_model(self, dt):
for k,v in self.patch.iteritems():
field = self.get_field_from_resource(k)
if field is None:
setattr(dt, k, v)
elif isinstance(v, dict):
# Recursive application.
resource = self.get_resource_from_field(field)
patch = ResourcePatch(v, resource,
ignore_missing_fields=self.ignore_missing_fields)
patch.apply_to_model(getattr(dt, k, None))
elif isinstance(v, list):
if (not isinstance(field, ResourceListField) and
not isinstance(field, ListField)):
raise APIError.BadRequest(
"List not expected for field '%s'" % k)
new_list = []
for itm in v:
new_list.append(self.cast_value(field, itm))
setattr(dt, k, new_list)
else:
# Cast value and set
setattr(dt, k, self.cast_value(field, v))
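# Hedged example (editor's addition): applying a merge patch to a plain dict
# with apply_to_dict(). A DebugResource is used so no field validation runs;
# its constructor arguments here are an assumption, as are the field names.
#
#     patch = ResourcePatch({"name": "new name"}, DebugResource("doc"),
#                           ignore_missing_fields=True)
#     target = {"name": "old name", "count": 3}
#     patch.apply_to_dict(target)
#     # target -> {"name": "new name", "count": 3}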
class PatchEndpoint(Endpoint):
"""Provides a base endpoint for implementing JSON Merge Patch requests.
See RFC 7386 @ https://tools.ietf.org/html/rfc7386
"""
MERGE_CONTENT_TYPE = 'application/merge-patch+json'
_http_method = "PATCH"
def _handle_patch(self, context, patch):
raise NotImplementedError("%s should override _handle_patch" %
self.__class__.__name__)
def _handle(self, context):
resource = getattr(self, "_resource", None)
if not isinstance(resource, Resource):
raise NotImplementedError(
"%s needs to define _resource: Resource which will be patched" %
self.__class__.__name__)
        if ((context.headers.get('Content-Type') or '').lower() !=
                self.MERGE_CONTENT_TYPE):
raise APIError.UnsupportedMedia("PATCH expects content-type %r" %
self.MERGE_CONTENT_TYPE)
try:
patch = ResourcePatch(patch=json.loads(context.body),
resource=resource)
except Exception, exc:
raise APIError.UnprocessableEntity(
"Could not decode JSON from request payload: %s" %
exc)
return self._handle_patch(context, patch)
class PutResourceEndpoint(Endpoint):
"""Provides a base endpoint for implementing JSON PUT resource.
See RFC 7386 @ https://tools.ietf.org/html/rfc7386
"""
MERGE_CONTENT_TYPE = 'application/json'
_http_method = "PUT"
def _handle_put(self, context, patch):
raise NotImplementedError("%s should override _handle_patch" %
self.__class__.__name__)
def _handle(self, context):
resource = getattr(self, "_resource", None)
if not isinstance(resource, Resource):
raise NotImplementedError(
"%s needs to define _resource: Resource which will be patched" %
self.__class__.__name__)
        if ((context.headers.get('Content-Type') or '').lower() !=
                self.MERGE_CONTENT_TYPE):
            raise APIError.UnsupportedMedia("PUT expects content-type %r" %
                                            self.MERGE_CONTENT_TYPE)
try:
patch = ResourcePatch(patch=json.loads(context.body),
resource=resource)
except Exception, exc:
raise APIError.UnprocessableEntity(
"Could not decode JSON from request payload: %s" %
exc)
return self._handle_put(context, patch)
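# Hedged sketch (editor's addition): wiring up a PATCH endpoint. The resource
# and helper names are placeholders; the two required hooks (_resource and
# _handle_patch) are the ones checked by PatchEndpoint._handle above.
#
#     class UpdateWidgetEndpoint(PatchEndpoint):
#         _resource = WidgetResource()       # hypothetical Resource subclass
#
#         def _handle_patch(self, context, patch):
#             widget = load_widget(context)  # hypothetical loader
#             patch.apply_to_model(widget)
#             widget.save()
#             return {'widget': widget}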
|
mit
| -4,322,117,158,030,105,000
| 39.454106
| 103
| 0.578656
| false
| 4.805279
| false
| false
| false
|
code-ape/SocialJusticeDataProcessing
|
category.py
|
1
|
6149
|
import tools
import settings
def base_demographic(data, demographic_questions):
breakdowns = {}
for question_num in demographic_questions:
responses = tools.get_responses_to_number(question_num, data)
title = tools.get_question_title(question_num, data)
values = tools.extract_vals_from_responses(responses)[0]
breakdown = create_breakdown(values)
breakdowns[title] = breakdown
return breakdowns
def generate_answer_response_lists(data, opinion_questions):
print("Generating answer response list.")
answer_response_dict = {}
for question_num in opinion_questions:
responses = tools.get_responses_to_number(question_num, data)
values = tools.extract_vals_from_responses(responses, data)[0]
title = tools.get_question_title(question_num, data)
index_breakdown = create_index_breakdown(values)
answer_response_dict[title] = index_breakdown
print("Done generating answer response list.")
return answer_response_dict
def generate_demographic_for_response_lists(answer_response_lists, data):
count = 0
question_dict = {}
for title, response_dict in answer_response_lists.iteritems():
question_num = tools.get_question_num_with_title(title, data)
answer_breakdown_dict = {}
for response_val, response_nums in response_dict.iteritems():
responses = []
for response_num in response_nums:
responses.append(data[response_num])
breakdowns = base_demographic(responses, settings.student_demographic_questions)
count += len(breakdowns)
answer_breakdown_dict[response_val] = breakdowns
question_dict[title] = answer_breakdown_dict
print("generate_demographic_for_response_lists did {} breakdowns.".format(count))
return question_dict
def calc_demographic_diff(base_demographic, opinion_demographic_dict):
opinion_demographic_diff_dict = {}
for question_name, answer_dict in opinion_demographic_dict.iteritems():
answer_diff_dict = {}
for choice, demographic in answer_dict.iteritems():
answer_diff_dict[choice] = create_demographic_diff(base_demographic, demographic)
opinion_demographic_diff_dict[question_name] = answer_diff_dict
return opinion_demographic_diff_dict
def find_interesting_demographic_changes(opinion_demographic_diff_dict):
interesting_demographic_changes = []
    threshold = 25
counter = 0
for question_name, answer_dict in opinion_demographic_diff_dict.iteritems():
for choice, demographic in answer_dict.iteritems():
for title, breakdown in demographic.iteritems():
for answer, nums in breakdown.iteritems():
percent_shift = nums["percent_shift"]
                    if percent_shift > threshold or percent_shift < -threshold:
interesting_demographic_changes.append({
"question": question_name,
"question_choice": choice,
"demographic_title": title,
"demographic_answer": answer,
"percent_shift": percent_shift
})
counter += 1
print("Found {} interesting results".format(counter))
return interesting_demographic_changes
def save_interesting_demographics_changes_to_file(interesting_demographic_changes, path):
print("Saving {} interesting demographic change entries to: {}".format(
len(interesting_demographic_changes), path
))
with open(path, "w") as f:
for entry in interesting_demographic_changes:
f.write("Question: {}\n".format(entry["question"]))
f.write("Choice: {}\n".format(entry["question_choice"]))
f.write("Demographic Category: {}\n".format(entry["demographic_title"]))
f.write("Demographic: {}\n".format(entry["demographic_answer"]))
f.write("Shift: {}\n\n\n".format(entry["percent_shift"]))
print("Done saving entries.")
def print_breakdown(title, breakdown):
print("\n\nBreakdown for {}".format(title))
for val, nums in breakdown.iteritems():
print("{}: {}, {:.1f}%".format(val, nums['number'], nums['percentage']))
def create_breakdown(values):
answer_dict = {}
    # really hacky way of handling answers where multiple
    # options could be chosen
for val in values:
choices = None
if not isinstance(val, list):
choices = [val]
else:
choices = val
for choice in choices:
if choice not in answer_dict:
answer_dict[choice] = 0
answer_dict[choice] += 1
breakdown_dict = {}
total_values = float(len(values))
for val, num in answer_dict.iteritems():
breakdown_dict[val] = {"number": num, "percentage": 100*num/total_values}
return breakdown_dict
def create_index_breakdown(values):
breakdown = {}
count = 0
for val in values:
choices = None
if not isinstance(val, list):
choices = [val]
else:
choices = val
for choice in choices:
if choice not in breakdown:
breakdown[choice] = []
breakdown[choice].append(count)
count+=1
return breakdown
def create_demographic_diff(base_demographic, contrast_demographic):
demographic_diff = {}
for title, breakdown in base_demographic.iteritems():
contrast_breakdown = contrast_demographic[title]
breakdown_diff = {}
for answer, nums in breakdown.iteritems():
contrast_nums = None
if answer in contrast_breakdown:
contrast_nums = contrast_breakdown[answer]
else:
contrast_nums = {"percentage": 0}
shift = contrast_nums["percentage"] - nums["percentage"]
breakdown_diff[answer] = {
"percent_shift": shift
}
demographic_diff[title] = breakdown_diff
return demographic_diff
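# Hedged illustration (editor's addition) of what create_breakdown() returns
# for a small list of answers, including the multi-select (list) case handled
# above. The answer strings are invented.
#
#     create_breakdown(["yes", "no", ["yes", "maybe"]])
#     # -> {"yes":   {"number": 2, "percentage": 66.66...},
#     #     "no":    {"number": 1, "percentage": 33.33...},
#     #     "maybe": {"number": 1, "percentage": 33.33...}}
#     # Percentages are relative to the 3 responses, so they can sum to more
#     # than 100 when a respondent picks several options.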
|
apache-2.0
| -8,700,736,649,998,507,000
| 37.192547
| 93
| 0.623191
| false
| 4.168814
| false
| false
| false
|
drewverlee/art
|
tests/test_app_user_interaction.py
|
1
|
2114
|
#!/usr/local/bin/python
import unittest
import sys
from selenium import webdriver
servers = {
'live' : 'http://safe-sands-8472.herokuapp.com/',
'local': 'http://127.0.0.1:5000/'
}
class TestApp(unittest.TestCase):
BASE = servers['local']
@classmethod
def setUpClass(cls):
cls.c = webdriver.PhantomJS()
def setUp(self):
self.c.get(self.BASE)
    @classmethod
    def tearDownClass(cls):
        cls.c.quit()
def test_app(self):
#test layout
self.assertEqual("DREW'S ART", self.c.find_element_by_class_name('logo').text)
self.assertTrue('about' in self.c.find_element_by_tag_name('nav').text.lower())
self.assertTrue('about' in self.c.find_element_by_id('fnav').text.lower())
self.assertEqual(3, len(self.c.find_elements_by_class_name('cover_art')))
self.assertEqual(4, len(self.c.find_elements_by_tag_name('meta')))
self.assertTrue('NY' in self.c.find_element_by_class_name('copy_right').text)
#test index/home
self.assertEqual('Home', self.c.title)
self.assertEqual(3, len(self.c.find_elements_by_class_name('cover_art')))
# test purchase
self.c.find_element_by_class_name('purchase_link').click()
self.assertEqual('Purchase', self.c.title)
self.assertTrue(self.c.find_element_by_class_name('art'))
self.assertTrue('purchasing' in self.c.find_element_by_class_name('purchase_info').text)
self.assertTrue('small' in self.c.find_element_by_class_name('price').text)
self.assertTrue(self.c.find_element_by_class_name('email-link'))
# test about
self.c.find_element_by_class_name('about_link').click()
self.assertEqual('About', self.c.title)
self.assertTrue(self.c.find_element_by_class_name('picture'))
self.assertTrue('drew' in self.c.find_element_by_class_name('text').text)
self.assertTrue(self.c.find_element_by_class_name('art'))
if __name__ == '__main__':
if len(sys.argv) > 1: TestApp.BASE = servers[sys.argv.pop()]
unittest.main()
|
mit
| 6,670,995,368,726,679,000
| 31.030303
| 96
| 0.625828
| false
| 3.267388
| true
| false
| false
|
Sult/daf
|
apps/corporations/models/corporations.py
|
1
|
1185
|
from django.db import models
#from django.conf import settings
#from config.storage import OverwriteStorage
#from utils.common import icon_size_name
from utils.connection import *
class CorporationApi(models.Model):
""" charactertype apis """
api = models.OneToOneField('apies.Api')
corporationid = models.BigIntegerField()
corporationname = models.CharField(max_length=254)
characterid = models.BigIntegerField()
def __unicode__(self):
return self.corporationname
#class CorporationIcon(models.Model):
#""" images related to characters """
#relation = models.ForeignKey("corporations.Corporation")
#size = models.IntegerField(choices=settings.IMAGE_SIZES)
#typeid = models.IntegerField(unique=True)
#icon = models.ImageField(
#upload_to="images/corporations/",
#storage=OverwriteStorage(),
#blank=True, null=True)
#class Meta:
#unique_together = ["size", "relation"]
#def __unicode__(self):
#return "Corporation Image %s" % icon_size_name(self.size)
##get list of wanted character icon sizes
#@staticmethod
#def icon_sizes():
#return [32, 64, 128, 256]
|
mit
| -2,615,864,952,555,058,000
| 27.902439
| 66
| 0.681857
| false
| 3.97651
| false
| false
| false
|
googleapis/python-compute
|
google/cloud/compute_v1/services/addresses/transports/rest.py
|
1
|
20223
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1 # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.auth.transport.requests import AuthorizedSession
from google.cloud.compute_v1.types import compute
from .base import AddressesTransport, DEFAULT_CLIENT_INFO
class AddressesRestTransport(AddressesTransport):
"""REST backend transport for Addresses.
The Addresses API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._prep_wrapped_messages(client_info)
def aggregated_list(
self,
request: compute.AggregatedListAddressesRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.AddressAggregatedList:
r"""Call the aggregated list method over HTTP.
Args:
request (~.compute.AggregatedListAddressesRequest):
The request object. A request message for
Addresses.AggregatedList. See the method
description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.AddressAggregatedList:
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/aggregated/addresses".format(
host=self._host, project=request.project,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.AggregatedListAddressesRequest.filter in request:
query_params["filter"] = request.filter
if compute.AggregatedListAddressesRequest.include_all_scopes in request:
query_params["includeAllScopes"] = request.include_all_scopes
if compute.AggregatedListAddressesRequest.max_results in request:
query_params["maxResults"] = request.max_results
if compute.AggregatedListAddressesRequest.order_by in request:
query_params["orderBy"] = request.order_by
if compute.AggregatedListAddressesRequest.page_token in request:
query_params["pageToken"] = request.page_token
if compute.AggregatedListAddressesRequest.return_partial_success in request:
query_params["returnPartialSuccess"] = request.return_partial_success
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.get(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.AddressAggregatedList.from_json(
response.content, ignore_unknown_fields=True
)
def delete(
self,
request: compute.DeleteAddressRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteAddressRequest):
The request object. A request message for
Addresses.Delete. See the method
description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses.
Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource.
- For regional operations, use the ``regionOperations``
resource.
- For zonal operations, use the ``zonalOperations``
resource.
For more information, read Global, Regional, and Zonal
Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/addresses/{address}".format(
host=self._host,
project=request.project,
region=request.region,
address=request.address,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.DeleteAddressRequest.request_id in request:
query_params["requestId"] = request.request_id
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.delete(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
def get(
self,
request: compute.GetAddressRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Address:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetAddressRequest):
The request object. A request message for Addresses.Get.
See the method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Address:
Use global external addresses for GFE-based external
HTTP(S) load balancers in Premium Tier.
Use global internal addresses for reserved peering
network range.
Use regional external addresses for the following
resources:
- External IP addresses for VM instances - Regional
external forwarding rules - Cloud NAT external IP
addresses - GFE based LBs in Standard Tier - Network
LBs in Premium or Standard Tier - Cloud VPN gateways
(both Classic and HA)
Use regional internal IP addresses for subnet IP ranges
(primary and secondary). This includes:
- Internal IP addresses for VM instances - Alias IP
ranges of VM instances (/32 only) - Regional internal
forwarding rules - Internal TCP/UDP load balancer
addresses - Internal HTTP(S) load balancer addresses
- Cloud DNS inbound forwarding IP addresses
For more information, read reserved IP address.
(== resource_for {$api_version}.addresses ==) (==
resource_for {$api_version}.globalAddresses ==)
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/addresses/{address}".format(
host=self._host,
project=request.project,
region=request.region,
address=request.address,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.get(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.Address.from_json(response.content, ignore_unknown_fields=True)
def insert(
self,
request: compute.InsertAddressRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertAddressRequest):
The request object. A request message for
Addresses.Insert. See the method
description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses.
Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource.
- For regional operations, use the ``regionOperations``
resource.
- For zonal operations, use the ``zonalOperations``
resource.
For more information, read Global, Regional, and Zonal
Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Jsonify the request body
body = compute.Address.to_json(
request.address_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
)
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/addresses".format(
host=self._host, project=request.project, region=request.region,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.InsertAddressRequest.request_id in request:
query_params["requestId"] = request.request_id
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.post(url, headers=headers, data=body,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
def list(
self,
request: compute.ListAddressesRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.AddressList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListAddressesRequest):
The request object. A request message for Addresses.List.
See the method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.AddressList:
Contains a list of addresses.
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/addresses".format(
host=self._host, project=request.project, region=request.region,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.ListAddressesRequest.filter in request:
query_params["filter"] = request.filter
if compute.ListAddressesRequest.max_results in request:
query_params["maxResults"] = request.max_results
if compute.ListAddressesRequest.order_by in request:
query_params["orderBy"] = request.order_by
if compute.ListAddressesRequest.page_token in request:
query_params["pageToken"] = request.page_token
if compute.ListAddressesRequest.return_partial_success in request:
query_params["returnPartialSuccess"] = request.return_partial_success
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.get(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.AddressList.from_json(
response.content, ignore_unknown_fields=True
)
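# Hedged sketch (not part of the generated transport): the TODOs above note that
# query-string values are only joined with "&" and spaces replaced by "+". A more
# robust approach would percent-encode every key and value via the standard
# library; the helper below is illustrative only and is not called anywhere.
def _encode_query_params_sketch(query_params):
    from urllib.parse import urlencode
    # e.g. {"filter": "name = my-addr", "maxResults": 10} -> "filter=name+%3D+my-addr&maxResults=10"
    return urlencode(query_params)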
__all__ = ("AddressesRestTransport",)
|
apache-2.0
| -8,557,189,965,289,486,000
| 41.307531
| 105
| 0.608911
| false
| 4.652174
| false
| false
| false
|
ningirsu/stepmania-server
|
smserver/controllers/legacy/login.py
|
1
|
2496
|
""" Login controller """
from smserver.smutils.smpacket import smpacket
from smserver.smutils.smpacket import smcommand
from smserver.stepmania_controller import StepmaniaController
from smserver.resources import user_resource
from smserver import models
from smserver import exceptions
from smserver import __version__
class LoginController(StepmaniaController):
""" Controller use to manage SMO LOGIN packet """
command = smcommand.SMOClientCommand.LOGIN
require_login = False
def handle(self):
""" Handle a SMO login packet """
resource = user_resource.UserResource(self.server, self.conn.token, self.session)
if self.server.config.auth["autocreate"]:
login_func = resource.login_or_create
else:
login_func = resource.login
try:
user = login_func(self.packet["username"], self.packet["password"])
except exceptions.Forbidden as err:
self.send(smpacket.SMPacketServerNSSMONL(
packet=smpacket.SMOPacketServerLogin(
approval=1,
text=err.message
)
))
return
try:
resource.connect(user, pos=self.packet["player_number"])
except exceptions.Unauthorized as err:
self.send(smpacket.SMPacketServerNSSMONL(
packet=smpacket.SMOPacketServerLogin(
approval=1,
text=err.message
)
))
return
nb_onlines = models.User.nb_onlines(self.session)
max_users = self.server.config.server.get("max_users", -1)
if not self.users:
self._send_server_resume(nb_onlines, max_users)
self.send(smpacket.SMPacketServerNSSMONL(
packet=smpacket.SMOPacketServerLogin(
approval=0,
text="Player %s successfully login" % self.packet["username"]
)
))
self.send(models.Room.smo_list(self.session, self.active_users))
def _send_server_resume(self, nb_onlines, max_users):
self.send_message(self.server.config.server.get("motd", ""), to="me")
self.send_message(
"SMServer v%s, started on %s. %s/%s users online" % (
__version__,
self.server.started_at.strftime("%x at %X"),
nb_onlines + 1,
max_users if max_users > 0 else "--"
),
to="me")
|
mit
| 2,024,049,548,031,647,500
| 33.191781
| 89
| 0.588141
| false
| 3.918367
| false
| false
| false
|
iamshang1/Projects
|
Advanced_ML/Human_Activity_Recognition/LSTM/record_fetcher_between_subject.py
|
1
|
24384
|
import numpy as np
import glob
import sys
import random
class record_fetcher(object):
'''
creates feature arrays and labels from raw accelerometer/demographic data
splits features and labels between subjects into test/train sets
methods:
- fetch(batch_size,minibatch_size,binary,seed)
calculates summary statistics from raw accelerometer/demographic data and creates
input features and labels for lstm classifier
parameters:
- batch_size: integer
number of frames to use for each train/test instance
e.g. 1000 means each test/train instance represents 10 seconds of data
- minibatch_size: integer
number of frames to use for each set of summary statistics
e.g. 50 will calculate summary statistics over .5 second windows across each train/test instance
- binary: boolean (default True)
use True to set labels for ambulatory/non-ambulatory
use False to set labels for non-ambulatory/walking/running/upstairs/downstairs
- seed: integer (default None)
(optional) seed to use for random test/train splitting
outputs:
- numpy array representing training summary statistics and demographic data over time
dimension 0 is the index of the batch window
dimension 1 is the index of the minibatch window
dimension 2 is the summary statistics and demographic data over each minibatch window
- numpy array representing testing summary statistics and demographic data over time
dimension 0 is the index of the batch window
dimension 1 is the index of the minibatch window
dimension 2 is the summary statistics and demographic data over each minibatch window
- numpy array representing training activity label over each time window
- numpy array representing testing activity label over each time window
'''
def __init__(self):
#collect all valid subject ids
self.subjects = [102,103,105,106,107,108,110,112,113,114,115,116,117,118,119,120,\
121,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,139,140,\
142,143,144,146,148,149,150,151,152,153,154,155,156,157,159,160,161,162,163,\
164,165,166,169,170,171,172,173,174,175,177,178,179,180,181,182,183,184,185,\
186,187,188,189,190,191,192]
#categorize activity ids into ambulatory/non-ambulatory
self.dic1 = {
'ambulatory': [11,12,13,14,23,24,25,26,27,28,29,30,31,32,16,17,18,33,34],
'nonambulatory': [19,20,21,22]
}
#categorize activity ids into non-ambulatory/walking/running/upstairs/downstairs
self.dic2 = {
'nonambulatory': [19,20,21,22],
'walking': [11,12,13,14,23,24,25,26,27,28,29,30,31,32],
'running': [16,17,18],
'upstairs': [33],
'downstairs': [34]
}
#get filenames for all activity arrays
self.ambulatory = []
for i in self.dic1['ambulatory']:
self.ambulatory.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.nonambulatory = []
for i in self.dic1['nonambulatory']:
self.nonambulatory.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.walking = []
for i in self.dic2['walking']:
self.walking.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.running = []
for i in self.dic2['running']:
self.running.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.upstairs = []
for i in self.dic2['upstairs']:
self.upstairs.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.downstairs = []
for i in self.dic2['downstairs']:
self.downstairs.extend(glob.glob('../data/arrays/*_%i_*' % i))
def fetch(self,batch_size,minibatch_size,binary=True,seed=None):
'''
calculates summary statistics from raw accelerometer/demographic data and creates
input features and labels for lstm classifier
parameters:
- batch_size: integer
number of frames to use for each train/test instance
e.g. 1000 means each test/train instance represents 10 seconds of data
- minibatch_size: integer
number of frames to use for each set of summary statistics
e.g. 50 will calculate summary statistics over .5 second windows across each train/test instance
- binary: boolean (default True)
use True to set labels for ambulatory/non-ambulatory
use False to set labels for non-ambulatory/walking/running/upstairs/downstairs
- seed: integer (default None)
(optional) seed to use for random test/train splitting
outputs:
- numpy array representing training summary statistics and demographic data over time
dimension 0 is the index of the batch window
dimension 1 is the index of the minibatch window
            dimension 2 is the summary statistics and demographic data over each minibatch window
- numpy array representing testing summary statistics and demographic data over time
dimension 0 is the index of the batch window
dimension 1 is the index of the minibatch window
            dimension 2 is the summary statistics and demographic data over each minibatch window
- numpy array representing training activity label over each time window
- numpy array representing testing activity label over each time window
'''
#reserve subset of subject ids as test set
np.random.seed(seed)
X_test_subjects = np.random.choice(self.subjects,6)
X_train_list = []
y_train_list = []
X_test_list = []
y_test_list = []
batches = batch_size//minibatch_size
#for ambulatory/non-ambulatory classification
if binary:
for a in self.ambulatory:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([0,1]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([0,1]))
for a in self.nonambulatory:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([1,0]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([1,0]))
#for non-ambulatory/walking/running/upstairs/downstairs classification
else:
for a in self.nonambulatory:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([1,0,0,0,0]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([1,0,0,0,0]))
for a in self.walking:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([0,1,0,0,0]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([0,1,0,0,0]))
for a in self.running:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([0,0,1,0,0]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([0,0,1,0,0]))
for a in self.upstairs:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([0,0,0,1,0]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([0,0,0,1,0]))
for a in self.downstairs:
print 'processing %s' % a
array = np.load(a)
#avoid arrays smaller than batch_size
if array.shape[0] <= batch_size:
continue
#separate array into batches
seg = array.shape[0]//batch_size
for i in range(seg):
batch = np.empty((0,109))
                    #separate batches into minibatches and calculate summary statistics per minibatch
for j in range(batches):
subarray = array[i*batch_size+j*minibatch_size:(i+1)*batch_size+(j+1)*minibatch_size,:]
features = self._create_features(subarray)
batch = np.concatenate((batch,features),0)
#if subject id in test set, save features and labels to test set
if int(a[15:18]) in X_test_subjects:
X_test_list.append(batch)
y_test_list.append(np.array([0,0,0,0,1]))
#else save features and labels to train set
else:
X_train_list.append(batch)
y_train_list.append(np.array([0,0,0,0,1]))
#pair training X/y together and shuffle
print 'shuffling records'
Xy = zip(X_train_list,y_train_list)
random.shuffle(Xy)
#separate training X from y
X_train = np.array([record[0] for record in Xy])
y_train = np.array([record[1] for record in Xy])
print 'feature vector shape:', X_train.shape
print 'label vector shape:', y_train.shape
#pair testing X/y together and shuffle
Xy = zip(X_test_list,y_test_list)
random.shuffle(Xy)
#separate testing X from y
X_test = np.array([record[0] for record in Xy])
y_test = np.array([record[1] for record in Xy])
print 'feature vector shape:', X_test.shape
print 'label vector shape:', y_test.shape
return X_train, X_test, y_train, y_test
def _create_features(self,array):
'''
calculate summary statistics over time window
concatenate with normalized demographic data
the following features are calculated for each axis (X,Y,Z),
magnitude (sqrt of X^2+Y^2+Z^2), first differential of each axis,
and first differential of magnitude:
- mean, std, min, max
- 10,25,50,75,90 percentiles
- number of median crossings
- correlation with other axis
'''
#create features
mag = np.sqrt(array[:,0]**2+array[:,1]**2+array[:,2]**2)
x_mean = np.mean(array[:,0])
y_mean = np.mean(array[:,1])
z_mean = np.mean(array[:,2])
mag_mean = np.mean(mag)
x_std = np.std(array[:,0])
y_std = np.std(array[:,1])
z_std = np.std(array[:,2])
mag_std = np.std(mag)
x_10per = np.percentile(array[:,0],10)
x_25per = np.percentile(array[:,0],25)
x_50per = np.percentile(array[:,0],50)
x_75per = np.percentile(array[:,0],75)
x_90per = np.percentile(array[:,0],90)
x_med = np.median(array[:,0])
x_medcross = np.sum(np.diff((array[:,0]==x_med).astype(int))==1)
x_max = np.amax(array[:,0])
x_min = np.amin(array[:,0])
x_range = x_max - x_min
x_iqrange = x_75per - x_25per
y_10per = np.percentile(array[:,1],10)
y_25per = np.percentile(array[:,1],25)
y_50per = np.percentile(array[:,1],50)
y_75per = np.percentile(array[:,1],75)
y_90per = np.percentile(array[:,1],90)
y_med = np.median(array[:,1])
y_medcross = np.sum(np.diff((array[:,1]==y_med).astype(int))==1)
y_max = np.amax(array[:,1])
y_min = np.amin(array[:,1])
y_range = y_max - y_min
y_iqrange = y_75per - y_25per
z_10per = np.percentile(array[:,2],10)
z_25per = np.percentile(array[:,2],25)
z_50per = np.percentile(array[:,2],50)
z_75per = np.percentile(array[:,2],75)
z_90per = np.percentile(array[:,2],90)
z_med = np.median(array[:,2])
z_medcross = np.sum(np.diff((array[:,2]==z_med).astype(int))==1)
z_max = np.amax(array[:,2])
z_min = np.amin(array[:,2])
z_range = z_max - z_min
z_iqrange = z_75per - z_25per
mag_10per = np.percentile(mag,10)
mag_25per = np.percentile(mag,25)
mag_50per = np.percentile(mag,50)
mag_75per = np.percentile(mag,75)
mag_90per = np.percentile(mag,90)
mag_med = np.median(mag)
mag_medcross = np.sum(np.diff((mag==mag_med).astype(int))==1)
mag_max = np.amax(mag)
mag_min = np.amin(mag)
mag_range = mag_max - mag_min
mag_iqrange = mag_75per - mag_25per
xy_corr = np.correlate(array[:,0],array[:,1])
xz_corr = np.correlate(array[:,0],array[:,2])
yz_corr = np.correlate(array[:,1],array[:,2])
x_d1 = np.diff(array[:,0])
y_d1 = np.diff(array[:,1])
z_d1 = np.diff(array[:,2])
mag_d1 = np.diff(mag)
x_d1_mean = np.mean(x_d1)
y_d1_mean = np.mean(y_d1)
z_d1_mean = np.mean(z_d1)
mag_mean_d1 = np.mean(mag_d1)
x_d1_std = np.std(x_d1)
y_d1_std = np.std(y_d1)
z_d1_std = np.std(z_d1)
mag_std_d1 = np.std(mag_d1)
x_10per_d1 = np.percentile(x_d1,10)
x_25per_d1 = np.percentile(x_d1,25)
x_50per_d1 = np.percentile(x_d1,50)
x_75per_d1 = np.percentile(x_d1,75)
x_90per_d1 = np.percentile(x_d1,90)
x_med_d1 = np.median(x_d1)
x_medcross_d1 = np.sum(np.diff((x_d1==x_med_d1).astype(int))==1)
x_max_d1 = np.amax(x_d1)
x_min_d1 = np.amin(x_d1)
x_range_d1 = x_max_d1 - x_min_d1
x_iqrange_d1 = x_75per_d1 - x_25per_d1
y_10per_d1 = np.percentile(y_d1,10)
y_25per_d1 = np.percentile(y_d1,25)
y_50per_d1 = np.percentile(y_d1,50)
y_75per_d1 = np.percentile(y_d1,75)
y_90per_d1 = np.percentile(y_d1,90)
y_med_d1 = np.median(y_d1)
y_medcross_d1 = np.sum(np.diff((y_d1==y_med_d1).astype(int))==1)
y_max_d1 = np.amax(y_d1)
y_min_d1 = np.amin(y_d1)
y_range_d1 = y_max_d1 - y_min_d1
y_iqrange_d1 = y_75per_d1 - y_25per_d1
z_10per_d1 = np.percentile(z_d1,10)
z_25per_d1 = np.percentile(z_d1,25)
z_50per_d1 = np.percentile(z_d1,50)
z_75per_d1 = np.percentile(z_d1,75)
z_90per_d1 = np.percentile(z_d1,90)
z_med_d1 = np.median(z_d1)
z_medcross_d1 = np.sum(np.diff((z_d1==z_med_d1).astype(int))==1)
z_max_d1 = np.amax(z_d1)
z_min_d1 = np.amin(z_d1)
z_range_d1 = z_max_d1 - z_min_d1
z_iqrange_d1 = z_75per_d1 - z_25per_d1
mag_10per_d1 = np.percentile(mag_d1,10)
mag_25per_d1 = np.percentile(mag_d1,25)
mag_50per_d1 = np.percentile(mag_d1,50)
mag_75per_d1 = np.percentile(mag_d1,75)
mag_90per_d1 = np.percentile(mag_d1,90)
mag_med_d1 = np.median(mag_d1)
mag_medcross_d1 = np.sum(np.diff((mag_d1==mag_med_d1).astype(int))==1)
mag_max_d1 = np.amax(mag_d1)
mag_min_d1 = np.amin(mag_d1)
mag_range_d1 = mag_max_d1 - mag_min_d1
mag_iqrange_d1 = mag_75per_d1 - mag_25per_d1
xy_corr_d1 = np.correlate(x_d1,y_d1)
xz_corr_d1 = np.correlate(x_d1,z_d1)
yz_corr_d1 = np.correlate(y_d1,z_d1)
#concatenate all features
        features = np.array([x_mean,y_mean,z_mean,x_std,y_std,z_std,xy_corr,xz_corr,yz_corr,\
x_10per,x_25per,x_50per,x_75per,x_90per,x_max,x_min,x_medcross,x_range,x_iqrange,\
y_10per,y_25per,y_50per,y_75per,y_90per,y_max,y_min,y_medcross,y_range,y_iqrange,\
z_10per,z_25per,z_50per,z_75per,z_90per,z_max,z_min,z_medcross,z_range,z_iqrange,\
mag_mean,mag_std,mag_10per,mag_25per,mag_50per,mag_75per,mag_90per,mag_max,mag_min,mag_medcross,mag_range,mag_iqrange,\
x_d1_mean,y_d1_mean,z_d1_mean,x_d1_std,y_d1_std,z_d1_std,xy_corr_d1,xz_corr_d1,yz_corr_d1,\
x_10per_d1,x_25per_d1,x_50per_d1,x_75per_d1,x_90per_d1,x_max_d1,x_min_d1,x_medcross_d1,x_range_d1,x_iqrange_d1,\
y_10per_d1,y_25per_d1,y_50per_d1,y_75per_d1,y_90per_d1,y_max_d1,y_min_d1,y_medcross_d1,y_range_d1,y_iqrange_d1,\
z_10per_d1,z_25per_d1,z_50per_d1,z_75per_d1,z_90per_d1,z_max_d1,z_min_d1,z_medcross_d1,z_range_d1,z_iqrange_d1,\
mag_mean_d1,mag_std_d1,mag_10per_d1,mag_25per_d1,mag_50per_d1,mag_75per_d1,mag_90per_d1,mag_max_d1,mag_min_d1,mag_medcross_d1,mag_range_d1,mag_iqrange_d1])
features = np.concatenate((features,array[0,3:]))
features = np.expand_dims(features, axis=0)
return features
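# Hedged sketch (not part of the original module): the relationship between the
# fetch() window parameters and the per-instance feature shape described in the
# docstrings above, using the defaults from __main__ (1000-frame batches,
# 50-frame minibatches). The helper is illustrative only and is never called.
def _window_shape_sketch(batch_size=1000, minibatch_size=50):
    minibatches_per_batch = batch_size//minibatch_size  # 20 summary windows per instance
    features_per_minibatch = 109                        # summary statistics + demographics
    # each train/test instance stacks one feature row per minibatch window
    return (minibatches_per_batch, features_per_minibatch)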
if __name__ == "__main__":
# verify the required arguments are given
if (len(sys.argv) < 2):
print 'Usage: python record_fetcher_between_subject.py <1 for 2-category labels, 0 for 5-category labels>'
exit(1)
if sys.argv[1] == '1':
binary = True
elif sys.argv[1] == '0':
binary = False
else:
print 'Usage: python record_fetcher_between_subject.py <1 for 2-category labels, 0 for 5-category labels>'
exit(1)
rf = record_fetcher()
X_train,X_test,y_train,y_test = rf.fetch(1000,50,binary=binary,seed=1)
np.save('X_train',X_train)
np.save('X_test',X_test)
np.save('y_train',y_train)
np.save('y_test',y_test)
|
mit
| 2,176,022,732,392,023,600
| 45.096408
| 167
| 0.530635
| false
| 3.59328
| true
| false
| false
|
reaperhulk/pynacl
|
src/nacl/__init__.py
|
1
|
1499
|
# Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import sys
import warnings
__all__ = [
"__title__",
"__summary__",
"__uri__",
"__version__",
"__author__",
"__email__",
"__license__",
"__copyright__",
]
__title__ = "PyNaCl"
__summary__ = (
"Python binding to the Networking and Cryptography (NaCl) " "library"
)
__uri__ = "https://github.com/pyca/pynacl/"
__version__ = "1.5.0.dev1"
__author__ = "The PyNaCl developers"
__email__ = "cryptography-dev@python.org"
__license__ = "Apache License 2.0"
__copyright__ = "Copyright 2013-2018 {0}".format(__author__)
if sys.version_info[0] == 2:
warnings.warn(
"Python 2 is no longer supported by the Python core team. Support for "
"it is now deprecated in PyNaCl, and will be removed in the "
"next release.",
DeprecationWarning,
stacklevel=2,
)
|
apache-2.0
| 646,718,397,534,372,700
| 27.283019
| 79
| 0.661107
| false
| 3.701235
| false
| false
| false
|
dmarley/tfrs
|
server/models/User.py
|
1
|
1571
|
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
from django.db import models
from django.utils import timezone
from .FuelSupplier import FuelSupplier
class User(models.Model):
givenName = models.CharField(max_length=255)
surname = models.CharField(max_length=255)
initials = models.CharField(max_length=255)
email = models.CharField(max_length=255)
status = models.CharField(max_length=255)
fuelSupplier = models.ForeignKey('FuelSupplier', on_delete=models.CASCADE,related_name='UserfuelSupplier')
smUserId = models.CharField(max_length=255)
guid = models.CharField(max_length=255)
smAuthorizationDirectory = models.CharField(max_length=255)
|
apache-2.0
| -2,166,037,394,678,073,900
| 40.342105
| 208
| 0.741566
| false
| 4.167109
| false
| false
| false
|
designcc/django-ccpages
|
ccpages/tests/test_listeners.py
|
1
|
3074
|
import os
from unittest import skipUnless
from decimal import Decimal
from django.test import TestCase
from django.conf import settings
from django.core.files import File
from ccpages.forms import PagePasswordForm
from ccpages.models import Page, PageAttachment
class ListenerTestCases(TestCase):
@skipUnless(os.path.exists('%s/ccpages/test.pdf' % settings.STATIC_ROOT),
'test.pdf file does not exist')
def test_title(self):
"""A title is set on a file from filename is none is supplied"""
# open file
test_pdf = open('%s/ccpages/test.pdf' % settings.STATIC_ROOT)
# make page and attachment
p1 = Page()
p1.title = '1'
p1.slug = '1'
p1.content = '# Hello World'
p1.order = Decimal('1')
p1.password = 'ha'
p1.status = Page.VISIBLE
p1.save()
at1 = PageAttachment()
at1.page = p1
at1.src = File(test_pdf, 'ccpages/test.pdf')
at1.save()
# the title is 'test.pdf'
self.assertEqual(at1.title, 'test.pdf')
test_pdf.close()
os.unlink(at1.src.path)
# make another one, but this time with a title
test_pdf = open('%s/ccpages/test.pdf' % settings.STATIC_ROOT)
at2 = PageAttachment()
at2.page = p1
at2.src = File(test_pdf, 'ccpages/test.pdf')
at2.title = 'Arther'
at2.save()
# title is now arther
self.assertEqual(at2.title, 'Arther')
# delete the files
test_pdf.close()
os.unlink(at2.src.path)
def test_content_rendered(self):
"""When a page is saved the content is passed through
markdown and saved as content_rendered"""
page1 = Page()
page1.title = '1'
page1.slug = '1'
page1.content = '# Hello World'
page1.order = Decimal('1')
page1.password = 'ha'
page1.status = Page.VISIBLE
page1.save()
# we now have rendered content
self.assertHTMLEqual(
page1.content_rendered,
'<h1 id="hello-world">\nHello World\n</h1>')
def test_hash_if_password(self):
"""A hash is generated on save if page has password"""
page1 = Page()
page1.title = '1'
page1.slug = '1'
page1.content = '1'
page1.order = Decimal('1')
page1.password = 'ha'
page1.status = Page.VISIBLE
page1.save()
# get the page
p = Page.objects.get(pk=page1.pk)
# we have a hash
self.assertEqual(
p.hash,
'f9fc27b9374ad1e3bf34fdbcec3a4fd632427fed')
def test_hash_if_no_password(self):
"""A hash is not generated on save if page has no password"""
page1 = Page()
page1.title = '1'
page1.slug = '1'
page1.content = '1'
page1.order = Decimal('1')
page1.status = Page.VISIBLE
page1.save()
# get the page
p = Page.objects.get(pk=page1.pk)
# we have no hash
self.assertFalse(p.hash)
|
bsd-3-clause
| 8,473,854,025,559,766,000
| 32.053763
| 77
| 0.571893
| false
| 3.570267
| true
| false
| false
|
repotvsupertuga/tvsupertuga.repository
|
script.module.openscrapers/lib/openscrapers/sources_openscrapers/de/filmpalast.py
|
1
|
6036
|
# -*- coding: UTF-8 -*-
# ..#######.########.#######.##....#..######..######.########....###...########.#######.########..######.
# .##.....#.##.....#.##......###...#.##....#.##....#.##.....#...##.##..##.....#.##......##.....#.##....##
# .##.....#.##.....#.##......####..#.##......##......##.....#..##...##.##.....#.##......##.....#.##......
# .##.....#.########.######..##.##.#..######.##......########.##.....#.########.######..########..######.
# .##.....#.##.......##......##..###.......#.##......##...##..########.##.......##......##...##........##
# .##.....#.##.......##......##...##.##....#.##....#.##....##.##.....#.##.......##......##....##.##....##
# ..#######.##.......#######.##....#..######..######.##.....#.##.....#.##.......#######.##.....#..######.
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import json
import re
import urllib
import urlparse
from openscrapers.modules import cleantitle
from openscrapers.modules import client
from openscrapers.modules import dom_parser
from openscrapers.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['filmpalast.to']
self.base_link = 'http://filmpalast.to'
self.search_link = '/search/title/%s'
self.stream_link = 'stream/%s/1'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases))
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases))
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'localtvshowtitle': localtvshowtitle,
'aliases': aliases, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['localtvshowtitle']
title += ' S%02dE%02d' % (int(season), int(episode))
aliases = source_utils.aliases_to_array(eval(data['aliases']))
aliases = [i + ' S%02dE%02d' % (int(season), int(episode)) for i in aliases]
url = self.__search([title] + aliases)
if not url and data['tvshowtitle'] != data['localtvshowtitle']:
title = data['tvshowtitle']
title += ' S%02dE%02d' % (int(season), int(episode))
url = self.__search([title] + aliases)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
query = urlparse.urljoin(self.base_link, url)
r = client.request(query)
quality = dom_parser.parse_dom(r, 'span', attrs={'id': 'release_text'})[0].content.split(' ')[0]
quality, info = source_utils.get_release_quality(quality)
r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'currentStreamLinks'})
r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}),
dom_parser.parse_dom(i, 'a', attrs={'class': 'stream-src'}, req='data-id')) for i in r]
r = [(re.sub(' hd$', '', i[0][0].content.lower()), [x.attrs['data-id'] for x in i[1]]) for i in r if
i[0] and i[1]]
for hoster, id in r:
valid, hoster = source_utils.is_host_valid(hoster, hostDict)
if not valid: continue
sources.append({'source': hoster, 'quality': quality, 'language': 'de',
'info': ' | '.join(info + ['' if len(id) == 1 else 'multi-part']), 'url': id,
'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
try:
h_url = []
for id in url:
query = urlparse.urljoin(self.base_link, self.stream_link % id)
r = client.request(query, XHR=True, post=urllib.urlencode({'streamID': id}))
r = json.loads(r)
if 'error' in r and r['error'] == '0' and 'url' in r:
h_url.append(r['url'])
h_url = h_url[0] if len(h_url) == 1 else 'stack://' + ' , '.join(h_url)
return h_url
except:
return
def __search(self, titles):
try:
query = self.search_link % (urllib.quote_plus(titles[0]))
query = urlparse.urljoin(self.base_link, query)
t = [cleantitle.get(i) for i in set(titles) if i]
r = client.request(query)
r = dom_parser.parse_dom(r, 'article')
r = dom_parser.parse_dom(r, 'a', attrs={'class': 'rb'}, req='href')
r = [(i.attrs['href'], i.content) for i in r]
r = [i[0] for i in r if cleantitle.get(i[1]) in t][0]
return source_utils.strip_domain(r)
except:
return
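# Hedged sketch (not part of the original scraper): how episode() above composes the
# search title from the show title and season/episode numbers. Values are illustrative
# and the helper is never called.
def _episode_title_sketch():
    title = 'Some Show'  # illustrative local title
    season, episode = 1, 5
    return title + ' S%02dE%02d' % (int(season), int(episode))  # 'Some Show S01E05'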
|
gpl-2.0
| 9,050,439,875,432,628,000
| 40.061224
| 117
| 0.444997
| false
| 3.638336
| false
| false
| false
|
victorhahncastell/deepdiff
|
deepdiff/contenthash.py
|
1
|
8366
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import sys
from collections import Iterable
from collections import MutableMapping
from collections import defaultdict
from decimal import Decimal
from hashlib import sha1
import logging
from deepdiff.helper import py3, int, strings, numbers, items
logger = logging.getLogger(__name__)
class Skipped(object):
def __repr__(self):
return "Skipped" # pragma: no cover
def __str__(self):
return "Skipped" # pragma: no cover
class Unprocessed(object):
def __repr__(self):
return "Error: Unprocessed" # pragma: no cover
def __str__(self):
return "Error: Unprocessed" # pragma: no cover
class NotHashed(object):
def __repr__(self):
return "Error: NotHashed" # pragma: no cover
def __str__(self):
return "Error: NotHashed" # pragma: no cover
class DeepHash(dict):
r"""
**DeepHash**
"""
def __init__(self,
obj,
hashes=None,
exclude_types=set(),
hasher=hash,
ignore_repetition=True,
significant_digits=None,
**kwargs):
if kwargs:
raise ValueError(
("The following parameter(s) are not valid: %s\n"
"The valid parameters are obj, hashes, exclude_types."
"hasher and ignore_repetition.") % ', '.join(kwargs.keys()))
self.obj = obj
self.exclude_types = set(exclude_types)
self.exclude_types_tuple = tuple(
exclude_types) # we need tuple for checking isinstance
self.ignore_repetition = ignore_repetition
self.hasher = hasher
hashes = hashes if hashes else {}
self.update(hashes)
self['unprocessed'] = []
self.unprocessed = Unprocessed()
self.skipped = Skipped()
self.not_hashed = NotHashed()
self.significant_digits = significant_digits
self.__hash(obj, parents_ids=frozenset({id(obj)}))
if self['unprocessed']:
logger.warning("Can not hash the following items: {}.".format(self['unprocessed']))
else:
del self['unprocessed']
@staticmethod
def sha1hex(obj):
"""Use Sha1 for more accuracy."""
if py3: # pragma: no cover
if isinstance(obj, str):
obj = "{}:{}".format(type(obj).__name__, obj)
obj = obj.encode('utf-8')
elif isinstance(obj, bytes):
obj = type(obj).__name__.encode('utf-8') + b":" + obj
else: # pragma: no cover
if isinstance(obj, unicode):
obj = u"{}:{}".format(type(obj).__name__, obj)
obj = obj.encode('utf-8')
elif isinstance(obj, str):
obj = type(obj).__name__ + ":" + obj
return sha1(obj).hexdigest()
@staticmethod
def __add_to_frozen_set(parents_ids, item_id):
parents_ids = set(parents_ids)
parents_ids.add(item_id)
return frozenset(parents_ids)
def __get_and_set_str_hash(self, obj):
obj_id = id(obj)
result = self.hasher(obj)
result = "str:{}".format(result)
self[obj_id] = result
return result
def __hash_obj(self, obj, parents_ids=frozenset({}), is_namedtuple=False):
"""Difference of 2 objects"""
try:
if is_namedtuple:
obj = obj._asdict()
else:
obj = obj.__dict__
except AttributeError:
try:
obj = {i: getattr(obj, i) for i in obj.__slots__}
except AttributeError:
self['unprocessed'].append(obj)
return self.unprocessed
result = self.__hash_dict(obj, parents_ids)
result = "nt{}".format(result) if is_namedtuple else "obj{}".format(
result)
return result
def __skip_this(self, obj):
skip = False
if isinstance(obj, self.exclude_types_tuple):
skip = True
return skip
def __hash_dict(self, obj, parents_ids=frozenset({})):
result = []
obj_keys = set(obj.keys())
for key in obj_keys:
key_hash = self.__hash(key)
item = obj[key]
item_id = id(item)
if parents_ids and item_id in parents_ids:
continue
parents_ids_added = self.__add_to_frozen_set(parents_ids, item_id)
hashed = self.__hash(item, parents_ids_added)
hashed = "{}:{}".format(key_hash, hashed)
result.append(hashed)
result.sort()
result = ';'.join(result)
result = "dict:{%s}" % result
return result
def __hash_set(self, obj):
return "set:{}".format(self.__hash_iterable(obj))
def __hash_iterable(self, obj, parents_ids=frozenset({})):
result = defaultdict(int)
for i, x in enumerate(obj):
if self.__skip_this(x):
continue
item_id = id(x)
if parents_ids and item_id in parents_ids:
continue
parents_ids_added = self.__add_to_frozen_set(parents_ids, item_id)
hashed = self.__hash(x, parents_ids_added)
result[hashed] += 1
if self.ignore_repetition:
result = list(result.keys())
else:
result = [
'{}|{}'.format(i[0], i[1]) for i in getattr(result, items)()
]
result.sort()
result = ','.join(result)
result = "{}:{}".format(type(obj).__name__, result)
return result
def __hash_str(self, obj):
return self.__get_and_set_str_hash(obj)
def __hash_number(self, obj):
# Based on diff.DeepDiff.__diff_numbers
if self.significant_digits is not None and isinstance(obj, (
float, complex, Decimal)):
obj_s = ("{:.%sf}" % self.significant_digits).format(obj)
# Special case for 0: "-0.00" should compare equal to "0.00"
if set(obj_s) <= set("-0."):
obj_s = "0.00"
result = "number:{}".format(obj_s)
obj_id = id(obj)
self[obj_id] = result
else:
result = "{}:{}".format(type(obj).__name__, obj)
return result
def __hash_tuple(self, obj, parents_ids):
# Checking to see if it has _fields. Which probably means it is a named
# tuple.
try:
obj._asdict
# It must be a normal tuple
except AttributeError:
result = self.__hash_iterable(obj, parents_ids)
# We assume it is a namedtuple then
else:
result = self.__hash_obj(obj, parents_ids, is_namedtuple=True)
return result
def __hash(self, obj, parent="root", parents_ids=frozenset({})):
"""The main diff method"""
obj_id = id(obj)
if obj_id in self:
return self[obj_id]
result = self.not_hashed
if self.__skip_this(obj):
result = self.skipped
elif obj is None:
result = 'NONE'
elif isinstance(obj, strings):
result = self.__hash_str(obj)
elif isinstance(obj, numbers):
result = self.__hash_number(obj)
elif isinstance(obj, MutableMapping):
result = self.__hash_dict(obj, parents_ids)
elif isinstance(obj, tuple):
result = self.__hash_tuple(obj, parents_ids)
elif isinstance(obj, (set, frozenset)):
result = self.__hash_set(obj)
elif isinstance(obj, Iterable):
result = self.__hash_iterable(obj, parents_ids)
else:
result = self.__hash_obj(obj, parents_ids)
if result != self.not_hashed and obj_id not in self and not isinstance(
obj, numbers):
self[obj_id] = result
if result is self.not_hashed: # pragma: no cover
self[obj_id] = self.not_hashed
self['unprocessed'].append(obj)
return result
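# Hedged usage sketch (not part of the original module): DeepHash behaves like a
# dict keyed by id(), so the content hash of an object, or of any nested member,
# is looked up by object id. The sample object is illustrative only and the helper
# is never called.
def _deephash_usage_sketch():
    obj = {"a": [1, 2, 3], "b": {"c": 4}}
    hashes = DeepHash(obj)
    # composite string hashes, e.g. "dict:{...}" for the dict and "list:..." for the nested list
    return hashes[id(obj)], hashes[id(obj["a"])]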
if __name__ == "__main__": # pragma: no cover
if not py3:
sys.exit("Please run with Python 3 to verify the doc strings.")
import doctest
doctest.testmod()
|
mit
| 5,563,972,840,234,503,000
| 29.421818
| 95
| 0.536338
| false
| 4.069066
| false
| false
| false
|
mrsamuelbarnes/Blockchain-Ballot
|
crypto.py
|
1
|
6940
|
# Import required modules
import sys
import random
# Import custom modules
import file
# Definition for a public key
class PublicKey:
def __init__(self, n, g):
# 'n' is a product of the two primes chosen for the key
self.n = n
# 'g' is the public exponent used to encrypt messages
self.g = g
# Definition for a private key
class PrivateKey:
def __init__(self, n, phiN, u):
# 'n' is a product of the two primes chosen for the key
self.n = n
# 'phiN' is the phi of the two primes chosen for the key
self.phiN = phiN
# 'u' is the modular inverse of n mod phi(n)
self.u = u
# Generate a random number of 'n' bits from the system entropy function
def randomNumber(bits):
return random.SystemRandom().getrandbits(bits)
# Perform an 'n' round Miller-Rabin primality test (the default 40 rounds gives a false-positive probability of at most 4^-40, roughly 2^-80)
def millerRabin(number, rounds=40):
# Get 'm' and 'k' that satisfies 'number - 1 = 2^k * m' with whole numbers
# Initalise 'k'
k = 0
# Initalise 'm'
m = number - 1
# When 'm' becomes odd the next iteration wont be whole
while m % 2 == 0:
# Iterate 'k'
k += 1
# Calculate 'm'
m /= 2
# Perform the specified number of rounds
for index in xrange(rounds):
# Perform a single round
if not millerRabinRound(number, m, k):
# The round failed, the number is a composite
return False
# The number passed the specified rounds of accuracy
return True
# Perform a single Miller-Rabin round for the given values
# Returns true for a round pass
def millerRabinRound(number, m, k):
# Generate a random 'a' where 1 < a < number - 1
a = random.randrange(2, number - 1)
# Calculate the value for 'x' where x = a^m mod number
x = pow(a, m, number)
# Check if 'x' is 1 or 'number' - 1 which indicates a probable prime
if x == 1 or x == number - 1:
# The number has passed the round
return True
# Loop the operation 'k' times until a round pass or a composite is found
for index in xrange(k - 1):
# Recalculate 'x'
x = pow(x, 2, number)
# Break loop if 'x' is 'number' - 1
if x == number - 1:
break
# If the loop completes the number is composite
else:
# The number has failed the round
return False
#The number has passed the round
return True
# Test if a number is a probable prime
def isProbablePrime(number):
# Number is not prime if it is even
if number % 2 == 0:
return False
# Perform a Miller-Rabin test with the default number of rounds
if millerRabin(number):
# The number passed the test
return True
else:
# The number failed the test
return False
# Generate a probable prime suitable for use in public key encryption
def generatePrime():
# Loop until a suitable prime is found
while True:
# Generate a prime number of 512 bits
possiblePrime = randomNumber(512)
# Return the number if it is a probable prime
if isProbablePrime(possiblePrime):
return possiblePrime
# Calculate modular inverse (a^-1 mod c)
def modularInverse(a, c):
# Set 'b' as 'c' for use in the algorithm
b = c
# Set initial Bezout Coefficients
coefficientT = 0
lastCoefficientT = 1
coefficientS = 1
lastCoefficientS = 0
# Loop until a GCD is found
gcdFound = False
while not gcdFound:
# Calculate the quotient for this round
quotient = a // b
# Calculate the remainder for this round
a, b = b, a % b
# Check if the GCD has been found
if (b == 0):
gcdFound = True
# Calculate the coefficients for this round
coefficientT, lastCoefficientT = lastCoefficientT - quotient * coefficientT, coefficientT
coefficientS, lastCoefficientS = lastCoefficientS - quotient * coefficientS, coefficientS
# Return the calculated inverse
return lastCoefficientT % c
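# Hedged sketch (not part of the original module): check the defining property of
# modularInverse, a * a^-1 = 1 (mod c), on small illustrative values. Never called.
def _modular_inverse_sketch():
    a, c = 7, 40
    inverse = modularInverse(a, c)  # 23, since 7 * 23 = 161 = 4*40 + 1
    return (a * inverse) % c == 1   # True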
# Generate a Paillier private key and related public key
def generateKeyPair():
# Get 2 Paillier suitable prime numbers
firstPrime = generatePrime()
secondPrime = generatePrime()
# Ensure the primes are distinct
if firstPrime == secondPrime:
# Reattempt the generation
return generateKeyPair()
# Compute composite number 'n'
n = firstPrime * secondPrime
# Compute the phi of 'n'
phiN = (firstPrime - 1) * (secondPrime - 1)
# Compute 'g' for the public key
g = n + 1
    # Compute the modular inverse of 'phiN' modulo 'n', i.e. phiN^-1 mod n
u = modularInverse(phiN, n)
# Create the public key
public = PublicKey(n, g)
# Create the private key
private = PrivateKey(n, phiN, u)
# Return the key pair
return public, private
# Encrypt plaintext using a Paillier public key
def encrypt(publicKey, plaintext):
# Calculate n^2
nSquared = publicKey.n ** 2
# Generate a random 'r' where 1 < r < n - 1
r = random.randrange(2, publicKey.n - 1)
# Compute the cyphertext as cyphertext = (g^plaintext mod n^2) * (r^n mod n^2) mod n^2
cyphertext = ( pow(publicKey.g, plaintext, nSquared) *
pow(r, publicKey.n, nSquared) % nSquared )
# Return the encrypted cypher
return cyphertext
# Decrypt Paillier cyphertext using a private key
def decrypt(privateKey, cyphertext):
# Calculate n^2
nSquared = privateKey.n ** 2
# Compute the plaintext as plaintext = L(cyphertext^phiN mod n^2) * u mod n
# Where L(x) = (x - 1) / n
plaintext = ( (pow(cyphertext, privateKey.phiN, nSquared) - 1)
// privateKey.n * privateKey.u % privateKey.n )
# Return the decrypted plaintext
return plaintext
# Apply a homomorphic addition to two integers encrypted by the same key
def homomorphicAdd(publicKey, encryptedInteger1, encryptedInteger2):
# Compute the addition as result = encryptedInteger1 * encryptedInteger2 mod n^2
return encryptedInteger1 * encryptedInteger2 % (publicKey.n ** 2)
# Add the encrypted votes of a ballot
def addVotes(votes, publicKey):
# Initalise results array
results = []
# Calculate the number of candidates
candidateCount = len(votes[0])
# Loop through each vote
for index in xrange(len(votes)):
# Check if this is the first vote
if index == 0:
# Simply add the values
results = votes[index]
else:
# Loop through each value
for valueIndex in xrange(candidateCount):
# homomorphicly add this value to the result
results[valueIndex] = homomorphicAdd(publicKey, results[valueIndex], votes[index][valueIndex])
# Return the encrypted results
return results
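# Hedged usage sketch (not part of the original module): tally two encrypted ballots
# for three candidates and check that the homomorphic sum decrypts to the expected
# plaintext counts. The ballot layout is illustrative only, key generation with
# 512-bit primes may take a few seconds, and the helper is never called.
def _ballot_tally_sketch():
    publicKey, privateKey = generateKeyPair()
    # each ballot encrypts a 1 for the chosen candidate and 0 for the others
    ballot1 = [encrypt(publicKey, v) for v in [1, 0, 0]]
    ballot2 = [encrypt(publicKey, v) for v in [0, 1, 0]]
    encryptedTotals = addVotes([ballot1, ballot2], publicKey)
    return [decrypt(privateKey, c) for c in encryptedTotals]  # expected: [1, 1, 0]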
|
mit
| -8,274,426,283,300,570,000
| 25.48855
| 110
| 0.63415
| false
| 3.823691
| false
| false
| false
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/pipeline/api/keras/optimizers.py
|
1
|
4419
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.util.common import *
from bigdl.optim.optimizer import OptimMethod, Default
from zoo.pipeline.api.keras.base import ZooKerasCreator
from zoo.common.utils import callZooFunc
if sys.version >= '3':
long = int
unicode = str
class Adam(OptimMethod, ZooKerasCreator):
"""
An implementation of Adam with learning rate schedule.
>>> adam = Adam()
creating: createZooKerasAdam
creating: createDefault
"""
def __init__(self,
lr=1e-3,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-8,
decay=0.0,
schedule=None,
weight_decay=0.0,
bigdl_type="float"):
"""
:param lr learning rate
:param beta_1 first moment coefficient
:param beta_2 second moment coefficient
:param epsilon for numerical stability
:param decay learning rate decay
:param schedule learning rate schedule, e.g. Warmup or Poly from BigDL
"""
# explicitly reimplement the constructor since:
# 1. This class need to be a subclass of OptimMethod
# 2. The constructor of OptimMethod invokes JavaValue.jvm_class_constructor() directly
# and does not take the polymorphism.
self.value = callZooFunc(
bigdl_type, ZooKerasCreator.jvm_class_constructor(self),
lr,
beta_1,
beta_2,
epsilon,
decay,
weight_decay,
schedule if (schedule) else Default()
)
self.bigdl_type = bigdl_type
class AdamWeightDecay(OptimMethod, ZooKerasCreator):
"""
Implements BERT version of Adam algorithm.
>>> adam = AdamWeightDecay()
creating: createZooKerasAdamWeightDecay
"""
def __init__(self,
lr=1e-3,
warmup_portion=-1.0,
total=-1,
schedule="linear",
beta1=0.9,
beta2=0.999,
epsilon=1e-6,
weight_decay=0.01,
bigdl_type="float"):
"""
:param lr learning rate
:param warmupPortion portion of total for the warmup, -1 means no warmup. Default: -1
:param total total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
:param schedule schedule to use for the warmup. Default: 'linear'
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
:param weightDecay weight decay
"""
# explicitly reimplement the constructor since:
# 1. This class need to be a subclass of OptimMethod
# 2. The constructor of OptimMethod invokes JavaValue.jvm_class_constructor() directly
# and does not take the polymorphism.
self.value = callZooFunc(
bigdl_type, ZooKerasCreator.jvm_class_constructor(self),
lr,
warmup_portion,
total,
schedule,
beta1,
beta2,
epsilon,
weight_decay)
self.bigdl_type = bigdl_type
class PolyEpochDecay(ZooKerasCreator):
"""
A learning rate decay policy, where the effective learning rate
follows a polynomial decay, to be zero by the max_epochs.
    Calculation: init_lr * (1 - epoch/max_epochs) ^ (power)
:param power: The coefficient of decay.
:param max_epochs: The maximum number of epochs when lr becomes zero.
>>> poly = PolyEpochDecay(0.5, 5)
creating: createZooKerasPolyEpochDecay
"""
def __init__(self, power, max_epochs, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, power, max_epochs)
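# Hedged sketch (not part of the original module): the decay formula quoted in the
# PolyEpochDecay docstring evaluated in plain Python; names are illustrative only
# and the helper is never called.
def _poly_epoch_decay_sketch(init_lr, power, max_epochs, epoch):
    # init_lr * (1 - epoch/max_epochs) ^ power, reaching zero at max_epochs
    return init_lr * (1.0 - float(epoch) / max_epochs) ** power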
|
apache-2.0
| -793,365,526,256,598,700
| 32.732824
| 94
| 0.61394
| false
| 4.13377
| false
| false
| false
|
arenadata/ambari
|
ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_server_upgrade.py
|
1
|
8007
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.core.resources.system import Execute
from resource_management.core import shell
from resource_management.libraries.functions import format
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.version import compare_versions
def pre_upgrade_deregister():
"""
Runs the "hive --service hiveserver2 --deregister <version>" command to
de-provision the server in preparation for an upgrade. This will contact
ZooKeeper to remove the server so that clients that attempt to connect
will be directed to other servers automatically. Once all
clients have drained, the server will shutdown automatically; this process
could take a very long time.
This function will obtain the Kerberos ticket if security is enabled.
:return:
"""
import params
Logger.info('HiveServer2 executing "deregister" command in preparation for upgrade...')
if params.security_enabled:
kinit_command=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
Execute(kinit_command,user=params.smokeuser)
# calculate the current hive server version
current_hiveserver_version = _get_current_hiveserver_version()
if current_hiveserver_version is None:
raise Fail('Unable to determine the current HiveServer2 version to deregister.')
# fallback when upgrading because /usr/iop/current/hive-server2/conf/conf.server may not exist
hive_server_conf_dir = params.hive_server_conf_dir
if not os.path.exists(hive_server_conf_dir):
hive_server_conf_dir = "/etc/hive/conf.server"
# deregister
hive_execute_path = params.execute_path
# If upgrading, the upgrade-target hive binary should be used to call the --deregister command.
# If downgrading, the downgrade-source hive binary should be used to call the --deregister command.
if "upgrade" == params.upgrade_direction:
# hive_bin
upgrade_target_version = format_stack_version(params.version)
if upgrade_target_version and compare_versions(upgrade_target_version, "4.1.0.0") >= 0:
upgrade_target_hive_bin = format('/usr/iop/{version}/hive/bin')
if (os.pathsep + params.hive_bin) in hive_execute_path:
hive_execute_path = hive_execute_path.replace(os.pathsep + params.hive_bin, os.pathsep + upgrade_target_hive_bin)
# hadoop_bin_dir
upgrade_target_hadoop_bin = stack_select.get_hadoop_dir("bin", upgrade_stack_only=True)
upgrade_source_hadoop_bin = params.hadoop_bin_dir
if upgrade_target_hadoop_bin and len(upgrade_target_hadoop_bin) > 0 and (os.pathsep + upgrade_source_hadoop_bin) in hive_execute_path:
hive_execute_path = hive_execute_path.replace(os.pathsep + upgrade_source_hadoop_bin, os.pathsep + upgrade_target_hadoop_bin)
command = format('hive --config {hive_server_conf_dir} --service hiveserver2 --deregister ' + current_hiveserver_version)
Execute(command, user=params.hive_user, path=hive_execute_path, tries=1 )
def _get_current_hiveserver_version():
"""
Runs "hive --version" and parses the result in order
to obtain the current version of hive.
:return: the hiveserver2 version, returned by "hive --version"
"""
import params
try:
command = 'hive --version'
return_code, iop_output = shell.call(command, user=params.hive_user, path=params.execute_path)
except Exception, e:
Logger.error(str(e))
raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.')
if return_code != 0:
raise Fail('Unable to determine the current HiveServer2 version because of a non-zero return code of {0}'.format(str(return_code)))
match = re.search('^(Hive) ([0-9]+.[0-9]+.\S+)', iop_output, re.MULTILINE)
if match:
current_hive_server_version = match.group(2)
return current_hive_server_version
else:
    raise Fail('The extracted hiveserver2 version "{0}" does not match any known pattern'.format(iop_output))
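# Hedged sketch (not part of the original script): how the version regex above behaves
# on a typical "hive --version" output line. The sample string is illustrative only
# and the helper is never called.
def _parse_version_sketch():
  sample_output = "Hive 1.2.1000.2.6.5.0-292"
  match = re.search('^(Hive) ([0-9]+.[0-9]+.\S+)', sample_output, re.MULTILINE)
  return match.group(2) if match else None  # -> "1.2.1000.2.6.5.0-292"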
def post_upgrade_deregister():
"""
Runs the "hive --service hiveserver2 --deregister <version>" command to
  de-provision the server after the upgrade has completed. This will contact
ZooKeeper to remove the server so that clients that attempt to connect
will be directed to other servers automatically. Once all
clients have drained, the server will shutdown automatically; this process
could take a very long time.
This function will obtain the Kerberos ticket if security is enabled.
:return:
"""
import params
Logger.info('HiveServer2 executing "deregister" command to complete upgrade...')
if params.security_enabled:
kinit_command=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
Execute(kinit_command,user=params.smokeuser)
# calculate the current hive server version
current_hiveserver_version = _get_current_hiveserver_version()
if current_hiveserver_version is None:
raise Fail('Unable to determine the current HiveServer2 version to deregister.')
# fallback when upgrading because /usr/hdp/current/hive-server2/conf/conf.server may not exist
hive_server_conf_dir = params.hive_server_conf_dir
if not os.path.exists(hive_server_conf_dir):
hive_server_conf_dir = "/etc/hive/conf.server"
# deregister
hive_execute_path = params.execute_path
# If upgrading, the upgrade-target hive binary should be used to call the --deregister command.
# If downgrading, the downgrade-source hive binary should be used to call the --deregister command.
# By now <stack-selector-tool> has been called to set 'current' to target-stack
if params.downgrade_from_version is not None:
hive_execute_path = _get_hive_execute_path(params.downgrade_from_version)
command = format('hive --config {hive_server_conf_dir} --service hiveserver2 --deregister ' + current_hiveserver_version)
Execute(command, user=params.hive_user, path=hive_execute_path, tries=1 )
def _get_hive_execute_path(stack_version):
"""
Returns the exact execute path to use for the given stack-version.
This method does not return the "current" path
:param stack_version: Exact stack-version to use in the new path
:return: Hive execute path for the exact hdp stack-version
"""
import params
hive_execute_path = params.execute_path
formatted_stack_version = format_stack_version(stack_version)
if formatted_stack_version and compare_versions(formatted_stack_version, "4.1") >= 0:
# hive_bin
new_hive_bin = format('/usr/iop/{stack_version}/hive/bin')
if (os.pathsep + params.hive_bin) in hive_execute_path:
hive_execute_path = hive_execute_path.replace(os.pathsep + params.hive_bin, os.pathsep + new_hive_bin)
# hadoop_bin_dir
new_hadoop_bin = stack_select.get_hadoop_dir_for_stack_version("bin", stack_version)
old_hadoop_bin = params.hadoop_bin_dir
if new_hadoop_bin and len(new_hadoop_bin) > 0 and (os.pathsep + old_hadoop_bin) in hive_execute_path:
hive_execute_path = hive_execute_path.replace(os.pathsep + old_hadoop_bin, os.pathsep + new_hadoop_bin)
return hive_execute_path
|
apache-2.0
| -3,441,732,457,333,918,700
| 46.1
| 138
| 0.749594
| false
| 3.745089
| false
| false
| false
|
xgfone/snippet
|
snippet/example/python/project/project/common/utils.py
|
1
|
2500
|
# encoding: utf-8
from __future__ import absolute_import, print_function, unicode_literals, division
import pbr.version
from six import text_type as unicode_type
from six import string_types as basestring_type
from six import binary_type as bytes_type
_BYTES_TYPES = (bytes_type, type(None))
_UNICODE_TYPES = (unicode_type, type(None))
_BASESTRING_TYPES = (basestring_type, type(None))
def get_version(project, version=None):
if version:
return version
return pbr.version.VersionInfo(project).version_string()
def to_bytes(obj, encoding="utf-8"):
"""Converts a string argument to a bytes string.
If the argument is already a bytes string or None, it is returned
    unchanged. Otherwise it must be a unicode string and is encoded using the
    given encoding argument."""
if isinstance(obj, _BYTES_TYPES):
return obj
elif isinstance(obj, unicode_type):
return obj.encode(encoding)
raise TypeError("Expected bytes, unicode, or None; got %r" % type(obj))
def to_unicode(obj, decoding="utf-8"):
"""Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
    unchanged. Otherwise it must be a byte string and is decoded using the
    given decoding argument.
"""
if isinstance(obj, _UNICODE_TYPES):
return obj
elif isinstance(obj, bytes_type):
return obj.decode(decoding)
raise TypeError("Expected bytes, unicode, or None; got %r" % type(obj))
def to_basestring(value, encoding="utf-8"):
"""Converts a string argument to a subclass of basestring.
In python2, byte and unicode strings are mostly interchangeable,
so functions that deal with a user-supplied argument in combination
with ascii string constants can use either and should return the type
the user supplied. In python3, the two types are not interchangeable,
so this method is needed to convert byte strings to unicode.
"""
if isinstance(value, _BASESTRING_TYPES):
return value
    if isinstance(value, bytes):
return value.decode(encoding)
raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
if str is unicode_type:
to_str = native_str = to_unicode
else:
to_str = native_str = to_bytes
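# --- Added usage sketch (hedged): not part of the original module ---
# Round-trip examples for the helpers above. Running this requires the
# module's own imports (pbr, six); the literal values are illustrative only.
if __name__ == "__main__":
    assert to_bytes(u"caf\u00e9") == b"caf\xc3\xa9"
    assert to_unicode(b"caf\xc3\xa9") == u"caf\u00e9"
    assert to_bytes(None) is None and to_unicode(None) is None
    # native_str is to_unicode on Python 3 and to_bytes on Python 2
    print(type(native_str(b"abc")))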
|
mit
| 5,995,021,948,278,806,000
| 34.231884
| 82
| 0.6908
| false
| 4.098361
| false
| false
| false
|
maralorn/pythonlights
|
sound.py
|
1
|
1790
|
#!/usr/bin/python
# open a microphone in pyAudio and get its FFT spectrum
import pyaudio
import numpy as np
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
INPUT_BLOCK_TIME = 0.08
GLIDING_DIVIDER = 4
INPUT_FRAMES_PER_BLOCK = int(RATE*INPUT_BLOCK_TIME/GLIDING_DIVIDER)
soundtype = np.dtype([('l',np.int16),('r',np.int16)])
class Listener(object):
def __init__(self):
self.pa = pyaudio.PyAudio()
self.stream = self.open_mic_stream()
raw = self.listen()
for i in range(1,GLIDING_DIVIDER):
raw += self.listen()
        stereodata = np.frombuffer(raw,soundtype)
self.buf = (stereodata['l'] + stereodata['r'])/2
def stop(self):
self.stream.close()
def open_mic_stream( self ):
stream = self.pa.open( format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
input_device_index = None,
frames_per_buffer = INPUT_FRAMES_PER_BLOCK)
return stream
def listen(self):
try:
block = self.stream.read(INPUT_FRAMES_PER_BLOCK)
except IOError:
return
return block
# Returns the FFT of a sound sample recorded over INPUT_BLOCK_TIME.
# This is a numpy array of RATE*INPUT_BLOCK_TIME/2 values.
# The i-th element represents the frequency i/INPUT_BLOCK_TIME
def get_spectrum(self):
raw = self.listen()
        stereodata = np.frombuffer(raw,soundtype)
monodata = (stereodata['l'] + stereodata['r'])/2
self.buf[:-len(monodata)] = self.buf[len(monodata):]
self.buf[-len(monodata):] = monodata
return abs(np.fft.rfft(self.buf))
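# --- Added usage sketch (hedged): not part of the original script ---
# Rough example of driving Listener in a loop; it assumes a working microphone
# and the pyaudio/numpy setup above. The loop count and the "dominant
# frequency" print-out are illustrative only.
if __name__ == "__main__":
    listener = Listener()
    for _ in range(10):
        spectrum = listener.get_spectrum()
        dominant_bin = int(np.argmax(spectrum))
        # bin i corresponds to frequency i / INPUT_BLOCK_TIME (see comment above)
        print("dominant frequency ~ %.1f Hz" % (dominant_bin / INPUT_BLOCK_TIME))
    listener.stop()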
|
mit
| -6,446,139,762,912,498,000
| 29.862069
| 76
| 0.569832
| false
| 3.572854
| false
| false
| false
|
infoxchange/barman
|
barman/lockfile.py
|
1
|
9546
|
# Copyright (C) 2011-2017 2ndQuadrant Limited
#
# This file is part of Barman.
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.
"""
This module is the lock manager for Barman
"""
import errno
import fcntl
import os
import re
from barman.exceptions import (LockFileBusy, LockFileParsingError,
LockFilePermissionDenied)
class LockFile(object):
"""
Ensures that there is only one process which is running against a
specified LockFile.
It supports the Context Manager interface, allowing the use in with
statements.
with LockFile('file.lock') as locked:
if not locked:
print "failed"
else:
<do something>
You can also use exceptions on failures
try:
with LockFile('file.lock', True):
<do something>
        except LockFileBusy as e:
            print("failed to lock %s" % e)
"""
LOCK_PATTERN = None
"""
If defined in a subclass, it must be a compiled regular expression
which matches the lock filename.
It must provide named groups for the constructor parameters which produce
the same lock name. I.e.:
>>> ServerWalReceiveLock('/tmp', 'server-name').filename
'/tmp/.server-name-receive-wal.lock'
>>> ServerWalReceiveLock.LOCK_PATTERN = re.compile(
r'\.(?P<server_name>.+)-receive-wal\.lock')
>>> m = ServerWalReceiveLock.LOCK_PATTERN.match(
'.server-name-receive-wal.lock')
>>> ServerWalReceiveLock('/tmp', **(m.groupdict())).filename
'/tmp/.server-name-receive-wal.lock'
"""
@classmethod
def build_if_matches(cls, path):
"""
Factory method that creates a lock instance if the path matches
the lock filename created by the actual class
:param path: the full path of a LockFile
:return:
"""
# If LOCK_PATTERN is not defined always return None
if not cls.LOCK_PATTERN:
return None
# Matches the provided path against LOCK_PATTERN
lock_directory = os.path.abspath(os.path.dirname(path))
lock_name = os.path.basename(path)
match = cls.LOCK_PATTERN.match(lock_name)
if match:
# Build the lock object for the provided path
return cls(lock_directory, **(match.groupdict()))
return None
def __init__(self, filename, raise_if_fail=True, wait=False):
self.filename = os.path.abspath(filename)
self.fd = None
self.raise_if_fail = raise_if_fail
self.wait = wait
def acquire(self, raise_if_fail=None, wait=None):
"""
Creates and holds on to the lock file.
When raise_if_fail, a LockFileBusy is raised if
the lock is held by someone else and a LockFilePermissionDenied is
raised when the user executing barman have insufficient rights for
the creation of a LockFile.
Returns True if lock has been successfully acquired, False otherwise.
:param bool raise_if_fail: If True raise an exception on failure
:param bool wait: If True issue a blocking request
:returns bool: whether the lock has been acquired
"""
if self.fd:
return True
fd = None
# method arguments take precedence on class parameters
raise_if_fail = raise_if_fail \
if raise_if_fail is not None else self.raise_if_fail
wait = wait if wait is not None else self.wait
try:
# 384 is 0600 in octal, 'rw-------'
fd = os.open(self.filename, os.O_CREAT | os.O_RDWR, 384)
flags = fcntl.LOCK_EX
if not wait:
flags |= fcntl.LOCK_NB
fcntl.flock(fd, flags)
# Once locked, replace the content of the file
os.lseek(fd, 0, os.SEEK_SET)
os.write(fd, ("%s\n" % os.getpid()).encode('ascii'))
# Truncate the file at the current position
os.ftruncate(fd, os.lseek(fd, 0, os.SEEK_CUR))
self.fd = fd
return True
except (OSError, IOError) as e:
if fd:
os.close(fd) # let's not leak file descriptors
if raise_if_fail:
if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
raise LockFileBusy(self.filename)
elif e.errno == errno.EACCES:
raise LockFilePermissionDenied(self.filename)
else:
raise
else:
return False
def release(self):
"""
Releases the lock.
If the lock is not held by the current process it does nothing.
"""
if not self.fd:
return
try:
fcntl.flock(self.fd, fcntl.LOCK_UN)
os.close(self.fd)
except (OSError, IOError):
pass
self.fd = None
def __del__(self):
"""
Avoid stale lock files.
"""
self.release()
# Contextmanager interface
def __enter__(self):
return self.acquire()
def __exit__(self, exception_type, value, traceback):
self.release()
def get_owner_pid(self):
"""
Test whether a lock is already held by a process.
Returns the PID of the owner process or None if the lock is available.
:rtype: int|None
:raises LockFileParsingError: when the lock content is garbled
:raises LockFilePermissionDenied: when the lockfile is not accessible
"""
try:
self.acquire(raise_if_fail=True, wait=False)
except LockFileBusy:
try:
# Read the lock content and parse the PID
# NOTE: We cannot read it in the self.acquire method to avoid
# reading the previous locker PID
with open(self.filename, 'r') as file_object:
return int(file_object.readline().strip())
except ValueError as e:
# This should not happen
raise LockFileParsingError(e)
# release the lock and return None
self.release()
return None
class GlobalCronLock(LockFile):
"""
This lock protects cron from multiple executions.
Creates a global '.cron.lock' lock file under the given lock_directory.
"""
def __init__(self, lock_directory):
super(GlobalCronLock, self).__init__(
os.path.join(lock_directory, '.cron.lock'),
raise_if_fail=True)
class ServerBackupLock(LockFile):
"""
This lock protects a server from multiple executions of backup command
Creates a '.<SERVER>-backup.lock' lock file under the given lock_directory
for the named SERVER.
"""
def __init__(self, lock_directory, server_name):
super(ServerBackupLock, self).__init__(
os.path.join(lock_directory, '.%s-backup.lock' % server_name),
raise_if_fail=True)
class ServerCronLock(LockFile):
"""
This lock protects a server from multiple executions of cron command
Creates a '.<SERVER>-cron.lock' lock file under the given lock_directory
for the named SERVER.
"""
def __init__(self, lock_directory, server_name):
super(ServerCronLock, self).__init__(
os.path.join(lock_directory, '.%s-cron.lock' % server_name),
raise_if_fail=True, wait=False)
class ServerXLOGDBLock(LockFile):
"""
This lock protects a server's xlogdb access
Creates a '.<SERVER>-xlogdb.lock' lock file under the given lock_directory
for the named SERVER.
"""
def __init__(self, lock_directory, server_name):
super(ServerXLOGDBLock, self).__init__(
os.path.join(lock_directory, '.%s-xlogdb.lock' % server_name),
raise_if_fail=True, wait=True)
class ServerWalArchiveLock(LockFile):
"""
This lock protects a server from multiple executions of wal-archive command
Creates a '.<SERVER>-archive-wal.lock' lock file under
the given lock_directory for the named SERVER.
"""
def __init__(self, lock_directory, server_name):
super(ServerWalArchiveLock, self).__init__(
os.path.join(lock_directory, '.%s-archive-wal.lock' % server_name),
raise_if_fail=True, wait=False)
class ServerWalReceiveLock(LockFile):
"""
This lock protects a server from multiple executions of receive-wal command
Creates a '.<SERVER>-receive-wal.lock' lock file under
the given lock_directory for the named SERVER.
"""
# TODO: Implement on the other LockFile subclasses
LOCK_PATTERN = re.compile(r'\.(?P<server_name>.+)-receive-wal\.lock')
def __init__(self, lock_directory, server_name):
super(ServerWalReceiveLock, self).__init__(
os.path.join(lock_directory, '.%s-receive-wal.lock' % server_name),
raise_if_fail=True, wait=False)
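# --- Added usage sketch (hedged): not part of the original module ---
# Minimal example of the context-manager and factory interfaces defined above;
# '/tmp' and the server name are illustrative values only.
if __name__ == "__main__":
    lock = ServerWalReceiveLock('/tmp', 'example-server')
    try:
        with lock:
            print("holding %s (pid %s)" % (lock.filename, os.getpid()))
    except LockFileBusy:
        print("already held by pid %s" % lock.get_owner_pid())
    # build_if_matches() rebuilds a lock object from an on-disk file name
    rebuilt = ServerWalReceiveLock.build_if_matches('/tmp/.example-server-receive-wal.lock')
    print(rebuilt.filename if rebuilt else "no match")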
|
gpl-3.0
| -7,858,874,542,976,082,000
| 32.261324
| 79
| 0.610937
| false
| 4.125324
| false
| false
| false
|
leonsas/django-push-notifications
|
push_notifications/gcm.py
|
1
|
4098
|
"""
Google Cloud Messaging
Previously known as C2DM
Documentation is available on the Android Developer website:
https://developer.android.com/google/gcm/index.html
"""
import json
try:
from urllib.request import Request, urlopen
from urllib.parse import urlencode
except ImportError:
# Python 2 support
from urllib2 import Request, urlopen
from urllib import urlencode
from django.core.exceptions import ImproperlyConfigured
from . import NotificationError
from .settings import PUSH_NOTIFICATIONS_SETTINGS as SETTINGS
class GCMError(NotificationError):
pass
def _chunks(l, n):
"""
	Yield successive chunks of at most n items from the list l
"""
for i in range(0, len(l), n):
yield l[i:i + n]
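# Added illustration (hedged, not in the original module):
#   list(_chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]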
def _gcm_send(data, content_type):
key = SETTINGS.get("GCM_API_KEY")
if not key:
raise ImproperlyConfigured('You need to set PUSH_NOTIFICATIONS_SETTINGS["GCM_API_KEY"] to send messages through GCM.')
headers = {
"Content-Type": content_type,
"Authorization": "key=%s" % (key),
"Content-Length": str(len(data)),
}
request = Request(SETTINGS["GCM_POST_URL"], data, headers)
return urlopen(request).read()
def _gcm_send_plain(registration_id, data, collapse_key=None, delay_while_idle=False, time_to_live=0):
"""
Sends a GCM notification to a single registration_id.
This will send the notification as form data.
If sending multiple notifications, it is more efficient to use
gcm_send_bulk_message() with a list of registration_ids
"""
values = {"registration_id": registration_id}
if collapse_key:
values["collapse_key"] = collapse_key
if delay_while_idle:
values["delay_while_idle"] = int(delay_while_idle)
if time_to_live:
values["time_to_live"] = time_to_live
for k, v in data.items():
values["data.%s" % (k)] = v.encode("utf-8")
data = urlencode(sorted(values.items())).encode("utf-8") # sorted items for tests
result = _gcm_send(data, "application/x-www-form-urlencoded;charset=UTF-8")
if result.startswith("Error="):
raise GCMError(result)
return result
def _gcm_send_json(registration_ids, data, collapse_key=None, delay_while_idle=False, time_to_live=0):
"""
Sends a GCM notification to one or more registration_ids. The registration_ids
needs to be a list.
This will send the notification as json data.
"""
values = {"registration_ids": registration_ids}
if data is not None:
values["data"] = data
if collapse_key:
values["collapse_key"] = collapse_key
if delay_while_idle:
values["delay_while_idle"] = delay_while_idle
if time_to_live:
values["time_to_live"] = time_to_live
data = json.dumps(values, separators=(",", ":"), sort_keys=True).encode("utf-8") # keys sorted for tests
result = json.loads(_gcm_send(data, "application/json"))
if result["failure"]:
raise GCMError(result)
return result
def gcm_send_message(registration_id, data, collapse_key=None, delay_while_idle=False, time_to_live=0):
"""
Sends a GCM notification to a single registration_id.
This will send the notification as form data if possible, otherwise it will
fall back to json data.
If sending multiple notifications, it is more efficient to use
gcm_send_bulk_message() with a list of registration_ids
"""
args = data, collapse_key, delay_while_idle, time_to_live
try:
_gcm_send_plain(registration_id, *args)
except AttributeError:
_gcm_send_json([registration_id], *args)
def gcm_send_bulk_message(registration_ids, data, collapse_key=None, delay_while_idle=False, time_to_live=0):
"""
Sends a GCM notification to one or more registration_ids. The registration_ids
needs to be a list.
This will send the notification as json data.
"""
args = data, collapse_key, delay_while_idle, time_to_live
# GCM only allows up to 1000 reg ids per bulk message
# https://developer.android.com/google/gcm/gcm.html#request
max_recipients = SETTINGS.get("GCM_MAX_RECIPIENTS")
if len(registration_ids) > max_recipients:
ret = []
for chunk in _chunks(registration_ids, max_recipients):
ret.append(_gcm_send_json(chunk, *args))
return ret
return _gcm_send_json(registration_ids, *args)
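# --- Added usage sketch (hedged): not part of the original module ---
# Example payloads for the helpers above. It assumes Django settings that
# provide PUSH_NOTIFICATIONS_SETTINGS with GCM_API_KEY, GCM_POST_URL and
# GCM_MAX_RECIPIENTS; the registration ids below are placeholders.
def _example_send():
	single_id = "REGISTRATION_ID_1"
	many_ids = ["REGISTRATION_ID_1", "REGISTRATION_ID_2"]
	# One device: tries the plain form encoding first, falls back to JSON.
	gcm_send_message(single_id, {"message": "hello"}, collapse_key="greeting")
	# Many devices: always JSON; chunked when the list exceeds GCM_MAX_RECIPIENTS.
	gcm_send_bulk_message(many_ids, {"message": "hello"}, time_to_live=3600)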
|
mit
| 1,631,497,409,407,225,000
| 27.068493
| 120
| 0.724012
| false
| 3.135425
| false
| false
| false
|