| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| summerisgone/gitrecipe | setup.py | 1 | 1115 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name="gitrecipe",
version='0.0.2',
description='Simple buildout recipe for downloading git repositories. It uses system git command and its syntax',
author='Ivan Gromov',
author_email='summer.is.gone@gmail.com',
url='http://github.com/summerisgone/gitrecipe',
download_url='http://github.com/summerisgone/gitrecipe/zipball/0.1',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Buildout',
'Intended Audience :: Developers',
'License :: Freely Distributable',
'Natural Language :: Russian',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Programming Language :: Python :: 2.5',
'Topic :: Software Development :: Version Control',
],
namespace_packages=['recipe'],
packages=find_packages(),
install_requires=['setuptools', 'zc.recipe.egg'],
entry_points={'zc.buildout': ['default = recipe.git:GitRecipe']},
zip_safe=False,
long_description=open('README.rst').read(),
)
| bsd-3-clause | -8,936,679,593,237,095,000 | 37.448276 | 117 | 0.642152 | false | 3.926056 | false | false | false |
| jnez71/demos | geometry/bezier_surface.py | 1 | 5934 |
#!/usr/bin/env python3
"""
Efficient implementation of a Bezier surface and its differential geometry.
"""
from __future__ import division
import numpy as np
################################################## CORE
class Bezier(object):
"""
Bezier manifold of dimension 2 embedded in Euclidean space of dimension 3.
"""
def __init__(self, knots=None):
if knots is None:
# Default to identity patch
n = 4
knots = np.zeros((n, n, 3), dtype=np.float64)
for i in range(n):
for j in range(n):
knots[i, j] = np.float64((i, j, 0)) / (n-1)
self.set_knots(knots)
def set_knots(self, knots):
"""
Provide the control knots in an array with the first two
dimensions indexing which knot and the third dimension
holding the Euclidean coordinates of each knot.
"""
self.knots = np.array(knots, dtype=np.float64)
self.degree_x = self.knots.shape[0] - 1
self.degree_y = self.knots.shape[1] - 1
self.dimension = self.knots.shape[2] - 1
assert self.degree_x > 0
assert self.degree_y > 0
assert self.dimension == 2
self.dknots_x = self.degree_x * np.diff(self.knots, axis=0)
self.dknots_y = self.degree_y * np.diff(self.knots, axis=1)
def evaluate(self, x, y):
"""
De Casteljau's algorithm is used to map the given surface coordinates
(each from 0.0 to 1.0) to their corresponding location in Euclidean space.
"""
lerps_x = np.zeros((self.degree_x+1, self.dimension+1), dtype=np.float64)
for i in range(len(lerps_x)):
lerps_y = self.knots[i].copy()
for j in range(self.degree_y):
for k in range(self.degree_y - j):
lerps_y[k] = (1.0-y)*lerps_y[k] + y*lerps_y[k+1]
lerps_x[i] = lerps_y[0]
for i in range(self.degree_x):
for k in range(self.degree_x - i):
lerps_x[k] = (1.0-x)*lerps_x[k] + x*lerps_x[k+1]
return lerps_x[0]
def jacobian(self, x, y):
"""
Returns the 3by2 Jacobian matrix of the `evaluate` function
at the given argument. The Grammian of this is the metric tensor.
"""
return np.column_stack((Bezier(self.dknots_x).evaluate(x, y),
Bezier(self.dknots_y).evaluate(x, y)))
def metric(self, x, y):
"""
Returns the 2by2 metric tensor at the given surface coordinates.
"""
J = self.jacobian(x, y)
return J.T.dot(J)
def orientation(self, x, y, q=0.0):
"""
Returns a rotation matrix describing the orientation of the normal
coordinates at [`x`, `y`] with yaw angle `q` in radians.
"""
J = self.jacobian(x, y)
rx, ry = (J / np.linalg.norm(J, axis=0)).T
normal = np.cross(rx, ry)
ncrossx = np.cross(normal, rx) # must be re-unitized to mitigate roundoff error
tangent = np.cos(q)*rx + np.sin(q)*(ncrossx / np.linalg.norm(ncrossx))
binormal = np.cross(normal, tangent)
R = np.column_stack((tangent, binormal, normal))
return R / np.linalg.norm(R, axis=0) # must be re-unitized to mitigate roundoff error
def plot(self, n=40, block=True):
"""
Plots this surface discretized by the given grid size `n`.
Also shows the control knots and the central normal coordinate system.
"""
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
mesh = np.linspace(0.0, 1.0, n)
points = np.transpose([self.evaluate(x, y) for x in mesh for y in mesh])
quiver_origins = np.transpose([self.evaluate(mesh[n//2], mesh[n//2])]*3)
quiver_arrows = self.orientation(mesh[n//2], mesh[n//2])
fig = pyplot.figure()
ax = fig.add_subplot(111, projection="3d")
ax.set_title("bezier", fontsize=12)
ax.set_xlabel("rx", fontsize=12)
ax.set_ylabel("ry", fontsize=12)
ax.set_zlabel("rz", fontsize=12)
ax.scatter(*self.knots.reshape(-1, 3).T, c='r', s=80)
ax.scatter(*points, c=points[-1, :], s=60, marker='o', edgecolors=None)
ax.quiver(quiver_origins[0], quiver_origins[1], quiver_origins[2],
quiver_arrows[0], quiver_arrows[1], quiver_arrows[2],
length=0.25, color=(1.0, 0.5, 0.0), lw=2.5)
ax.axis("equal")
pyplot.show(block=block)
################################################## TEST
if __name__ == "__main__":
# Initialize a flat set of knots
knots = np.zeros((5, 4, 3), dtype=np.float64)
for i in range(knots.shape[0]):
for j in range(knots.shape[1]):
knots[i, j] = np.float64((i, j, 0))
# Mess with the knots to make them more interesting
knots[:, :, 0] *= -1.0
knots[1:3, 1:3, 2] = -1.0
knots[1:3, 0, 2] = (0.25, 0.5)
knots[-1, -1, :] = (-4/2, 3/2, 0.5)
# Construct the Bezier surface
bezier = Bezier(knots)
# Verify the analytical Jacobian against finite-differences at a random location
x, y = np.random.sample(2)
r = bezier.evaluate(x, y)
d = 1e-6
drdx = (bezier.evaluate(x+d, y) - r) / d
drdy = (bezier.evaluate( x, y+d) - r) / d
assert np.allclose(np.column_stack((drdx, drdy)), bezier.jacobian(x, y), atol=10*d)
# Verify that the metric tensor computation is consistent with finite-differences
assert np.allclose([[drdx.dot(drdx), drdx.dot(drdy)],
[drdy.dot(drdx), drdy.dot(drdy)]], bezier.metric(x, y), atol=10*d)
# Verify that the orientation calculation returns an orthonormal matrix
R = bezier.orientation(x, y, 2*np.pi*np.random.sample())
assert np.allclose(R.dot(R.T), np.eye(3))
# Plot the corresponding Bezier surface to visually inspect
bezier.plot()
| mit | -1,133,954,785,867,220,100 | 37.283871 | 94 | 0.568419 | false | 3.092236 | false | false | false |
| appleseedhq/cortex | test/IECoreScene/MeshNormalsOpTest.py | 1 | 4405 |
##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import IECoreScene
import math
class MeshNormalsOpTest( unittest.TestCase ) :
def testPlane( self ) :
p = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ) )
if "N" in p :
del p["N"]
self.assert_( not "N" in p )
pp = IECoreScene.MeshNormalsOp()( input=p )
self.assert_( "N" in pp )
self.assertEqual( pp["N"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
normals = pp["N"].data
self.assert_( normals.isInstanceOf( IECore.V3fVectorData.staticTypeId() ) )
self.assertEqual( normals.size(), pp.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( normals.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
for n in normals :
self.assertEqual( n, imath.V3f( 0, 0, 1 ) )
def testOnlyNAdded( self ) :
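# Removing the generated "N" should give back a primitive equal to the input, i.e. the op adds nothing else.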
p = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ) )
del p["N"]
pp = IECoreScene.MeshNormalsOp()( input=p )
del pp["N"]
self.assertEqual( pp, p )
def testSphere( self ) :
s = IECore.Reader.create( "test/IECore/data/cobFiles/pSphereShape1.cob" ).read()
del s["N"]
self.assert_( not "N" in s )
ss = IECoreScene.MeshNormalsOp()( input=s )
self.assert_( "N" in ss )
self.assertEqual( ss["N"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
normals = ss["N"].data
self.assert_( normals.isInstanceOf( IECore.V3fVectorData.staticTypeId() ) )
self.assertEqual( normals.size(), ss.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( normals.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
points = ss["P"].data
for i in range( 0, normals.size() ) :
self.assert_( math.fabs( normals[i].length() - 1 ) < 0.001 )
p = points[i].normalize()
self.assert_( normals[i].dot( p ) > 0.99 )
self.assert_( normals[i].dot( p ) < 1.01 )
def testUniformInterpolation( self ) :
m = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ), imath.V2i( 10 ) )
del m["N"]
self.assertTrue( "N" not in m )
m2 = IECoreScene.MeshNormalsOp()( input = m, interpolation = IECoreScene.PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( m2["N"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( len( m2["N"].data ), m2.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ) )
for n in m2["N"].data :
self.assertEqual( n, imath.V3f( 0, 0, 1 ) )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -3,052,171,660,580,019,000 | 38.330357 | 116 | 0.69353 | false | 3.563916 | true | false | false |
| lihuanshuai/libmc | tests/shabby/reconnect_delay.py | 1 | 1557 |
# coding: utf-8
import os
import time
import libmc
import slow_memcached_server
import subprocess
def memcached_server_ctl(cmd, port):
ctl_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)
))),
'misc', 'memcached_server'
)
print ctl_path
subprocess.check_call([ctl_path, cmd, str(port)])
def test_soft_server_error():
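# A SERVER_ERROR reply for one key should not mark the whole host dead: later sets succeed both right away and after the retry timeout expires.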
mc = libmc.Client(["127.0.0.1:%d" % slow_memcached_server.PORT])
mc.config(libmc._client.MC_POLL_TIMEOUT,
slow_memcached_server.BLOCKING_SECONDS * 1000 * 2) # ms
RETRY_TIMEOUT = 2
mc.config(libmc.MC_RETRY_TIMEOUT, RETRY_TIMEOUT)
assert mc.set('foo', 1)
assert not mc.set(slow_memcached_server.KEY_SET_SERVER_ERROR, 1)
assert mc.set('foo', 1) # back to live
time.sleep(RETRY_TIMEOUT / 2)
assert mc.set('foo', 1) # alive
time.sleep(RETRY_TIMEOUT + 1)
assert mc.set('foo', 1) # alive
def test_hard_server_error():
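# Stopping the memcached process marks the host dead; the client only retries it after MC_RETRY_TIMEOUT seconds, so sets keep failing until that window has passed.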
normal_port = 21211
mc = libmc.Client(["127.0.0.1:%d" % normal_port])
RETRY_TIMEOUT = 10
mc.config(libmc.MC_RETRY_TIMEOUT, RETRY_TIMEOUT)
assert mc.set('foo', 1)
memcached_server_ctl('stop', normal_port)
assert not mc.set('foo', 1) # still fail
memcached_server_ctl('start', normal_port)
assert not mc.set('foo', 1) # still fail
time.sleep(RETRY_TIMEOUT + 1)
assert mc.set('foo', 1) # back to live
def main():
test_soft_server_error()
test_hard_server_error()
if __name__ == '__main__':
main()
| bsd-3-clause | -3,118,505,879,109,745,000 | 24.95 | 70 | 0.62492 | false | 3.046967 | false | false | false |
| eirannejad/pyRevit | extensions/pyRevitTools.extension/pyRevit.tab/Project.panel/ptools.stack/Links.pulldown/Create Workset For Linked Element.pushbutton/script.py | 1 | 1095 |
from pyrevit import revit, DB, UI
from pyrevit import script
from pyrevit import forms
logger = script.get_logger()
selection = revit.get_selection()
linkedModelName = ''
if len(selection) > 0:
for el in selection:
if isinstance(el, DB.RevitLinkInstance):
linkedModelName = el.Name.split(':')[0]
elif isinstance(el, DB.ImportInstance):
linkedModelName = \
el.Parameter[DB.BuiltInParameter.IMPORT_SYMBOL_NAME].AsString()
if linkedModelName:
if not revit.doc.IsWorkshared and revit.doc.CanEnableWorksharing:
revit.doc.EnableWorksharing('Shared Levels and Grids',
'Workset1')
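# Create a workset named after the linked model and move the selected link/import instance onto it.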
with revit.Transaction('Create Workset for linked model'):
newWs = DB.Workset.Create(revit.doc, linkedModelName)
worksetParam = \
el.Parameter[DB.BuiltInParameter.ELEM_PARTITION_PARAM]
worksetParam.Set(newWs.Id.IntegerValue)
else:
forms.alert('At least one linked element must be selected.')
| gpl-3.0 | -6,848,140,956,992,978,000 | 35.5 | 79 | 0.624658 | false | 3.967391 | false | false | false |
| 9p0le/simiki | simiki/log.py | 1 | 2342 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from logging import getLogger, Formatter, StreamHandler
from simiki import utils
from simiki.compat import is_linux, is_osx
class ANSIFormatter(Formatter):
"""Use ANSI escape sequences to colored log"""
def format(self, record):
try:
msg = super(ANSIFormatter, self).format(record)
except:
# for python2.6
# Formatter is old-style class in python2.6 and type is classobj
# another trick: http://stackoverflow.com/a/18392639/1276501
msg = Formatter.format(self, record)
lvl2color = {
"DEBUG": "blue",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "bgred"
}
rln = record.levelname
if rln in lvl2color:
return "[{0}]: {1}".format(
utils.color_msg(lvl2color[rln], rln),
msg
)
else:
return msg
class NonANSIFormatter(Formatter):
'''Non ANSI color format'''
def format(self, record):
try:
msg = super(NonANSIFormatter, self).format(record)
except:
# for python2.6
# Formatter is old-style class in python2.6 and type is classobj
# another trick: http://stackoverflow.com/a/18392639/1276501
msg = Formatter.format(self, record)
rln = record.levelname
return "[{0}]: {1}".format(rln, msg)
def _is_platform_allowed_ansi():
'''ANSI colors can be used on Linux/macOS'''
if is_linux or is_osx:
return True
else:
return False
def logging_init(level=None, logger=getLogger(),
handler=StreamHandler(), use_color=True):
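# Note: the default logger and handler arguments are evaluated once, when the function is defined, so repeated calls reuse the same root logger and stream handler.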
if use_color and _is_platform_allowed_ansi():
fmt = ANSIFormatter()
else:
fmt = NonANSIFormatter()
handler.setFormatter(fmt)
logger.addHandler(handler)
if level:
logger.setLevel(level)
if __name__ == "__main__":
logging_init(level=logging.DEBUG)
root_logger = logging.getLogger()
root_logger.debug("debug")
root_logger.info("info")
root_logger.warning("warning")
root_logger.error("error")
root_logger.critical("critical")
| mit | 7,475,655,482,972,480,000 | 25.91954 | 76 | 0.584116 | false | 3.916388 | false | false | false |
| akegan/plasmoids | relReconVars.py | 1 | 9293 |
###############################################
###
### This file is generated by wpp.
###
### Input file : /scr_verus/wernerg/vrun/relRecon/relReconRepo/relReconPre.py
### Output file : relRecon.in
### Translation: 2014 Dec 04, 14:26:11
###
### disthistMac version $Id: disthistMac.py 104 2014-04-29 03:36:27Z wernerg $
### domainDecomp version $Id: domainDecomp.py 93 2014-01-24 04:32:36Z wernerg $
### funcMac version $Id: funcMac.py 103 2014-02-18 23:17:40Z wernerg $
### histMac version $Id: histMac.py 99 2014-02-18 21:45:07Z wernerg $
### mathphysMac version $Id: mathphysMac.py 55 2013-03-07 19:25:09Z wernerg $
### verbosityMac version $Id: verbosityMac.py 55 2013-03-07 19:25:09Z wernerg $
### wpp version $Id: wpp.py 102 2014-02-18 23:09:05Z wernerg $
### wppGlobals version $Id: wppGlobals.py 75 2013-08-14 01:55:44Z wernerg $
###
### created for vorpal r22018
###
###############################################
##########
#
# Import needed modules
#
##########
import sys
sys.path.append(".")
import math
#
__FILE__ = "/scr_verus/wernerg/vrun/relRecon/relReconRepo/relReconPre.py"
T_S_ION = 11118542910.652687
DENSITY_BG = 470659793006.75714
GAMMA_MINUS_1_DRIFT_ION = 0.25000000000000044
baseExpr1 = '( -0.0085225444505285155 * lncosh((93.75)*(1/5.0)))'
NOM_DUMPPERIOD = 101
estHistNumsPerStep = 20803.793333333335
USE_VAY_MOVE = 0
LY_BOX = 187.5
PLASMA_SIGMA_I = 59.999999999999993
LX_PML_XLO = 0.0
mathphysMacVersion = '$Id: mathphysMac.py 55 2013-03-07 19:25:09Z wernerg $'
PLASMA_SIGMA_E = 59.999999999999993
vorpalExec = '/scr_verus/wernerg/vorpals/mainline3/vorpalall/builds/vorpal/par/vorpal/vorpal'
X_PML_XHI = 281.25
baseExpry = '( -0.0085225444505285155 * lncosh((y)*(1/5.0)))'
SMOOTH_J = 0
ELECMASS = 9.10938215e-31
SEED_PTCLS_KT_OVER_MCSQR = 10.0
SMOOTH_E = 2
LIGHTSPEED = 299792458.0
p = 20
LY_TOT = 375.0
numCells = [400, 400]
DENSITY_0 = 4706597930067.5713
BETA_DRIFT_ION = 0.59999999999999987
LARMOR_PERIODS_PER_SEED_SAVE = 50
insertProb = 0.2525
BASE_AMP_UPDATERS = ['yeeAmpere']
NZ_ABC = 0
CFL_FRAC = 0.99
MPTCLS_PER_DZ = 1
MPTCLS_PER_DY = 8
LZ_TOTAL_BOXSIZE_OVER_LARMOR_LENGTH = 0.0
PTCL_BCS = ['periodic', 'periodic', 'periodic']
numDomains = 64
NX_BEGIN = 0
procInput = True
PERIODIC_DIRS = [0, 1, 2]
LY_OVER_LARMOR_LENGTH = 100.0
LX_PML_XHI = 0.0
ELECCHARGE = -1.602176487e-19
X_PML_XLO = -93.75
USE_CELL_SPECIES = 0
LZ_PML_ZHI = 0.0
ezAtXHistBlockUpNYs = [300, 301]
GAMMA_MINUS_1_DRIFT = 0.25
TRACK_PTCLS = 1
VP_DEBUG = 7
BG_MPTCLS_PER_DZ = 1
VP_WARNING = 4
LX_TOTAL_BOXSIZE_OVER_LARMOR_LENGTH = 200.0
ALL_SPECIES_HIST_PERIOD = 100
X_END = 281.25
densityBins = [100, 100]
LZ = 0.0
LX = 375.0
LY = 375.0
maxCellsPerDomain = 3300
numLatitudeBins = 60
ezAvgHistBlockDnNXs = [206, 238, 254, 270, 278, 286, 290, 294, 296, 298, 299, 300, 301, 302, 304, 306, 310, 314, 322, 330, 346, 362, 394]
SMOOTH_E_ENERGY_LEFT_AT_NYQ_K = 0.35910836115772415
NORMALIZE_LARMOR = 1
BG_DIST_IS_POWER_LAW = 0
FIELDDUMPPERIOD = 100
numLongitudeBins = 120
midDeltaNY = 1
ezAvgHistBlockDnMidNYs = [99, 101]
VP_DEBUG3 = 9
VP_DEBUG2 = 8
SMOOTH_E_ENERGY_LOSS_AT_NYQ_K = 0.6408916388422758
NX_END = 400
LCM_PERIOD = 20
PBC_IN_X = 1
PBC_IN_Y = 1
PBC_IN_Z = 1
LX_TOTS = [375.0, 375.0, 0.0]
yp = 'yPosHolder'
Z_START = -0.0
VP_INFO = 6
name = 'BxQ7Right'
A_PERTURB_AMP = 0.01
PROTMASS = 1.672621637e-27
ezAvgHistBlockUpNXs = [6, 38, 54, 70, 78, 86, 90, 94, 96, 98, 99, 100, 101, 102, 104, 106, 110, 114, 122, 130, 146, 162, 194]
LZ_PML_ZLO = 0.0
NY_ABC = 0
ezAtXHistBlockDnNXs = [300, 301]
SMALL_DIST_CALC_PERIOD = 50
TRACK_PTCLS_YRANGE = 'LY_BOX/2.'
MIN_WEIGHT = 1e-06
DOUBLE_BOX_IN_X = 1
LX_BOX = 187.5
LARMOR_FREQ = 159889310.93333334
oPtUpNY = 300
oPtUpNX = 300
Y_PML_YLO = -93.75
smoothers2InX = ['smoothAgainE00', 'smoothAgainE10', 'smoothAgainE20']
smoothers2InY = ['smoothAgainE01', 'smoothAgainE11', 'smoothAgainE21']
NZ_END = 0
useIsoConductors = 0
AMPERE_UPDATERS = ['yeeAmpere']
LARMOR_LENGTH = 1.875
Y_START = -93.75
B_0 = 0.001704508890105703
densExpr = '4.706597930067571e+12 / 4.706597930067571e+12 * (1. / cosh(yPosHolder*(1./5.0)))^2'
T_BG = 59298895.523481
trackPtclsYrange = 93.75
coef = '-0.0085225444505285155'
qi = 7
NUMFIELDDUMPS = 20
LX_TOT = 375.0
xc = 281.25
xPtUpNX = 100
xPtUpNY = 300
TRAJ_APPLY_PERIOD = 6
NZ = 0
NUM_EZ2D_HISTS_ALONG_X = 20
GAMMA_BG_INDEX = 2.0
USE_ABC = False
BETA_GAMMA_DRIFT_ION = 0.75
PIO2 = 1.5707963267948966
KB = 1.3806504e-23
MU0 = 1.2566370614359173e-06
DT1D = 3.1271633924826753e-09
VP_ALERT = 1
EZ_AVG_CALC_PERIOD = 50
oPtDnNX = 100
oPtDnNY = 100
verbosityMacVersion = '$Id: verbosityMac.py 55 2013-03-07 19:25:09Z wernerg $'
false = False
E_B_FAC = 1.008
LX_TOTAL_BOXSIZE_OVER_DELTA = 75.0
DZ = 1.0
DX = 0.9375
DY = 0.9375
ezAvgHistBlockDnNYs = [100, 104, 108, 116, 124, 140]
DT = 2.186936930239505e-09
SIN_SQR_LAYER_K = 0.3086582838174551
SVN_REVISION_PREFILE = '$Rev: 43 $'
Y_PML_YHI = 281.25
useGammaNumberDensity = True
GAMMA_DRIFT_ION = 1.2500000000000004
GAMMA_BG_LO = 1.0
SMOOTH_E_ENERGY_LOSS_AT_LAYER_K = 0.09294990994629804
ENERGY_CALC_PERIOD = 20
prty = '(1 + 0.01 * cos(3.141592653589793 * (x) / 187.5) * cos(3.141592653589793*(y)/187.5)^2 )'
BETA_DRIFT = 0.6
NUMDUMPS = 20
ezAvgHistBlockHeights = [4, 4, 8, 8, 16]
prtyp = '(1 + 0.01 * cos(3.141592653589793 * (x - 187.5) / 187.5) * cos(3.141592653589793*(if(y <= 0.5*187.5, y, 187.5 - y))/187.5)^2 )'
DENSITY_FACTOR = 4706597930067.5713
LAYER_PTCL_STAT_MAPCELLSIZE = [0]
PTCL_BC_IN_X = 'periodic'
PTCL_BC_IN_Y = 'periodic'
PTCL_BC_IN_Z = 'periodic'
Z_PML_ZLO = 0.0
numGammaBins = 50
PY = 100
gammaBinMin = 1.0
xcMax = 281.25
SMOOTH_E_ENERGY_LEFT_AT_LAYER_K = 0.907050090053702
gammaBinMax = 375.0
X_START = -93.75
SMOOTH_E_ENERGY_LEFT_AT_HALF_NYQ_K = 0.7741356267766225
LXS = [375.0, 375.0, 0.0]
M_CONST = 3.7955758958333335e-30
LARGE_DIST_CALC_PERIOD_STR = '250'
NX_TOT = 400
VP_NOTICE = 5
DOMAIN_DECOMP = ('quadWeighted', 64, [1.0, 3.0])
BOX_CROSSING_TIME = 8.844953762811314e-07
VP_CRITICAL = 2
PZ = 0
PX = 200
PI = 3.141592653589793
baseExpryp = '( -0.0085225444505285155 * lncosh((if(y <= 0.5*187.5, y, 187.5 - y))*(1/5.0)))'
EZ_AVG_NUM_CONSECUTIVE_STEPS = 5
endy = 50
endx = 50
wl = 1
NUM_TRACK_PTCLS = 10000
wy = 3.0
wx = 1.0
LZ_TOT = 0.0
K_LAYER = 1.2566370614359172
Z_PML_ZHI = 0.0
E_B_FAC2 = 0.992
GAMMA_SCALE = 1.875
DUMP_NODAL_FIELDS = 0
MASS_RATIO = 1.0
LARGE_DIST_CALC_PERIOD = 100
SIMTIME_OVER_NR_LARMOR_PERIOD = 0
velGenDrift = {'kind': 'relMaxwell', 'T': 11118542910.652687}
gammaBetaSqr = 0.4499999999999999
NX_ABC = 0
DT2D = 2.211238440702829e-09
C2 = 8.987551787368176e+16
NY_TOT = 400
GAMMA_BG_HI = 100.0
kB_T_S_ION_OVER_MCSQR = 1.875
halfLy = 93.75
kB_T_S_OVER_MCSQR = 1.875
IONMASS = 9.10938215e-31
ACTUAL_SIM_TIME = 4.3738738604790101e-06
TIMESTEPS_PER_LARMOR = 2.859856952655878
ezAvgBlockSectionCellsY = 4
NX_TOTS = [400, 400, 0]
ptclBC = 'periodic'
LY_PML_YHI = 0.0
TIMESTEPS = 2000
BG_MPTCLS_PER_DX = 4
BG_MPTCLS_PER_DY = 8
TWOPI = 6.283185307179586
MPTCLS_PER_DX = 4
LY_TOTAL_BOXSIZE_OVER_DELTA = 75.0
SMALL_DIST_CALC_PERIOD_STR = '50'
T_BG_ION = 59298895.523481
SIMTIME_OVER_BOX_CROSSING_TIME = 5.0
CELLS_PER_LARMOR_LENGTH = 2.0
DELTA = 5.0
T_S = 11118542910.652687
LZ_OVER_LARMOR_LENGTH = 0.5
NZ_TOT = 0
BETA_GAMMA_DRIFT = 0.75
inputFile = 'relRecon.in'
Y_END = 281.25
ELEMCHARGE = 1.602176487e-19
decompTypes = ['even', 'weighted', 'quadWeighted']
NZ_BEGIN = 0
VP_EMERGENCY = 0
ezAvgBlockSectionCellsX = 1
NXS = [400, 400, 0]
SMOOTH_E_A_FAC2 = 0.004
SMOOTH_E_ENERGY_LOSS_AT_HALF_NYQ_K = 0.2258643732233775
bpf = 8.0
SMOOTH_E_A_FAC = -0.004
J_0 = 271.28101540440281
B_GUIDE_OVER_B0 = 0.0
numCellsT = 160000
NY_BEGIN = 0
VP_ERROR = 3
LY_TOTAL_BOXSIZE_OVER_LARMOR_LENGTH = 200.0
LZ_TOTAL_BOXSIZE_OVER_DELTA = 0.0
DUMPPERIOD = 100
LY_PML_YLO = 0.0
ezAtXHistBlockUpNXs = [100, 101]
EZ_AVG_CALC_PERIOD_STR = '50'
tubs = [281.25, 93.75, 0.0]
NOM_TIME = 4.4224768814056575e-06
NDIM = 2
run = True
ASPECT_RATIO_Y_OVER_X = 1.0
MUONMASS = 1.8835313e-28
BASE_FAR_UPDATERS = ['yeeFaraday']
estPtclNums = 1414.0
ndy = 8
ndx = 8
ezAvgHistBlockUpMidNYs = [299, 301]
PTCLDUMPPERIOD = 100
ezAvgHistBlockWidths = [32, 16, 16, 8, 8, 4, 4, 2, 2, 1, 1, 1, 1, 2, 2, 4, 4, 8, 8, 16, 16, 32]
SET_ION_LAYER = 'theta'
tlbs = [-93.75, 0.0, -0.0]
VEL_NORM_FACTOR = 5.892988110396219e-64
DENSITY_BG_OVER_0 = 0.1
NX = 400
NY = 400
ORIGIN = [-93.75, -93.75, -0.0]
estFieldNums = 22.0
true = True
smoothersInY = ['smoothE01', 'smoothE11', 'smoothE21']
smoothersInX = ['smoothE00', 'smoothE10', 'smoothE20']
OTHER_EZ_AVG_CALC_PERIOD = 100
EPSILON0 = 8.854187817620389e-12
smallPeriods = [20]
SEED_PTCLS = 0
DOUBLE_PERIODIC_IN_Y = 1
ELECMASSEV = 510998.90984764055
periods = [50, 250, 50, 20]
SVN_ID_PREFILE = '$Id: relRecon.pre 43 2012-06-22 16:25:13Z wernerg $'
VelGenBg = {'kind': 'relMaxwell', 'T': 59298895.523481}
LX_OVER_LARMOR_LENGTH = 100.0
velGenDriftIon = {'kind': 'relMaxwell', 'T': 11118542910.652687}
NY_END = 400
cellsPerDomX = [50, 50, 50, 50, 50, 50, 50, 50]
cellsPerDomY = [65, 35, 34, 64, 66, 36, 35, 65]
nx = 400
ny = 400
ezAtXHistBlockDnNYs = [100, 101]
CELLS_PER_SHEET_THICKNESS = 5.333333333333333
xcDelta = 18.75
Z_END = 0.0
USE_GAMMAM1_BINS = 0
FARADAY_UPDATERS = ['yeeFaraday']
GAMMA_DRIFT = 1.25
VelGenBgIon = {'kind': 'relMaxwell', 'T': 59298895.523481}
xPtDnNY = 100
xPtDnNX = 300
DUMP_AT_START = 0
DT3D = 1.843048705090566e-09
ALPHA = 0.0
| mit | 7,488,255,541,433,285,000 | 27.160606 | 137 | 0.689551 | false | 2.087376 | false | false | false |
| esteluk/reinhardt | memberinfo/mailman.py | 2 | 4938 |
# Copyright (C) 1998-2007 by the Free Software Foundation, Inc.
# Much of this is based on /usr/lib/mailman/bin/paths.py; it fixes the project's path so that Mailman can be imported.
# BEGIN MAILMAN PATH INCLUSION ---------------------------
import os
import sys
from warnings import filterwarnings
# some scripts expect this attribute to be in this module
prefix = '/var/lib/mailman'
exec_prefix = '${prefix}'
# work around a bogus autoconf 2.12 bug
if exec_prefix == '${prefix}':
exec_prefix = prefix
# Suppress Python 2.5 warning about string exceptions.
filterwarnings('ignore', '.* string exception', DeprecationWarning)
# Hack the path to include the parent directory of the $prefix/Mailman package
# directory.
sys.path.insert(0, prefix)
# We also need the pythonlib directory on the path to pick up any overrides of
# standard modules and packages. Note that these must go at the front of the
# path for this reason.
sys.path.insert(0, os.path.join(prefix, 'pythonlib'))
# Include Python's site-packages directory.
sitedir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3],'site-packages')
sys.path.append(sitedir)
# END MAILMAN PATH INCLUSION ---------------------------
from compsoc.memberinfo.models import MailingList
from Mailman import Utils
from Mailman import MailList
from django.contrib.auth.models import User
from Mailman import Errors
def validate_lists():
'''
Checks current data in the compsoc database corresponds to that in Mailman.
Caveat: they have to be subscribed using the same email address they use for the compsoc website.
This includes:
Checking all lists in the MailingList model have a mailman equivalent
Checking all signups to a list are subscribed to the mailman list
'''
for list in MailingList.objects.all():
if not Utils.list_exists(list.list):
print "%s doesn't exist on mailman" % list.list
else:
mailman_list = MailList.MailList(list.list, lock=False)
members = mailman_list.getMemberCPAddresses(mailman_list.getRegularMemberKeys()+mailman_list.getDigestMemberKeys())
for user in list.users.all():
if not user.email in members:
print "The website thinks %s is subscribed to %s but he/she isn't" % (user.member.all_name(),list.list)
def import_lists(prefix):
'''
Imports lists named with the given prefix from mailman
into the compsoc website.
Caveat: they have to be subscribed using the same email
address they use for the compsoc website.
'''
for list_name in Utils.list_names():
if list_name.startswith(prefix):
list,new = MailingList.objects.get_or_create(list=list_name)
mailman_list = MailList.MailList(list_name, lock=False)
members = mailman_list.getMemberCPAddresses(mailman_list.getRegularMemberKeys()+mailman_list.getDigestMemberKeys())
for member in members:
try:
list.users.add(User.objects.get(email=member))
except User.DoesNotExist: pass
class UserDesc:
def __init__(self,name,address):
self.name = name
self.address = address
self.digest = False
class MailmanError(Exception):
def __init__(self,msg):
self.msg = msg
def subscribe_member(user,list):
'''
Adds a compsoc member to a mailing list
'''
try:
mailman_list = MailList.MailList(list.list)
try:
# 1 = send welcome message
mailman_list.ApprovedAddMember(UserDesc(user.member.all_name(),user.email), 1, 0)
mailman_list.Save()
except Errors.MMAlreadyAMember:
raise MailmanError('User is already a member')
except Errors.MembershipIsBanned, pattern:
raise MailmanError("User's email is banned by pattern %s " % pattern)
except Errors.MMBadEmailError:
raise MailmanError("Mailman has rejected the user's email")
except Errors.MMHostileAddress:
raise MailmanError('User is considered hostile by mailman')
finally:
mailman_list.Unlock()
except Errors.MMUnknownListError:
raise MailmanError("This mailman list doesn't exist")
def unsubscribe_member(user,list):
'''
Removes a compsoc member from a mailing list
'''
try:
mailman_list = MailList.MailList(list.list)
try:
if not mailman_list.isMember(user.email):
raise MailmanError("User isn't subscribed to the list")
#last 2 args: is admin notified, is user notified
mailman_list.ApprovedDeleteMember(user.email, 'bin/remove_members',True,True)
mailman_list.Save()
finally:
mailman_list.Unlock()
except Errors.MMUnknownListError:
raise MailmanError("This mailman list doesn't exist")
| agpl-3.0 | -8,628,226,163,998,219,000 | 38.190476 | 127 | 0.662009 | false | 3.966265 | false | false | false |
| liaozhida/liaozhida.github.io | _posts/pythonbak/preCaptcha.py | 1 | 2197 |
# -*- coding: utf-8 -*-
import requests
import json
from bs4 import BeautifulSoup
import time
class CaptchaHelper:
def __init__(self):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Host':'www.zhihu.com',
'Origin':'https://www.zhihu.com',
'Referer':'https://www.zhihu.com',
'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8'
}
data_file = open('config.json')
self.data = json.load(data_file)
self._session = requests.session()
def douban(self):
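# Fetch the Douban page, pull the captcha-id and the captcha image URL out of it, and save the image locally; the captcha-id is written back to config.json.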
url = self.data["douban"]["captcha-url"]
response = self._session.get(url)
# print response.text
page = BeautifulSoup(response.text, 'lxml')
captcha_id = page.find('input', attrs={'name':'captcha-id'})['value']
imageurl = page.find('img', alt='captcha')['src']
response = requests.get(imageurl, stream=True)
with open('./captcha/douban.png', 'wb') as f:
f.write(response.content)
f.close
del response
self.data['douban']['captcha-id'] = captcha_id
print self.data
file = open('config.json','w')
file.write('\r\n')
json.dump(self.data,file);
file.close()
def zhihu(self):
# Get the captcha URL
imageurl = self.data['zhihu']['captcha-url']
print imageurl
imageurl = 'http://www.zhihu.com/captcha.gif?r=%d&type=login';
response = self._session.get(imageurl % (time.time() * 1000), headers=self.headers)
# Save the captcha image locally
with open('./captcha/zhihu.png', 'wb') as f:
f.write(response.content)
f.close
del response
# Write the cookie info to a file
file = open('zhihu_cookies','w');
cookies = self._session.cookies.get_dict()
json.dump(cookies, file)
file.close()
if __name__ == '__main__':
ch = CaptchaHelper();
# ch.douban()
ch.zhihu();
else:
print 'being imported as module'
| apache-2.0 | -7,904,162,348,769,327,000 | 25.654321 | 150 | 0.553497 | false | 3.416139 | false | false | false |
| SystemsBioinformatics/cbmpy | setupegg.py | 1 | 1159 |
"""
CBMPy: Constraint Based Modelling in Python (http://pysces.sourceforge.net/cbm)
============
Copyright (C) 2010-2018 Brett G. Olivier, VU University Amsterdam, Amsterdam, The Netherlands
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
Author: Brett G. Olivier
Contact email: bgoli@users.sourceforge.net
Last edit: $Author: bgoli $ ($Id: setupegg.py 660 2018-09-24 14:57:04Z bgoli $)
"""
"""
A setup.py script to use setuptools, which gives egg goodness, etc.
Adapted from the original NumPy src (numpy.scipy.org).
"""
FRYING_EGGS = True
from setuptools import setup
execfile('setup.py')
| gpl-3.0 | 2,300,610,091,657,087,700 | 35.21875 | 93 | 0.762726 | false | 3.512121 | false | false | false |
| Jelby-John/HatalogicoWeatherStation | main.py | 1 | 5387 |
#!/usr/bin/python
from Adafruit_PWM_Servo_Driver import PWM
from Adafruit_ADS1x15 import ADS1x15
import time, os, sys
import Adafruit_DHT
# HOW MANY CYCLES TO BE PERFORMED BEFORE SHOWING THE HIGH AND LOW SEQUENCE
# SET TO 0 FOR OFF
intervalHighLow = 60
# HOW LONG TO REST BETWEEN CYCLES - ZERO IS FINE
intervalSleep = 1
# HOW LONG TO DISPLAY THE HIGH AND LOW DISPLAYS
intervalDisplay = 5
# INTERVAL COUNTER/TRACKER. ALWAYS START AT 1
intervalCounter = 1
# Sensor should be set to Adafruit_DHT.DHT11,
# Adafruit_DHT.DHT22, or Adafruit_DHT.AM2302.
DHTsensor = Adafruit_DHT.DHT22
DHTpin = '22'
# SETUP THE PWMS
pwm = PWM(0x70)
pwm.setPWMFreq(100)
# SETUP THE ADCS
ADS1015 = 0x00
gain = 6144
sps = 100
adc = ADS1x15(address=0x49, ic=ADS1015)
# SET LEFT AND RIGHT POSITION FOR SERVOS
servoMin = 380
servoMax = 1150
# DEFINE SERVO PINS ON HATALOGICO PWMS
servoLight = 8
servoHumid = 10
servoTemp = 12
# DEFINE MAX AND MIN VALUES
tempMin = 40
tempMax = 15
humidMin = 100
humidMax = 0
lightMin = 1
lightMax = 2800
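# Note: tempMin > tempMax and humidMin > humidMax, so those two gauges presumably sweep in the reverse direction on purpose.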
# DECLARE DEFAULT VALUES FOR HIGH AND LOW TRACKERS
tempHigh = 0
tempLow = 100
humidHigh = 0
humidLow = 100
lightHigh = 0
lightLow = 100
# LED PIN CONFIG ON HATALOGICO PWMS
brightRed = 3
brightGreen = 5
humidRed = 7
humidGreen = 9
tempRed = 11
tempGreen = 13
def showHighs():
# SCALE READINGS INTO OUTPUT VALUES
tempPercent = (tempHigh - tempMin) / (tempMax - tempMin)
tempOutput = int(tempPercent * (servoMax - servoMin) + servoMin)
lightPercent = (lightHigh - lightMin) / (lightMax - lightMin)
lightOutput = int(lightPercent * (servoMax - servoMin) + servoMin)
humidPercent = (humidHigh - humidMin) / (humidMax - humidMin)
humidOutput = int(humidPercent * (servoMax - servoMin) + servoMin)
pwm.setPWM(brightGreen, 0, 4095)
pwm.setPWM(brightRed, 0, 0)
pwm.setPWM(humidGreen, 0, 4095)
pwm.setPWM(humidRed, 0, 0)
pwm.setPWM(tempGreen, 0, 4095)
pwm.setPWM(tempRed, 0, 0)
pwm.setPWM(servoTemp, 0, tempOutput)
pwm.setPWM(servoHumid, 0, humidOutput)
pwm.setPWM(servoLight, 0, lightOutput)
time.sleep(intervalDisplay)
def showLows():
# SCALE READINGS INTO OUTPUT VALUES
tempPercent = (tempLow - tempMin) / (tempMax - tempMin)
tempOutput = int(tempPercent * (servoMax - servoMin) + servoMin)
lightPercent = (lightLow - lightMin) / (lightMax - lightMin)
lightOutput = int(lightPercent * (servoMax - servoMin) + servoMin)
humidPercent = (humidLow - humidMin) / (humidMax - humidMin)
humidOutput = int(humidPercent * (servoMax - servoMin) + servoMin)
pwm.setPWM(brightGreen, 0, 0)
pwm.setPWM(brightRed, 0, 4095)
pwm.setPWM(humidGreen, 0, 0)
pwm.setPWM(humidRed, 0, 4095)
pwm.setPWM(tempGreen, 0, 0)
pwm.setPWM(tempRed, 0, 4095)
pwm.setPWM(servoTemp, 0, tempOutput)
pwm.setPWM(servoHumid, 0, humidOutput)
pwm.setPWM(servoLight, 0, lightOutput)
time.sleep(intervalDisplay)
def lightsOff():
pwm.setPWM(brightRed, 0, 4095)
pwm.setPWM(humidRed, 0, 4095)
pwm.setPWM(tempRed, 0, 4095)
pwm.setPWM(brightGreen, 0, 4095)
pwm.setPWM(humidGreen, 0, 4095)
pwm.setPWM(tempGreen, 0, 4095)
def startup():
lightsOff()
# TURN ON RED LEDS FOR SERVO START-UP PROCEDURE
pwm.setPWM(brightRed, 0, 0)
pwm.setPWM(humidRed, 0, 0)
pwm.setPWM(tempRed, 0, 0)
time.sleep(3)
lightsOff()
pwm.setPWM(brightGreen, 0, 0)
pwm.setPWM(humidGreen, 0, 0)
pwm.setPWM(tempGreen, 0, 0)
time.sleep(5)
lightsOff()
startup()
while (True):
if(intervalCounter == intervalHighLow):
showHighs()
showLows()
lightsOff()
intervalCounter = 1
elif(intervalCounter < intervalHighLow):
intervalCounter += 1
# GET HUMIDITY AND TEMPERATURE READINGS FROM DHT22
humidity, temperature = Adafruit_DHT.read_retry(DHTsensor, DHTpin)
ldrValue = adc.readADCSingleEnded(0, gain, sps)
lightValue = (ldrValue - lightMin) / (lightMax - lightMin) * 100
# SCALE READINGS INTO OUTPUT VALUES
tempPercent = (temperature - tempMin) / (tempMax - tempMin)
tempOutput = int(tempPercent * (servoMax - servoMin) + servoMin)
humidPercent = (humidity - humidMin) / (humidMax - humidMin)
humidOutput = int(humidPercent * (servoMax - servoMin) + servoMin)
lightPercent = lightValue / 100
lightOutput = int(lightPercent * (servoMax - servoMin) + servoMin)
# CHECK FOR HIGH AND LOW VALUES
# HUMIDITY
if(humidity > humidHigh):
humidHigh = humidity
if(humidity < humidLow):
humidLow = humidity
# TEMPERATURE
if(temperature > tempHigh):
tempHigh = temperature
if(temperature < tempLow):
tempLow = temperature
# BRIGHTNESS
if(lightValue > lightHigh):
lightHigh = lightValue
if(lightValue < lightLow):
lightLow = lightValue
os.system('clear')
print "----- INPUTS ------"
print "Temperature: %d" % temperature
print "Humidity: %d" % humidity
print "Brightness: %d" % lightValue
print "----- OUTPUTS -----"
print "Temperature: %d" % tempOutput
print "Humidity: %d" % humidOutput
print "Brightness: %d" % lightOutput
print "----- HISTORY -----"
print " | Temperature | Humidity | Brightness "
print "High: | %.1f" % tempHigh + " degC | %.1f" % humidHigh + " %% | %.1f" % lightHigh + " %"
print "Low: | %.1f" % tempLow + " degC | %.1f" % humidLow + " %% | %.1f" % lightLow + " %"
print "------------------------------"
pwm.setPWM(servoTemp, 0, tempOutput)
pwm.setPWM(servoHumid, 0, humidOutput)
pwm.setPWM(servoLight, 0, lightOutput)
time.sleep(intervalSleep)
| mit | -341,734,205,532,914,300 | 24.77512 | 100 | 0.699462 | false | 2.53984 | false | false | false |
| AntonovAlexander/activecore | designs/rtl/sigma/sw/benchmarks/mul_sw/hw_test_mul_sw.py | 1 | 1185 |
# -*- coding:utf-8 -*-
from __future__ import division
import sys
sys.path.append('../../../../../rtl/udm/sw')
import time
import udm
from udm import *
sys.path.append('..')
import sigma
from sigma import *
def test_mul_sw(sigma, a, b):
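# Write the two operands to fixed addresses in the tile's memory, give the software multiply time to run, then read the product back from the LED register.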
sigma.tile.udm.wr32(0x6000, a)
sigma.tile.udm.wr32(0x6004, b)
corr_result = a * b
time.sleep(0.3)
led_val = sigma.udm.rd32(0x80000000)
if (led_val == corr_result):
print("CORRECT: ", a, " * ", b, " = ", corr_result)
return 1
else:
print("INCORRECT: ", a, " * ", b, " = ", corr_result, ", received: ", led_val)
return 0
def hw_test_mul_sw(sigma, mul_sw_filename):
print("#### MUL_SW TEST STARTED ####")
print("Loading test program...")
sigma.tile.loadelf(mul_sw_filename)
print("Test program written!")
test_succ_flag = 1
test_succ_flag &= test_mul_sw(sigma, 6, 7)
test_succ_flag &= test_mul_sw(sigma, 2, 10)
test_succ_flag &= test_mul_sw(sigma, 256, 256)
if (test_succ_flag):
print("#### MUL_SW TEST PASSED! ####")
else:
print("#### MUL_SW TEST FAILED! ####")
print("")
return test_succ_flag
| apache-2.0 | -607,769,951,120,433,200 | 24.212766 | 86 | 0.55865 | false | 2.977387 | true | false | false |
| gtt116/rabbitclient | rabbit.py | 1 | 3469 |
#!/usr/bin/env python
"""
A Kombu based RabbitMQ server client
"""
import sys
import argparse
import json
import pprint
try:
from kombu.messaging import Producer
from kombu import Exchange, Queue, Connection
except ImportError:
print 'Please install kombu before running this script.'
print 'You can run it on Nova compute.'
sys.exit(1)
class RabbitClient(object):
def __init__(self, host, username='guest', password='guest'):
self.host = host
self.username = username
self.password = password
self._amqp_connection = 'amqp://%s:%s@%s' % (self.username,
self.password,
self.host)
self.conn = None
def _channel(self):
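# Lazily open a single AMQP connection and hand out fresh channels from it.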
if not self.conn:
self.conn = Connection(self._amqp_connection)
return self.conn.channel()
def queue_delete(self, queue_name):
# NOTE(gtt): We can omit exchange and routing_key argument here
# queue = Queue(queue_name, exchange=exchange,
# routing_key=routing_key, channel=conn.channel())
queue = Queue(queue_name, channel=self._channel())
print "Deleting queue %s" % queue
return queue.delete()
def queue_purge(self, queue_name):
queue = Queue(queue_name, channel=self._channel())
print "Purging queue %s" % queue
return queue.purge()
def queue_get(self, queue_name, ack=True):
queue = Queue(queue_name, channel=self._channel())
msg = queue.get()
if not msg:
return None
if ack:
msg.ack()
return msg
def queue_publish(self, routing_key,
exchange_name, exchange_type='topic', body=None):
exchange = Exchange(name=exchange_name, type=exchange_type,
exclusive=False, durable=False, auto_delete=False)
p = Producer(self._channel(), exchange, routing_key=routing_key)
return p.publish(body)
def queue_get_print(self, queue_name):
msg = self.queue_get(queue_name)
if not msg:
print None
return
try:
print json.dumps(json.loads(msg.body), indent=2)
except ValueError:
print msg.body
def dispatch(self, action_name, args):
if action_name == 'queue-get':
return self.queue_get_print(args.queue_name)
if action_name == 'queue-delete':
return self.queue_delete(args.queue_name)
if action_name == 'queue-purge':
return self.queue_purge(args.queue_name)
raise ValueError("Method not support: %s" % action_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-H', '--host')
parser.add_argument('-u', '--username')
parser.add_argument('-p', '--password')
subparser = parser.add_subparsers(dest='action',
help='commands help')
delete_parser = subparser.add_parser('queue-delete')
delete_parser.add_argument('queue_name')
purge_parser = subparser.add_parser('queue-purge')
purge_parser.add_argument('queue_name')
get_parser = subparser.add_parser('queue-get')
get_parser.add_argument('queue_name')
args = parser.parse_args()
rabbit = RabbitClient(args.host, args.username, args.password)
print rabbit.dispatch(args.action, args)
| apache-2.0 | -6,895,754,863,888,696,000 | 31.726415 | 78 | 0.593255 | false | 3.973654 | false | false | false |
| aymeric-spiga/remap | py/archive/reduced.py | 1 | 2347 |
#! /usr/bin/env python
import netCDF4 as nc
import sys
import math
import numpy as np
def from_reduced(N,M):
#"N elements from south to north and N elements around equator "
if gaussian:
hmax = 2*math.pi/N
hmin = hmax/2
nlon = N
cells_lon = []
cells_lat = []
for i in range(M/2):
lat1 = 180.0/M*i
lat2 = 180.0/M*(i+1)
print "yorgl",i,lat1
if gaussian:
y = math.sin(lat1*math.pi/180)
r = math.cos(lat1*math.pi/180)
h = 2.0*r/nlon
reduce_nlon = (h < hmin) and (i > 0) and (nlon > 4)
else:
reduce_nlon = False
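# When reducing, halve the longitude count so cells at this latitude stay roughly comparable in width to the equatorial cells.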
if reduce_nlon:
nlon = nlon/2
for j in range(nlon):
lon1 = 360.0*j/nlon
lon2 = 360.0*(j+1)/nlon
bounds_lon = [lon1, lon1, lon2, lon2]
bounds_lat = [lat1, lat2, lat2, lat1]
if reduce_nlon:
bounds_lon.append((lon1+lon2)/2)
bounds_lat.append(lat1)
else: # close by netCDF convention
bounds_lon.append(bounds_lon[0])
bounds_lat.append(bounds_lat[0])
# northern hemisphere
cells_lon.append(bounds_lon)
cells_lat.append(bounds_lat)
# southern hemisphere
cells_lon.append(bounds_lon)
cells_lat.append(list(-np.array(bounds_lat))) # convert to array to negate elementwise
return np.array(cells_lon), np.array(cells_lat)
gaussian = True
gaussian = False
#for N in [64, 128, 256, 512]:
for N in [64]:
filename = "reduced" + str(N) + ".nc"
print "Generating: N =", N
lon, lat = from_reduced(N*2,N)
print lon.shape[0], "cells -> writing as ", filename
f = nc.Dataset(filename,'w')
f.createDimension('n_vert', 5)
f.createDimension('n_cell', lon.shape[0])
var = f.createVariable('lat', 'd', ('n_cell'))
var.setncattr("long_name", "latitude")
var.setncattr("units", "degrees_north")
var.setncattr("bounds", "bounds_lat")
var[:] = np.zeros(lon.shape[0])
var = f.createVariable('lon', 'd', ('n_cell'))
var.setncattr("long_name", "longitude")
var.setncattr("units", "degrees_east")
var.setncattr("bounds", "bounds_lon")
var[:] = np.zeros(lon.shape[0])
var = f.createVariable('bounds_lon', 'd', ('n_cell','n_vert'))
var[:] = lon
var = f.createVariable('bounds_lat', 'd', ('n_cell','n_vert'))
var[:] = lat
var = f.createVariable('val', 'd', ('n_cell'))
var.setncattr("coordinates", "lon lat")
var[:] = np.arange(lon.shape[0])
f.close()
| gpl-2.0 | 2,083,614,771,329,507,000 | 26.290698 | 89 | 0.612697 | false | 2.604883 | false | false | false |
| hip-odoo/odoo | addons/hr_attendance/models/hr_employee.py | 15 | 7281 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from random import choice
from string import digits
from odoo import models, fields, api, exceptions, _, SUPERUSER_ID
class HrEmployee(models.Model):
_inherit = "hr.employee"
_description = "Employee"
def _default_random_pin(self):
return ("".join(choice(digits) for i in range(4)))
def _default_random_barcode(self):
barcode = None
while not barcode or self.env['hr.employee'].search([('barcode', '=', barcode)]):
barcode = "".join(choice(digits) for i in range(8))
return barcode
barcode = fields.Char(string="Badge ID", help="ID used for employee identification.", default=_default_random_barcode, copy=False)
pin = fields.Char(string="PIN", default=_default_random_pin, help="PIN used to Check In/Out in Kiosk Mode (if enabled in Configuration).", copy=False)
attendance_ids = fields.One2many('hr.attendance', 'employee_id', help='list of attendances for the employee')
last_attendance_id = fields.Many2one('hr.attendance', compute='_compute_last_attendance_id')
attendance_state = fields.Selection(string="Attendance", compute='_compute_attendance_state', selection=[('checked_out', "Checked out"), ('checked_in', "Checked in")])
manual_attendance = fields.Boolean(string='Manual Attendance', compute='_compute_manual_attendance', inverse='_inverse_manual_attendance',
help='The employee will have access to the "My Attendances" menu to check in and out from his session')
_sql_constraints = [('barcode_uniq', 'unique (barcode)', "The Badge ID must be unique, this one is already assigned to another employee.")]
@api.multi
def _compute_manual_attendance(self):
for employee in self:
employee.manual_attendance = employee.user_id.has_group('hr.group_hr_attendance') if employee.user_id else False
@api.multi
def _inverse_manual_attendance(self):
manual_attendance_group = self.env.ref('hr.group_hr_attendance')
for employee in self:
if employee.user_id:
if employee.manual_attendance:
manual_attendance_group.users = [(4, employee.user_id.id, 0)]
else:
manual_attendance_group.users = [(3, employee.user_id.id, 0)]
@api.depends('attendance_ids')
def _compute_last_attendance_id(self):
for employee in self:
employee.last_attendance_id = employee.attendance_ids and employee.attendance_ids[0] or False
@api.depends('last_attendance_id.check_in', 'last_attendance_id.check_out', 'last_attendance_id')
def _compute_attendance_state(self):
for employee in self:
employee.attendance_state = employee.last_attendance_id and not employee.last_attendance_id.check_out and 'checked_in' or 'checked_out'
@api.constrains('pin')
def _verify_pin(self):
for employee in self:
if employee.pin and not employee.pin.isdigit():
raise exceptions.ValidationError(_("The PIN must be a sequence of digits."))
@api.model
def attendance_scan(self, barcode):
""" Receive a barcode scanned from the Kiosk Mode and change the attendances of corresponding employee.
Returns either an action or a warning.
"""
employee = self.search([('barcode', '=', barcode)], limit=1)
return employee and employee.attendance_action('hr_attendance.hr_attendance_action_kiosk_mode') or \
{'warning': _('No employee corresponding to barcode %(barcode)s') % {'barcode': barcode}}
@api.multi
def attendance_manual(self, next_action, entered_pin=None):
self.ensure_one()
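# The PIN is checked when one was entered explicitly, or when PIN mode is enabled and the action targets a different user (or an employee with no linked user).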
if not (entered_pin is None) or self.env['res.users'].browse(SUPERUSER_ID).has_group('hr_attendance.group_hr_attendance_use_pin') and (self.user_id and self.user_id.id != self._uid or not self.user_id):
if entered_pin != self.pin:
return {'warning': _('Wrong PIN')}
return self.attendance_action(next_action)
@api.multi
def attendance_action(self, next_action):
""" Changes the attendance of the employee.
Returns an action to the check in/out message,
next_action defines which menu the check in/out message should return to. ("My Attendances" or "Kiosk Mode")
"""
self.ensure_one()
action_message = self.env.ref('hr_attendance.hr_attendance_action_greeting_message').read()[0]
action_message['previous_attendance_change_date'] = self.last_attendance_id and (self.last_attendance_id.check_out or self.last_attendance_id.check_in) or False
action_message['employee_name'] = self.name
action_message['next_action'] = next_action
if self.user_id:
modified_attendance = self.sudo(self.user_id.id).attendance_action_change()
else:
modified_attendance = self.sudo().attendance_action_change()
action_message['attendance'] = modified_attendance.read()[0]
return {'action': action_message}
@api.multi
def attendance_action_change(self):
""" Check In/Check Out action
Check In: create a new attendance record
Check Out: modify check_out field of appropriate attendance record
"""
if len(self) > 1:
raise exceptions.UserError(_('Cannot perform check in or check out on multiple employees.'))
action_date = fields.Datetime.now()
if self.attendance_state != 'checked_in':
vals = {
'employee_id': self.id,
'check_in': action_date,
}
return self.env['hr.attendance'].create(vals)
else:
attendance = self.env['hr.attendance'].search([('employee_id', '=', self.id), ('check_out', '=', False)], limit=1)
if attendance:
attendance.check_out = action_date
else:
raise exceptions.UserError(_('Cannot perform check out on %(empl_name)s, could not find corresponding check in. '
'Your attendances have probably been modified manually by human resources.') % {'empl_name': self.name, })
return attendance
@api.model_cr_context
def _init_column(self, column_name):
""" Initialize the value of the given column for existing rows.
Overridden here because we need to have different default values
for barcode and pin for every employee.
"""
if column_name not in ["barcode", "pin"]:
super(HrEmployee, self)._init_column(column_name)
else:
default_compute = self._fields[column_name].default
query = 'SELECT id FROM "%s" WHERE "%s" is NULL' % (
self._table, column_name)
self.env.cr.execute(query)
employee_ids = self.env.cr.fetchall()
for employee_id in employee_ids:
default_value = default_compute(self)
query = 'UPDATE "%s" SET "%s"=%%s WHERE id = %s' % (
self._table, column_name, employee_id[0])
self.env.cr.execute(query, (default_value,))
| agpl-3.0 | 2,383,363,399,660,738,600 | 48.530612 | 210 | 0.631232 | false | 4.036031 | false | false | false |
| cloud-ark/cloudark | client/fmcmds/call_server.py | 1 | 14059 |
import gzip
import json
import os
import requests
import tarfile
import urllib2
resources_endpoint = "http://localhost:5002/resources"
resource_stacks_endpoint = "http://localhost:5002/resource_stacks"
environments_endpoint = "http://localhost:5002/environments"
apps_endpoint = "http://localhost:5002/apps"
containers_endpoint = "http://localhost:5002/containers"
SERVER_ERROR = "Something caused error in cloudark. Please submit bug report on cloudark github repo. "
SERVER_ERROR = SERVER_ERROR + "Attach logs from cld.log which is available in cloudark directory."
class TakeAction(object):
def __init__(self):
pass
def _make_tarfile(self, output_filename, source_dir):
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
def _read_tarfile(self, tarfile_name):
with gzip.open(tarfile_name, "rb") as f:
contents = f.read()
return contents
def _delete_tarfile(self, tarfile_name, source_dir):
cwd = os.getcwd()
os.chdir(source_dir)
if os.path.exists(tarfile_name):
os.remove(tarfile_name)
os.chdir(cwd)
def _check_server(self):
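# Probe the local cloudark API and exit with a hint if the server is not running.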
try:
req = urllib2.Request(apps_endpoint)
urllib2.urlopen(req)
except Exception as e:
print("CloudARK server is not running. Please run ./start-cloudark.sh.")
exit()
def create_container(self, source_dir, cont_info):
self._check_server()
req = urllib2.Request(containers_endpoint)
req.add_header('Content-Type', 'application/octet-stream')
cont_name = cont_info['cont_name']
tarfile_name = cont_name + ".tar"
self._make_tarfile(tarfile_name, source_dir)
tarfile_content = self._read_tarfile(tarfile_name)
cont_info['cont_tar_name'] = tarfile_name
cont_info['content'] = tarfile_content
cont_url = ''
try:
data = {'cont_info': cont_info}
response = urllib2.urlopen(req, json.dumps(data, ensure_ascii=True, encoding='ISO-8859-1'))
cont_url = response.headers.get('location')
print("Request to create container %s accepted." % cont_name)
except Exception as e:
error = e.read()
print(error)
self._delete_tarfile(tarfile_name, source_dir)
def get_container(self, container_name):
self._check_server()
cont_url = containers_endpoint + "/" + container_name
req = urllib2.Request(cont_url)
cont_data = ''
try:
response = urllib2.urlopen(req)
cont_data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("Container with name %s not found." % container_name)
return cont_data
def get_container_list(self):
self._check_server()
req = urllib2.Request(containers_endpoint)
data = ''
try:
response = urllib2.urlopen(req)
data = response.fp.read()
except urllib2.HTTPError as e:
print("Error occurred in querying endpoint %s" % containers_endpoint)
print(e)
return data
def delete_container(self, cont_name):
self._check_server()
cont_url = containers_endpoint + "/" + cont_name
response = requests.delete(cont_url)
if response.status_code == 404:
print("Container with name %s not found." % cont_name)
if response.status_code == 202:
print("Request to delete container with name %s accepted." % cont_name)
if response.status_code == 303:
print("Request to delete container with name %s accepted." % cont_name)
print("*** Please delete the container image from GCR manually -- automation is not available for that yet.***")
return response
def deploy_app(self, app_path, app_info):
self._check_server()
source_dir = app_path
app_name = app_info['app_name']
tarfile_name = app_name + ".tar"
self._make_tarfile(tarfile_name, source_dir)
tarfile_content = self._read_tarfile(tarfile_name)
app_info['app_name'] = app_name
app_info['app_tar_name'] = tarfile_name
app_info['app_content'] = tarfile_content
data = {'app_info': app_info}
req = urllib2.Request(apps_endpoint)
req.add_header('Content-Type', 'application/octet-stream')
app_url = ''
try:
response = urllib2.urlopen(req, json.dumps(data, ensure_ascii=True, encoding='ISO-8859-1'))
app_url = response.headers.get('location')
print("Request to deploy %s application accepted." % app_name)
except Exception as e:
error = e.read()
print(error)
self._delete_tarfile(tarfile_name, source_dir)
return app_url
def get_app(self, app_name):
self._check_server()
app_url = apps_endpoint + "/" + app_name
req = urllib2.Request(app_url)
app_data = ''
try:
response = urllib2.urlopen(req)
app_data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("App with name %s not found." % app_name)
return app_data
def get_app_logs(self, app_name):
self._check_server()
app_url = apps_endpoint + "/" + app_name + "/logs"
req = urllib2.Request(app_url)
logs_data = ''
try:
response = urllib2.urlopen(req)
logs_data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("App with name %s not found." % app_name)
return logs_data
def delete_app(self, app_name):
self._check_server()
app_url = apps_endpoint + "/" + app_name
response = requests.delete(app_url)
if response.status_code == 404:
print("App with name %s not found." % app_name)
if response.status_code == 202:
print("Request to delete app with name %s accepted." % app_name)
if response.status_code == 303:
print("Request to delete app with name %s accepted." % app_name)
return response
def redeploy_app(self, app_path, app_info, app_name):
self._check_server()
app_id_url = apps_endpoint + "/" + app_name
source_dir = app_path
app_name = "app-redeploy-id-" + app_name
tarfile_name = app_name + ".tar"
self._make_tarfile(tarfile_name, source_dir)
tarfile_content = self._read_tarfile(tarfile_name)
app_info['app_tar_name'] = tarfile_name
app_info['app_content'] = tarfile_content
data = {'app_info': app_info}
app_url = ''
req = urllib2.Request(app_id_url)
req.add_header('Content-Type', 'application/octet-stream')
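# urllib2 has no dedicated PUT helper, so override get_method to force a PUT request.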
req.get_method = lambda: 'PUT'
try:
response = urllib2.urlopen(req, json.dumps(data, ensure_ascii=True, encoding='ISO-8859-1'))
if response.code == 202:
print("Request to redeploy app with name %s accepted." % app_name)
app_url = response.headers.get('location')
except Exception as e:
if e.msg == 'NOT FOUND':
print("App with name %s not found." % app_name)
if e.msg == 'INTERNAL SERVER ERROR':
print(SERVER_ERROR)
return
self._delete_tarfile(tarfile_name, source_dir)
return app_url
def get_app_list(self):
self._check_server()
req = urllib2.Request(apps_endpoint)
data = ''
try:
response = urllib2.urlopen(req)
data = response.fp.read()
except urllib2.HTTPError as e:
print("Error occurred in querying endpoint %s" % apps_endpoint)
print(e)
return data
# Functions for environment
def run_command(self, env_name, command_string):
self._check_server()
environment_command_endpoint = environments_endpoint + "/" + env_name + "/command"
req = urllib2.Request(environment_command_endpoint)
req.add_header('Content-Type', 'application/octet-stream')
data = {'command_string': command_string,
'environment_name': env_name}
try:
response = urllib2.urlopen(req, json.dumps(data, ensure_ascii=True, encoding='ISO-8859-1'))
response_data = response.fp.read()
resp_data_json = json.loads(response_data)
result = resp_data_json['data']
result_str = '\n'.join(result)
return result_str
except Exception as e:
if e.msg == 'NOT FOUND':
print("Environment with name %s not found." % env_name)
exit()
def create_environment(self, env_name, environment_def):
self._check_server()
req = urllib2.Request(environments_endpoint)
req.add_header('Content-Type', 'application/octet-stream')
data = {'environment_def': environment_def,
'environment_name': env_name}
try:
response = urllib2.urlopen(req, json.dumps(data, ensure_ascii=True, encoding='ISO-8859-1'))
print("Request to create environment %s accepted." % env_name)
except Exception as e:
if e.code == 503 or e.code == 500 or e.code == 412 or e.code == 400:
error = e.read()
print(error)
exit()
environment_url = response.headers.get('location')
return environment_url
def get_environment(self, env_name):
self._check_server()
env_url = environments_endpoint + "/" + env_name
req = urllib2.Request(env_url)
env_data = ''
try:
response = urllib2.urlopen(req)
env_data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("Environment with name %s not found." % env_name)
exit()
return env_data
def delete_environment(self, env_name, force_flag=''):
self._check_server()
env_url = environments_endpoint + "/" + env_name
if force_flag:
env_url = environments_endpoint + "/" + env_name + "?force=" + force_flag
response = requests.delete(env_url)
if response.status_code == 404:
print("Environment with name %s not found." % env_name)
if response.status_code == 202 or response.status_code == 200:
print("Request to delete env with name %s accepted." % env_name)
if response.status_code == 412:
print("Environment cannot be deleted as there are applications still running on it.")
if response.status_code == 303:
print("Request to delete env with name %s accepted." % env_name)
print("*** Please delete the VPC network from Google cloud console that was created for this environment ***.")
print("*** Check: https://github.com/cloud-ark/cloudark/issues/101 for details. ***")
return response
def get_environment_list(self):
self._check_server()
req = urllib2.Request(environments_endpoint)
data = ''
try:
response = urllib2.urlopen(req)
data = response.fp.read()
except urllib2.HTTPError as e:
print("Error occurred in querying endpoint %s" % environments_endpoint)
print(e)
return data
# Functions for Individual resource
def get_resources(self):
self._check_server()
req = urllib2.Request(resources_endpoint)
data = ''
try:
response = urllib2.urlopen(req)
data = response.fp.read()
except urllib2.HTTPError as e:
print(e)
return data
def get_resources_for_environment(self, env_name):
self._check_server()
req = urllib2.Request(resources_endpoint + "?env_name=%s" % env_name)
data = ''
try:
response = urllib2.urlopen(req)
data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("Environment with name %s not found." % env_name)
return data
def create_resource(self, resource_obj):
self._check_server()
req = urllib2.Request(resources_endpoint)
req.add_header('Content-Type', 'application/octet-stream')
request_data = {'resource_info': resource_obj}
response = urllib2.urlopen(req, json.dumps(request_data,
ensure_ascii=True,
encoding='ISO-8859-1'))
resource_endpoint = response.headers.get('location')
print("Resource URL:%s" % resource_endpoint)
return resource_endpoint
def get_resource(self, resource_id):
self._check_server()
resource_endpoint = resources_endpoint + "/" + resource_id
req = urllib2.Request(resource_endpoint)
resource_data = ''
try:
response = urllib2.urlopen(req)
resource_data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("Resource with resource-id %s not found." % resource_id)
return resource_data
def delete_resource(self, resource_id):
self._check_server()
resource_endpoint = resources_endpoint + "/" + resource_id
response = requests.delete(resource_endpoint)
if response.status_code == 404:
print("Resource with resource-id %s not found." % resource_id)
if response.status_code == 202:
print("Request to delete resource with resource-id %s accepted." % resource_id)
return response
|
apache-2.0
| 1,964,869,006,662,134,500
| 37.203804
| 124
| 0.580838
| false
| 4.056261
| false
| false
| false
|
ibmjstart/bluemix-python-eve-sample
|
macreduce/run.py
|
1
|
3837
|
#!/usr/bin/env python
"""Instantiates the Python Eve REST API Server.
Instantiates the Python Eve REST API Server for both local
and cloud (IBM Bluemix) execution. Provides a default catch-all
routing to provide API consumers with intentional responses
for all routes. Provides a redis cloud caching instance for
session management where desired.
"""
from settings import (REDIS_INSTANCE,
APP_HOST,
APP_PORT,
VCAP_CONFIG)
from flask.ext.bootstrap import Bootstrap
from eve import Eve
from eve_docs import eve_docs
from eve_swagger import swagger
from routes import home
from hooks.event import (before_returning_items,
after_returning_items)
from gevent import wsgi, monkey, socket
import os
from platform import python_version
__author__ = "Sanjay Joshi"
__copyright__ = "IBM Copyright 2015"
__credits__ = ["Sanjay Joshi"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Sanjay Joshi"
__email__ = "joshisa@us.ibm.com"
__status__ = "Prototype"
# Monkey Patching app behavior to make it greenlet non-blocking
# This is usually required by gevent for native bindings for things
# like Redis interactions, etc ...
monkey.patch_all()
socket.setdefaulttimeout(240)
# capture current working directory
PWD = os.environ.get("PWD")
# set static folder path for static data
static_folder = os.path.join(PWD, "macreduce/static")
# Detect if we are deployed within Bluemix or not and configure accordingly
if VCAP_CONFIG:
print('Welcome to Bluemix')
print('Running on Python version: ' + python_version())
app = Eve(static_folder=static_folder,
redis=REDIS_INSTANCE)
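    # Flush the Redis cache on each Bluemix start (presumably to drop stale entries
    # left over from a previous deployment before the app starts serving requests).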
REDIS_INSTANCE.flushdb()
else:
print('We are not running in Bluemix! Dev Mode Enabled')
app = Eve(static_folder=static_folder,
redis=REDIS_INSTANCE)
print(' Enabling Debug ...')
app.debug = True
# Setup some default home page path rules for JSON and HTML
app.add_url_rule('/', 'index', home.index)
# app.add_url_rule('/<path:path>', 'nonresource', home.index)
# Setup a favicon url for the home page
app.add_url_rule('/favicon', 'favicon',
view_func=home.favicon, methods=['GET'])
app.add_url_rule('/populate', 'populate',
view_func=home.populate, methods=['GET'])
# Setup examples of event hooks
app.on_pre_GET_mac += \
before_returning_items
app.on_post_GET_mac += \
after_returning_items
app.config['SWAGGER_INFO'] = {
'title': 'Macreduce API',
'version': '1.0',
'description': 'Python-Eve Framework application backend deployed on IBM '
'Bluemix that provides a practical illustration of setting '
'up a python REST API to support mobile workloads and '
'integration with 3rd party API platforms.',
'termsOfService': 'Have fun and learn!',
'contact': {
'name': 'joshisa',
'url': 'http://ibm.biz/sanjay_joshi'
},
'license': {
'name': 'Apache 2.0',
'url': 'https://github.com/ibmjstart/bluemix-python-eve-sample/'
'blob/master/LICENSE',
}
}
# Bootstrap and start Flask app within the WSGI GEvent Process
if __name__ == '__main__':
# Required to enable the Eve-docs extension
Bootstrap(app)
# Example invocation for running the Flask Server by itself
# app.run(host=APP_HOST, port=int(APP_PORT))
# Register the Flask Eve-docs blueprint
app.register_blueprint(eve_docs, url_prefix='/docs')
# Register the Swagger Extension for Eve
app.register_blueprint(swagger)
# Starting the GEvent WSGI Server to host the Flask App
# GEvent should provide superior response times to the
# dev Flask server
ws = wsgi.WSGIServer((APP_HOST, int(APP_PORT)), app)
ws.serve_forever()
|
apache-2.0
| 7,938,279,015,738,166,000
| 32.955752
| 79
| 0.66823
| false
| 3.67177
| false
| false
| false
|
sbarton272/AcousticBarcodes-Explorations
|
barcodes/dxfwrite/examples/mtext.py
|
1
|
3720
|
#!/usr/bin/env python
#coding:utf-8
# Author: mozman
# Purpose: examples for dxfwrite usage, see also tests for examples
# Created: 09.02.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
import sys
import os
try:
import dxfwrite
except ImportError:
# if dxfwrite is not 'installed' append parent dir of __file__ to sys.path
import os
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(curdir, os.path.pardir)))
import dxfwrite
from dxfwrite import DXFEngine as dxf
def textblock(mtext, x, y, rot, color=3, mirror=0):
dwg.add(dxf.line((x+50, y), (x+50, y+50), color=color))
dwg.add(dxf.line((x+100, y), (x+100, y+50), color=color))
dwg.add(dxf.line((x+150, y), (x+150, y+50), color=color))
dwg.add(dxf.line((x+50, y), (x+150, y), color=color))
dwg.add(dxf.mtext(mtext, (x+50, y), mirror=mirror, rotation=rot))
dwg.add(dxf.mtext(mtext, (x+100, y), mirror=mirror, rotation=rot,
halign=dxfwrite.CENTER))
dwg.add(dxf.mtext(mtext, (x+150, y), mirror=mirror, rotation=rot,
halign=dxfwrite.RIGHT))
dwg.add(dxf.line((x+50, y+25), (x+150, y+25), color=color))
dwg.add(dxf.mtext(mtext, (x+50, y+25), mirror=mirror, rotation=rot,
valign=dxfwrite.MIDDLE))
dwg.add(dxf.mtext(mtext, (x+100, y+25), mirror=mirror, rotation=rot,
valign=dxfwrite.MIDDLE, halign=dxfwrite.CENTER))
dwg.add(dxf.mtext(mtext, (x+150, y+25), mirror=mirror, rotation=rot,
valign=dxfwrite.MIDDLE, halign=dxfwrite.RIGHT))
dwg.add(dxf.line((x+50, y+50), (x+150, y+50), color=color))
dwg.add(dxf.mtext(mtext, (x+50, y+50), mirror=mirror,
valign=dxfwrite.BOTTOM, rotation=rot))
dwg.add(dxf.mtext(mtext, (x+100, y+50), mirror=mirror,
valign=dxfwrite.BOTTOM, rotation=rot,
halign=dxfwrite.CENTER))
dwg.add(dxf.mtext(mtext, (x+150, y+50), mirror=mirror,
valign=dxfwrite.BOTTOM, rotation=rot,
halign=dxfwrite.RIGHT))
def rotate_text(text, insert, parts=16, color=3):
delta = 360. / parts
for part in range(parts):
dwg.add(dxf.mtext(text, insert, rotation=(delta*part),
color=color, valign=dxfwrite.TOP))
name = "mtext.dxf"
dwg = dxf.drawing(name)
txt = "Das ist ein mehrzeiliger Text\nZeile 2\nZeile 3\nUnd eine lange lange" \
" ................ Zeile4"
textblock(txt, 0, 0, 0., color=1)
textblock(txt, 150, 0, 45., color=2)
textblock(txt, 300, 0, 90., color=3)
textblock(txt, 0, 70, 135., color=4)
textblock(txt, 150, 70, 180., color=5)
textblock(txt, 300, 70, 225., color=6)
txt = "MText Zeile 1\nMIRROR_X\nZeile 3"
textblock(txt, 0, 140, 0., color=4, mirror=dxfwrite.MIRROR_X)
textblock(txt, 150, 140, 45., color=5, mirror=dxfwrite.MIRROR_X)
textblock(txt, 300, 140, 90., color=6, mirror=dxfwrite.MIRROR_X)
txt = "MText Zeile 1\nMIRROR_Y\nZeile 3"
textblock(txt, 0, 210, 0., color=4, mirror=dxfwrite.MIRROR_Y)
textblock(txt, 150, 210, 45., color=5, mirror=dxfwrite.MIRROR_Y)
textblock(txt, 300, 210, 90., color=6, mirror=dxfwrite.MIRROR_Y)
textblock("Einzeiler 0 deg", 0, -70, 0., color=1)
textblock("Einzeiler 45 deg", 150, -70, 45., color=2)
textblock("Einzeiler 90 deg", 300, -70, 90., color=3)
txt = "--------------------------------------------------Zeile 1\n" \
"----------------- MTEXT MTEXT --------------------Zeile 2 zum Rotieren!\n" \
"--------------------------------------------------Zeile 3\n"
rotate_text(txt, (600, 100), parts=16, color=3)
dwg.save()
print("drawing '%s' created.\n" % name)
|
mit
| -6,541,401,395,220,813,000
| 39.879121
| 83
| 0.600538
| false
| 2.627119
| false
| false
| false
|
DutBright/scientificResearch
|
adminStaff/views.py
|
1
|
8772
|
# coding: UTF-8
'''
Created on 2014-06-07
Desc: adminStaff' view, includes home(manage), review report view
'''
import time
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.views.decorators import csrf
from backend.decorators import *
from const import *
from backend.logging import loginfo
from backend.utility import getContext
from adminStaff.forms import NewsForm,ObjectForm,TemplateNoticeMessageForm,DispatchForm,DispatchAddCollegeForm
from teacher.forms import ProjectBudgetInformationForm,ProjectBudgetAnnualForm, SettingForm, ProjectCreationTeacherForm
from common.forms import NoticeForm, SearchForm
from common.views import scheduleManage, financialManage,noticeMessageSettingBase,scheduleManage,finalReportViewWork,fundBudgetViewWork,fileUploadManage,researchConcludingManage,getType
from adminStaff.models import TemplateNoticeMessage,News,ProjectSingle,HomePagePic
from users.models import SchoolProfile,CollegeProfile,ExpertProfile,Special,College
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def appView(request):
context = {}
return render(request, "adminStaff/application.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def allocManageView(request):
userauth = {
'role': 'adminStaff',
}
object_form = ObjectForm()
#special
special_list = []
user_special_info = {}
for i in Special.objects.all() :
special_list.append({'name':i.name, 'user':i.school_user, })
for i in SchoolProfile.objects.all():
user_special_info[i] = []
for i in special_list:
if i['user']:
user_special_info[i['user']].append(i['name'])
# college
college_list = []
user_college_info = {}
for i in College.objects.all() :
college_list.append({'name':i.name, 'user':i.college_user, })
for i in CollegeProfile.objects.all():
user_college_info[i] = []
for i in college_list:
if i['user']:
user_college_info[i['user']].append(i['name'])
instance_list = [
{
'object_chinese_name':"专题",
'object_name': "special",
'object_form': object_form,
'object_list': special_list,
'user_object_info':user_special_info,
},
{
'object_chinese_name':"学院",
'object_name': "college",
'object_form': object_form,
'object_list': college_list,
'user_object_info':user_college_info,
},
]
context = {
'instance_list': instance_list,
}
return render(request, "adminStaff/alloc_manage.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def scheduleView(request):
userauth = {
'role': 'adminStaff',
'status':'all'
}
return scheduleManage(request, userauth)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def deleteProject(request):
try:
iid=request.GET['iid']
print iid
project=ProjectSingle.objects.get(project_id=iid)
if project:
project.delete()
return HttpResponse('Success')
else:
return HttpResponse('Not exists')
except:
return HttpResponse('Invalid project_id')
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def newsRelease(request):
if request.method == "GET":
form = NewsForm()
else:
form = NewsForm(request.POST, request.FILES)
if form.is_valid():
form.save()
newsList = News.objects.all().order_by('-news_date')
context = getContext(newsList,1,"item",page_elems=7)
context.update({"newsform":NewsForm,
})
return render(request,"adminStaff/news_release.html",context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def noticeMessageSetting(request):
userauth={
"role":"adminStaff"
}
return noticeMessageSettingBase(request,userauth)
def dispatchView(request):
dispatch_form = DispatchForm()
dispatchAddCollege_form=DispatchAddCollegeForm()
college_users = CollegeProfile.objects.all()
expert_users = ExpertProfile.objects.all().order_by('college')
school_users = SchoolProfile.objects.all()
context = {
"dispatch_form":dispatch_form,
"dispatchAddCollege_form":dispatchAddCollege_form,
"search_form": SearchForm(),
}
context.update(getContext(school_users, 1, "item"))
context.update(getContext(college_users, 1, "item2"))
context.update(getContext(expert_users, 1, "item3"))
return render(request, "adminStaff/dispatch.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def financialView(request):
userauth = {
"role": 'adminStaff',
}
return financialManage(request, userauth)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def financialInfoView(request):
budgetinfoform = ProjectBudgetInformationForm()
budgetannuform = ProjectBudgetAnnualForm()
context = {
'budgetinfoform':budgetinfoform,
'budgetannuform':budgetannuform,
}
return render(request,"adminStaff/project_financial_info.html",context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def infoModifyView(request):
context = {}
return render(request, "adminStaff/teacher_info_modify.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def infoExportView(request):
context = {
'EXCELTYPE_DICT':EXCELTYPE_DICT_OBJECT(),
}
return render(request, "adminStaff/infoexport.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def finalInfoView(request,pid):
project = ProjectSingle.objects.filter(project_id = pid)
context = {
'project_list':project,
'role':'adminStaff',
}
return render(request, "adminStaff/finalinfo.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
@check_submit_status()
def finalReportView(request,pid,is_submited={}):
print "YYA" * 10
context = finalReportViewWork(request,pid,is_submited[SUBMIT_STATUS_FINAL])
context = dict(context, **fileUploadManage(request, pid,is_submited))
context['is_submited'] = is_submited
context['user'] = "adminStaff"
loginfo(p=is_submited,label="is_submited")
# if context['redirect']:
# return HttpResponseRedirect('/teacher/finalinfo')
return render(request,"adminStaff/final.html",context)
# def fileUploadManageView(request, pid, is_submited = False):
# context = fileUploadManage(request, pid)
# context['user'] = "teacher"
# # is_submited = False
# context['is_submited'] = is_submited
# return render(request, "teacher/file_upload.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
@check_submit_status()
def fundBudgetView(request,pid,is_submited={}):
context = fundBudgetViewWork(request,pid,is_submited[SUBMIT_STATUS_FINAL])
context['role'] = 'adminStaff'
if context['redirect']:
return HttpResponseRedirect('/adminStaff/finalinfo/'+str(pid))
return render(request,"adminStaff/fundbudget.html",context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
@check_submit_status()
def fileUploadManageView(request, pid, is_submited={}):
context = fileUploadManage(request, pid, is_submited)
context['user'] = "adminStaff"
# is_submited = False
context['is_submited'] = is_submited
return render(request, "adminStaff/file_upload.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def homepic_import_view(request):
"""
    upload a picture for the home page
"""
if request.method == "POST":
f = request.FILES["file"]
ftype = getType(f.name)
new_pic = HomePagePic()
new_pic.pic_obj = f
new_pic.name = f.name
new_pic.file_type = ftype
new_pic.uploadtime = time.strftime('%Y-%m-%d %X', time.localtime(time.time()))
new_pic.file_size = f.size
new_pic.save()
file_history = HomePagePic.objects.all()
loginfo(file_history.count())
data = {'files': file_history,
}
return render(request, 'adminStaff/home_pic_import.html', data)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def createProject(request):
"""
    render the project creation form
"""
return render(request, 'adminStaff/create_project.html', {'form': ProjectCreationTeacherForm()})
|
agpl-3.0
| -821,464,746,761,467,800
| 30.52518
| 185
| 0.684961
| false
| 3.495812
| false
| false
| false
|
rudhir-upretee/Sumo17_With_Netsim
|
tools/traci/constants.py
|
1
|
16975
|
"""
@file constants.py
This script contains TraCI constant definitions from <SUMO_HOME>/src/traci-server/TraCIConstants.h
generated by "rebuildConstants.py" on 2012-12-03 12:37:11.425000.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2009-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
# ****************************************
# VERSION
# ****************************************
TRACI_VERSION = 5
# ****************************************
# COMMANDS
# ****************************************
# command: get version
CMD_GETVERSION = 0x00
# command: simulation step
CMD_SIMSTEP2 = 0x02
# command: stop node
CMD_STOP = 0x12
# command: set lane
CMD_CHANGELANE = 0x13
# command: slow down
CMD_SLOWDOWN = 0x14
# command: change target
CMD_CHANGETARGET = 0x31
# command: add vehicle
CMD_ADDVEHICLE = 0x74
# command: close sumo
CMD_CLOSE = 0x7F
# command: subscribe induction loop (e1) context
CMD_SUBSCRIBE_INDUCTIONLOOP_CONTEXT = 0x80
# response: subscribe induction loop (e1) context
RESPONSE_SUBSCRIBE_INDUCTIONLOOP_CONTEXT = 0x90
# command: get induction loop (e1) variable
CMD_GET_INDUCTIONLOOP_VARIABLE = 0xa0
# response: get induction loop (e1) variable
RESPONSE_GET_INDUCTIONLOOP_VARIABLE = 0xb0
# command: subscribe induction loop (e1) variable
CMD_SUBSCRIBE_INDUCTIONLOOP_VARIABLE = 0xd0
# response: subscribe induction loop (e1) variable
RESPONSE_SUBSCRIBE_INDUCTIONLOOP_VARIABLE = 0xe0
# command: subscribe areal detector (e3) context
CMD_SUBSCRIBE_MULTI_ENTRY_EXIT_DETECTOR_CONTEXT = 0x81
# response: subscribe areal detector (e3) context
RESPONSE_SUBSCRIBE_MULTI_ENTRY_EXIT_DETECTOR_CONTEXT = 0x91
# command: get multi-entry/multi-exit detector (e3) variable
CMD_GET_MULTI_ENTRY_EXIT_DETECTOR_VARIABLE = 0xa1
# response: get areal detector (e3) variable
RESPONSE_GET_MULTI_ENTRY_EXIT_DETECTOR_VARIABLE = 0xb1
# command: subscribe multi-entry/multi-exit detector (e3) variable
CMD_SUBSCRIBE_MULTI_ENTRY_EXIT_DETECTOR_VARIABLE = 0xd1
# response: subscribe areal detector (e3) variable
RESPONSE_SUBSCRIBE_MULTI_ENTRY_EXIT_DETECTOR_VARIABLE = 0xe1
# command: subscribe traffic lights context
CMD_SUBSCRIBE_TL_CONTEXT = 0x82
# response: subscribe traffic lights context
RESPONSE_SUBSCRIBE_TL_CONTEXT = 0x92
# command: get traffic lights variable
CMD_GET_TL_VARIABLE = 0xa2
# response: get traffic lights variable
RESPONSE_GET_TL_VARIABLE = 0xb2
# command: set traffic lights variable
CMD_SET_TL_VARIABLE = 0xc2
# command: subscribe traffic lights variable
CMD_SUBSCRIBE_TL_VARIABLE = 0xd2
# response: subscribe traffic lights variable
RESPONSE_SUBSCRIBE_TL_VARIABLE = 0xe2
# command: subscribe lane context
CMD_SUBSCRIBE_LANE_CONTEXT = 0x83
# response: subscribe lane context
RESPONSE_SUBSCRIBE_LANE_CONTEXT = 0x93
# command: get lane variable
CMD_GET_LANE_VARIABLE = 0xa3
# response: get lane variable
RESPONSE_GET_LANE_VARIABLE = 0xb3
# command: set lane variable
CMD_SET_LANE_VARIABLE = 0xc3
# command: subscribe lane variable
CMD_SUBSCRIBE_LANE_VARIABLE = 0xd3
# response: subscribe lane variable
RESPONSE_SUBSCRIBE_LANE_VARIABLE = 0xe3
# command: subscribe vehicle context
CMD_SUBSCRIBE_VEHICLE_CONTEXT = 0x84
# response: subscribe vehicle context
RESPONSE_SUBSCRIBE_VEHICLE_CONTEXT = 0x94
# command: get vehicle variable
CMD_GET_VEHICLE_VARIABLE = 0xa4
# response: get vehicle variable
RESPONSE_GET_VEHICLE_VARIABLE = 0xb4
# command: set vehicle variable
CMD_SET_VEHICLE_VARIABLE = 0xc4
# command: subscribe vehicle variable
CMD_SUBSCRIBE_VEHICLE_VARIABLE = 0xd4
# response: subscribe vehicle variable
RESPONSE_SUBSCRIBE_VEHICLE_VARIABLE = 0xe4
# command: subscribe vehicle type context
CMD_SUBSCRIBE_VEHICLETYPE_CONTEXT = 0x85
# response: subscribe vehicle type context
RESPONSE_SUBSCRIBE_VEHICLETYPE_CONTEXT = 0x95
# command: get vehicle type variable
CMD_GET_VEHICLETYPE_VARIABLE = 0xa5
# response: get vehicle type variable
RESPONSE_GET_VEHICLETYPE_VARIABLE = 0xb5
# command: set vehicle type variable
CMD_SET_VEHICLETYPE_VARIABLE = 0xc5
# command: subscribe vehicle type variable
CMD_SUBSCRIBE_VEHICLETYPE_VARIABLE = 0xd5
# response: subscribe vehicle type variable
RESPONSE_SUBSCRIBE_VEHICLETYPE_VARIABLE = 0xe5
# command: subscribe route context
CMD_SUBSCRIBE_ROUTE_CONTEXT = 0x86
# response: subscribe route context
RESPONSE_SUBSCRIBE_ROUTE_CONTEXT = 0x96
# command: get route variable
CMD_GET_ROUTE_VARIABLE = 0xa6
# response: get route variable
RESPONSE_GET_ROUTE_VARIABLE = 0xb6
# command: set route variable
CMD_SET_ROUTE_VARIABLE = 0xc6
# command: subscribe route variable
CMD_SUBSCRIBE_ROUTE_VARIABLE = 0xd6
# response: subscribe route variable
RESPONSE_SUBSCRIBE_ROUTE_VARIABLE = 0xe6
# command: subscribe poi context
CMD_SUBSCRIBE_POI_CONTEXT = 0x87
# response: subscribe poi context
RESPONSE_SUBSCRIBE_POI_CONTEXT = 0x97
# command: get poi variable
CMD_GET_POI_VARIABLE = 0xa7
# response: get poi variable
RESPONSE_GET_POI_VARIABLE = 0xb7
# command: set poi variable
CMD_SET_POI_VARIABLE = 0xc7
# command: subscribe poi variable
CMD_SUBSCRIBE_POI_VARIABLE = 0xd7
# response: subscribe poi variable
RESPONSE_SUBSCRIBE_POI_VARIABLE = 0xe7
# command: subscribe polygon context
CMD_SUBSCRIBE_POLYGON_CONTEXT = 0x88
# response: subscribe polygon context
RESPONSE_SUBSCRIBE_POLYGON_CONTEXT = 0x98
# command: get polygon variable
CMD_GET_POLYGON_VARIABLE = 0xa8
# response: get polygon variable
RESPONSE_GET_POLYGON_VARIABLE = 0xb8
# command: set polygon variable
CMD_SET_POLYGON_VARIABLE = 0xc8
# command: subscribe polygon variable
CMD_SUBSCRIBE_POLYGON_VARIABLE = 0xd8
# response: subscribe polygon variable
RESPONSE_SUBSCRIBE_POLYGON_VARIABLE = 0xe8
# command: subscribe junction context
CMD_SUBSCRIBE_JUNCTION_CONTEXT = 0x89
# response: subscribe junction context
RESPONSE_SUBSCRIBE_JUNCTION_CONTEXT = 0x99
# command: get junction variable
CMD_GET_JUNCTION_VARIABLE = 0xa9
# response: get junction variable
RESPONSE_GET_JUNCTION_VARIABLE = 0xb9
# command: set junction variable
CMD_SET_JUNCTION_VARIABLE = 0xc9
# command: subscribe junction variable
CMD_SUBSCRIBE_JUNCTION_VARIABLE = 0xd9
# response: subscribe junction variable
RESPONSE_SUBSCRIBE_JUNCTION_VARIABLE = 0xe9
# command: subscribe edge context
CMD_SUBSCRIBE_EDGE_CONTEXT = 0x8a
# response: subscribe edge context
RESPONSE_SUBSCRIBE_EDGE_CONTEXT = 0x9a
# command: get edge variable
CMD_GET_EDGE_VARIABLE = 0xaa
# response: get edge variable
RESPONSE_GET_EDGE_VARIABLE = 0xba
# command: set edge variable
CMD_SET_EDGE_VARIABLE = 0xca
# command: subscribe edge variable
CMD_SUBSCRIBE_EDGE_VARIABLE = 0xda
# response: subscribe edge variable
RESPONSE_SUBSCRIBE_EDGE_VARIABLE = 0xea
# command: subscribe simulation context
CMD_SUBSCRIBE_SIM_CONTEXT = 0x8b
# response: subscribe simulation context
RESPONSE_SUBSCRIBE_SIM_CONTEXT = 0x9b
# command: get simulation variable
CMD_GET_SIM_VARIABLE = 0xab
# response: get simulation variable
RESPONSE_GET_SIM_VARIABLE = 0xbb
# command: set simulation variable
CMD_SET_SIM_VARIABLE = 0xcb
# command: subscribe simulation variable
CMD_SUBSCRIBE_SIM_VARIABLE = 0xdb
# response: subscribe simulation variable
RESPONSE_SUBSCRIBE_SIM_VARIABLE = 0xeb
# command: subscribe GUI context
CMD_SUBSCRIBE_GUI_CONTEXT = 0x8c
# response: subscribe GUI context
RESPONSE_SUBSCRIBE_GUI_CONTEXT = 0x9c
# command: get GUI variable
CMD_GET_GUI_VARIABLE = 0xac
# response: get GUI variable
RESPONSE_GET_GUI_VARIABLE = 0xbc
# command: set GUI variable
CMD_SET_GUI_VARIABLE = 0xcc
# command: subscribe GUI variable
CMD_SUBSCRIBE_GUI_VARIABLE = 0xdc
# response: subscribe GUI variable
RESPONSE_SUBSCRIBE_GUI_VARIABLE = 0xec
# ****************************************
# POSITION REPRESENTATIONS
# ****************************************
# Position in geo-coordinates
POSITION_LAT_LON = 0x00
# 2D cartesian coordinates
POSITION_2D = 0x01
# Position in geo-coordinates with altitude
POSITION_LAT_LON_ALT = 0x02
# 3D cartesian coordinates
POSITION_3D = 0x03
# Position on road map
POSITION_ROADMAP = 0x04
# ****************************************
# DATA TYPES
# ****************************************
# Boundary Box (4 doubles)
TYPE_BOUNDINGBOX = 0x05
# Polygon (2*n doubles)
TYPE_POLYGON = 0x06
# unsigned byte
TYPE_UBYTE = 0x07
# signed byte
TYPE_BYTE = 0x08
# 32 bit signed integer
TYPE_INTEGER = 0x09
# float
TYPE_FLOAT = 0x0A
# double
TYPE_DOUBLE = 0x0B
# 8 bit ASCII string
TYPE_STRING = 0x0C
# list of traffic light phases
TYPE_TLPHASELIST = 0x0D
# list of strings
TYPE_STRINGLIST = 0x0E
# compound object
TYPE_COMPOUND = 0x0F
# color (four ubytes)
TYPE_COLOR = 0x11
# ****************************************
# RESULT TYPES
# ****************************************
# result type: Ok
RTYPE_OK = 0x00
# result type: not implemented
RTYPE_NOTIMPLEMENTED = 0x01
# result type: error
RTYPE_ERR = 0xFF
# return value for invalid queries (especially vehicle is not on the road)
INVALID_DOUBLE_VALUE = -1001.
# return value for invalid queries (especially vehicle is not on the road)
INVALID_INT_VALUE = -1
# ****************************************
# TRAFFIC LIGHT PHASES
# ****************************************
# red phase
TLPHASE_RED = 0x01
# yellow phase
TLPHASE_YELLOW = 0x02
# green phase
TLPHASE_GREEN = 0x03
# tl is blinking
TLPHASE_BLINKING = 0x04
# tl is off and not blinking
TLPHASE_NOSIGNAL = 0x05
# ****************************************
# DIFFERENT DISTANCE REQUESTS
# ****************************************
# air distance
REQUEST_AIRDIST = 0x00
# driving distance
REQUEST_DRIVINGDIST = 0x01
# ****************************************
# VEHICLE REMOVAL REASONS
# ****************************************
# vehicle started teleport
REMOVE_TELEPORT = 0x00
# vehicle removed while parking
REMOVE_PARKING = 0x01
# vehicle arrived
REMOVE_ARRIVED = 0x02
# vehicle was vaporized
REMOVE_VAPORIZED = 0x03
# vehicle finished route during teleport
REMOVE_TELEPORT_ARRIVED = 0x04
# ****************************************
# VARIABLE TYPES (for CMD_GET_*_VARIABLE)
# ****************************************
# list of instances' ids (get: all)
ID_LIST = 0x00
# count of instances (get: all)
ID_COUNT = 0x01
# subscribe object variables (get: all)
OBJECT_VARIABLES_SUBSCRIPTION = 0x02
# subscribe context variables (get: all)
SURROUNDING_VARIABLES_SUBSCRIPTION = 0x03
# last step vehicle number (get: induction loops, multi-entry/multi-exit detector, lanes, edges)
LAST_STEP_VEHICLE_NUMBER = 0x10
# last step vehicle number (get: induction loops, multi-entry/multi-exit detector, lanes, edges)
LAST_STEP_MEAN_SPEED = 0x11
# last step vehicle number (get: induction loops, multi-entry/multi-exit detector, lanes, edges)
LAST_STEP_VEHICLE_ID_LIST = 0x12
# last step occupancy (get: induction loops, lanes, edges)
LAST_STEP_OCCUPANCY = 0x13
# last step vehicle halting number (get: multi-entry/multi-exit detector, lanes, edges)
LAST_STEP_VEHICLE_HALTING_NUMBER = 0x14
# last step mean vehicle length (get: induction loops, lanes, edges)
LAST_STEP_LENGTH = 0x15
# last step time since last detection (get: induction loops)
LAST_STEP_TIME_SINCE_DETECTION = 0x16
# entry times
LAST_STEP_VEHICLE_DATA = 0x17
# traffic light states, encoded as rRgGyYoO tuple (get: traffic lights)
TL_RED_YELLOW_GREEN_STATE = 0x20
# index of the phase (set: traffic lights)
TL_PHASE_INDEX = 0x22
# traffic light program (set: traffic lights)
TL_PROGRAM = 0x23
# phase duration (set: traffic lights)
TL_PHASE_DURATION = 0x24
# controlled lanes (get: traffic lights)
TL_CONTROLLED_LANES = 0x26
# controlled links (get: traffic lights)
TL_CONTROLLED_LINKS = 0x27
# index of the current phase (get: traffic lights)
TL_CURRENT_PHASE = 0x28
# name of the current program (get: traffic lights)
TL_CURRENT_PROGRAM = 0x29
# controlled junctions (get: traffic lights)
TL_CONTROLLED_JUNCTIONS = 0x2a
# complete definition (get: traffic lights)
TL_COMPLETE_DEFINITION_RYG = 0x2b
# complete program (set: traffic lights)
TL_COMPLETE_PROGRAM_RYG = 0x2c
# assumed time to next switch (get: traffic lights)
TL_NEXT_SWITCH = 0x2d
# outgoing link number (get: lanes)
LANE_LINK_NUMBER = 0x30
# id of parent edge (get: lanes)
LANE_EDGE_ID = 0x31
# outgoing link definitions (get: lanes)
LANE_LINKS = 0x33
# list of allowed vehicle classes (get&set: lanes)
LANE_ALLOWED = 0x34
# list of not allowed vehicle classes (get&set: lanes)
LANE_DISALLOWED = 0x35
# speed (get: vehicle)
VAR_SPEED = 0x40
# maximum allowed/possible speed (get: vehicle types, lanes, set: edges, lanes)
VAR_MAXSPEED = 0x41
# position (2D) (get: vehicle, poi, set: poi)
VAR_POSITION = 0x42
# angle (get: vehicle)
VAR_ANGLE = 0x43
# angle (get: vehicle types, lanes, set: lanes)
VAR_LENGTH = 0x44
# color (get: vehicles, vehicle types, polygons, pois)
VAR_COLOR = 0x45
# max. acceleration (get: vehicle types)
VAR_ACCEL = 0x46
# max. deceleration (get: vehicle types)
VAR_DECEL = 0x47
# driver reaction time (get: vehicle types)
VAR_TAU = 0x48
# vehicle class (get: vehicle types)
VAR_VEHICLECLASS = 0x49
# emission class (get: vehicle types)
VAR_EMISSIONCLASS = 0x4a
# shape class (get: vehicle types)
VAR_SHAPECLASS = 0x4b
# minimum gap (get: vehicle types)
VAR_MINGAP = 0x4c
# width (get: vehicle types, lanes)
VAR_WIDTH = 0x4d
# shape (get: polygons)
VAR_SHAPE = 0x4e
# type id (get: vehicles, polygons, pois)
VAR_TYPE = 0x4f
# road id (get: vehicles)
VAR_ROAD_ID = 0x50
# lane id (get: vehicles)
VAR_LANE_ID = 0x51
# lane index (get: vehicles)
VAR_LANE_INDEX = 0x52
# route id (get & set: vehicles)
VAR_ROUTE_ID = 0x53
# edges (get: routes)
VAR_EDGES = 0x54
# filled? (get: polygons)
VAR_FILL = 0x55
# position (1D along lane) (get: vehicle)
VAR_LANEPOSITION = 0x56
# route (set: vehicles)
VAR_ROUTE = 0x57
# travel time information (get&set: vehicle)
VAR_EDGE_TRAVELTIME = 0x58
# effort information (get&set: vehicle)
VAR_EDGE_EFFORT = 0x59
# last step travel time (get: edge, lane)
VAR_CURRENT_TRAVELTIME = 0x5a
# signals state (get/set: vehicle)
VAR_SIGNALS = 0x5b
# new lane/position along (set: vehicle)
VAR_MOVE_TO = 0x5c
# driver imperfection (set: vehicle)
VAR_IMPERFECTION = 0x5d
# speed factor (set: vehicle)
VAR_SPEED_FACTOR = 0x5e
# speed deviation (set: vehicle)
VAR_SPEED_DEVIATION = 0x5f
# speed without TraCI influence (get: vehicle)
VAR_SPEED_WITHOUT_TRACI = 0xb1
# best lanes (get: vehicle)
VAR_BEST_LANES = 0xb2
# how speed is set (set: vehicle)
VAR_SPEEDSETMODE = 0xb3
# move vehicle, VTD version (set: vehicle)
VAR_MOVE_TO_VTD = 0xb4
# current CO2 emission of a node (get: vehicle, lane, edge)
VAR_CO2EMISSION = 0x60
# current CO emission of a node (get: vehicle, lane, edge)
VAR_COEMISSION = 0x61
# current HC emission of a node (get: vehicle, lane, edge)
VAR_HCEMISSION = 0x62
# current PMx emission of a node (get: vehicle, lane, edge)
VAR_PMXEMISSION = 0x63
# current NOx emission of a node (get: vehicle, lane, edge)
VAR_NOXEMISSION = 0x64
# current fuel consumption of a node (get: vehicle, lane, edge)
VAR_FUELCONSUMPTION = 0x65
# current noise emission of a node (get: vehicle, lane, edge)
VAR_NOISEEMISSION = 0x66
# current person number (get: vehicle)
VAR_PERSON_NUMBER = 0x67
VAR_BUS_STOP_WAITING = 0x67
# current time step (get: simulation)
VAR_TIME_STEP = 0x70
# number of loaded vehicles (get: simulation)
VAR_LOADED_VEHICLES_NUMBER = 0x71
# loaded vehicle ids (get: simulation)
VAR_LOADED_VEHICLES_IDS = 0x72
# number of departed vehicle (get: simulation)
VAR_DEPARTED_VEHICLES_NUMBER = 0x73
# departed vehicle ids (get: simulation)
VAR_DEPARTED_VEHICLES_IDS = 0x74
# number of vehicles starting to teleport (get: simulation)
VAR_TELEPORT_STARTING_VEHICLES_NUMBER = 0x75
# ids of vehicles starting to teleport (get: simulation)
VAR_TELEPORT_STARTING_VEHICLES_IDS = 0x76
# number of vehicles ending to teleport (get: simulation)
VAR_TELEPORT_ENDING_VEHICLES_NUMBER = 0x77
# ids of vehicles ending to teleport (get: simulation)
VAR_TELEPORT_ENDING_VEHICLES_IDS = 0x78
# number of arrived vehicles (get: simulation)
VAR_ARRIVED_VEHICLES_NUMBER = 0x79
# ids of arrived vehicles (get: simulation)
VAR_ARRIVED_VEHICLES_IDS = 0x7a
# delta t (get: simulation)
VAR_DELTA_T = 0x7b
# bounding box (get: simulation)
VAR_NET_BOUNDING_BOX = 0x7c
# minimum number of expected vehicles (get: simulation)
VAR_MIN_EXPECTED_VEHICLES = 0x7d
# add an instance (poi, polygon, vehicle, route)
ADD = 0x80
# remove an instance (poi, polygon)
REMOVE = 0x81
# convert coordinates
POSITION_CONVERSION = 0x82
# distance between points or vehicles
DISTANCE_REQUEST = 0x83
# force rerouting based on travel time (vehicles)
CMD_REROUTE_TRAVELTIME = 0x90
# force rerouting based on effort (vehicles)
CMD_REROUTE_EFFORT = 0x91
# validates current route (vehicles)
VAR_ROUTE_VALID = 0x92
# zoom
VAR_VIEW_ZOOM = 0xa0
# view position
VAR_VIEW_OFFSET = 0xa1
# view schema
VAR_VIEW_SCHEMA = 0xa2
# view by boundary
VAR_VIEW_BOUNDARY = 0xa3
# screenshot
VAR_SCREENSHOT = 0xa5
# track vehicle
VAR_TRACK_VEHICLE = 0xa6
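# Illustrative use of these variable IDs with the traci Python client (not part of
# this module; assumes a running SUMO instance):
#   import traci
#   traci.vehicle.subscribe('veh0', (VAR_SPEED, VAR_ROAD_ID))
#   results = traci.vehicle.getSubscriptionResults('veh0')  # dict keyed by VAR_* codes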
|
gpl-3.0
| -4,040,832,587,357,143,600
| 24.916031
| 98
| 0.729131
| false
| 2.953715
| false
| false
| false
|
vensder/itmo_python
|
nasledovanye3.py
|
1
|
1220
|
import random
class Transport:
def __init__(self, name, speed = 0, wheel_count = 0, mass = 0, color = (0,0,0)):
self.name = name
self.speed = speed
self.wheel_count = wheel_count
self.mass = mass
self.color = color
self.pos = 0
def drive(self, time):
self.pos += self.speed * time
#return self.pos
def show_pos(self):
print(self.name, ':', self.pos)
class Car(Transport):
def __init__(self, name):
super().__init__(name, speed = 120, wheel_count = 4, mass = 2, color = (0,255,0))
class Tank(Transport):
def __init__(self, name):
super().__init__(name, speed = 120, wheel_count = 4, mass = 2, color = (0,255,0))
self.can_fire = True
class Airplane(Transport):
def __init__(self, name):
super().__init__(name, speed = 800, wheel_count = 22, mass = 100,
color = (250,250,250))
self.wings_count = 2
self.tail = True
machines = [
Car('car-1'),
Tank('tank-1'),
Airplane('plane-1'),
Car('car-2'),
Tank('tank-2'),
Airplane('plane-2'),
]
for m in machines:
if hasattr(m, 'fire'):
m.fire()
for m in machines:
m.show_pos()
for m in machines:
time = random.randint(1, 150)
    m.drive(time)  # drive for the randomly chosen duration
print('-'*20)
for m in machines:
m.show_pos()
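# Each machine's final position is speed * total drive time, so e.g. a Car
# (speed 120) that drives for 50 time units ends up reporting position 6000.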
|
gpl-3.0
| -2,407,283,530,971,650,000
| 16.941176
| 83
| 0.594262
| false
| 2.652174
| false
| false
| false
|
meisamhe/GPLshared
|
Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/scoping.py
|
1
|
1058
|
# @include
x, y, z = 'global-x', 'global-y', 'global-z'
def basic_scoping():
print(x) # global-x
y = 'local-y'
global z
z = 'local-z'
basic_scoping()
print(x, y, z) # global-x global-y local-z
def inner_outer_scoping():
def inner1():
print(x) # outer-x
def inner2():
x = 'inner2-x'
print(x) # inner2-x
def inner3():
nonlocal x
x = 'inner3-x'
print(x) # inner3-x
x = "outer-x"
inner1(), inner2(), inner3()
print(x) # inner3-x
inner_outer_scoping()
print(x, y, z) # global-x global-y local-z
def outer_scope_error():
def inner():
try:
x = x + 321
except NameError:
            print('Error: x is local, and so x + 321 is not defined yet')
x = 123
inner()
outer_scope_error() # prints 'Error: ...'
def outer_scope_array_no_error():
def inner():
x[0] = -x[0] # x[0] isn't a variable, it's resolved from outer x.
x = [314]
inner()
print(x[0]) # -314
outer_scope_array_no_error()
# @exclude
|
gpl-3.0
| -7,386,681,169,818,829,000
| 18.592593
| 74
| 0.52741
| false
| 2.798942
| false
| false
| false
|
the-duck/launcher-next
|
src/duck/launcher/XlibStuff.py
|
1
|
1626
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
#########
#########
#Copyright (C) 2014-2015 Mark Spurgeon <theduck.dev@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########
import Xlib
from Xlib import display as D
import sys
import time
from PyQt4 import QtGui, QtCore
import Config
def fix_window(winId,left,right,top,bottom):
set = False
while set == False:
try:
window = reserveSpace(winId)
if window != None:
window.now(left,right,top,bottom)
set = True
else:
                time.sleep(1)  # brief pause before retrying
except:
raise
class reserveSpace():
def __init__(self, winId):
self._display = D.Display()
self._win = self._display.create_resource_object('window', winId)
def now(self, left,right,top,bottom):
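        # _NET_WM_STRUT is the EWMH property a panel uses to ask the window manager
        # to reserve screen space; the four values are pixel widths for the left,
        # right, top and bottom edges.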
self._win.change_property(self._display.intern_atom('_NET_WM_STRUT'), self._display.intern_atom('CARDINAL'),32, [left,right,top,bottom])
self._display.sync()
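# Usage sketch (hypothetical widget and values): reserve a 32 px strip at the top
# of the screen for a dock/panel window.
#   fix_window(int(panel.winId()), left=0, right=0, top=32, bottom=0)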
|
gpl-2.0
| 6,072,618,563,806,798,000
| 35.954545
| 138
| 0.636531
| false
| 3.737931
| false
| false
| false
|
alefnula/perart
|
src/perart/forms.py
|
1
|
1510
|
__author__ = 'Viktor Kerkez <alefnula@gmail.com>'
__contact__ = 'alefnula@gmail.com'
__date__ = '20 April 2010'
__copyright__ = 'Copyright (c) 2010 Viktor Kerkez'
import logging
from django import forms
from django.conf import settings
from google.appengine.api import mail
# perart imports
from perart import models
class PerArtForm(forms.ModelForm):
tinymce = True
class ProgramForm(PerArtForm):
class Meta:
model = models.Program
exclude = ['url']
class ProjectForm(PerArtForm):
class Meta:
model = models.Project
exclude = ['url']
class NewsForm(PerArtForm):
class Meta:
model = models.News
exclude = ['url']
class MenuForm(PerArtForm):
tinymce = False
class Meta:
model = models.Menu
exclude = ['url']
class GalleryForm(PerArtForm):
class Meta:
model = models.Gallery
exclude = ['url']
class NewsletterForm(forms.Form):
name = forms.CharField(required=True)
email = forms.EmailField(required=True)
def send_email(self):
try:
mail.send_mail(sender='perart.office@gmail.com',
to=settings.PERART_EMAIL,
subject='"%(name)s" se prijavio za newsletter' % self.cleaned_data,
body='Ime: %(name)s\nEmail: %(email)s' % self.cleaned_data)
return True
except:
logging.exception('sending message failed')
return False
|
gpl-3.0
| -3,145,847,521,591,563,000
| 22.984127
| 94
| 0.602649
| false
| 3.756219
| false
| false
| false
|
OCA/purchase-workflow
|
purchase_landed_cost/wizard/import_invoice_line.py
|
1
|
1945
|
# Copyright 2014-2016 Tecnativa - Pedro M. Baeza
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3
from odoo import api, fields, models
class ImportInvoiceLine(models.TransientModel):
_name = "import.invoice.line.wizard"
_description = "Import supplier invoice line"
supplier = fields.Many2one(
comodel_name='res.partner', string='Supplier', required=True,
domain="[('supplier', '=', True)]")
invoice = fields.Many2one(
comodel_name='account.invoice', string="Invoice", required=True,
domain="[('partner_id', '=', supplier), ('type', '=', 'in_invoice'),"
"('state', 'in', ['open', 'paid'])]")
invoice_line = fields.Many2one(
comodel_name='account.invoice.line', string="Invoice line",
required=True, domain="[('invoice_id', '=', invoice)]")
expense_type = fields.Many2one(
comodel_name='purchase.expense.type', string='Expense type',
required=True)
@api.multi
def action_import_invoice_line(self):
self.ensure_one()
dist_id = self.env.context['active_id']
distribution = self.env['purchase.cost.distribution'].browse(dist_id)
currency_from = self.invoice_line.currency_id
amount = self.invoice_line.price_subtotal
currency_to = distribution.currency_id
company = distribution.company_id or self.env.user.company_id
cost_date = distribution.date or fields.Date.today()
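        # Convert the invoice line subtotal from the invoice currency into the
        # distribution's currency, at the distribution date (or today).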
expense_amount = currency_from._convert(amount, currency_to, company,
cost_date)
self.env['purchase.cost.distribution.expense'].create({
'distribution': dist_id,
'invoice_line': self.invoice_line.id,
'invoice_id': self.invoice_line.invoice_id.id,
'ref': self.invoice_line.name,
'expense_amount': expense_amount,
'type': self.expense_type.id,
})
|
agpl-3.0
| -5,586,129,776,587,690,000
| 44.232558
| 77
| 0.614396
| false
| 3.821218
| false
| false
| false
|
adsabs/adsabs-pyingest
|
pyingest/parsers/hstprop.py
|
1
|
5181
|
#!/usr/bin/env python
from past.utils import old_div
import sys
import math
import requests
class URLError(Exception):
pass
class RequestError(Exception):
pass
class DataError(Exception):
pass
class HSTParser(object):
    # HSTParser will return a list of articles taken from the HST API
# (https://proper.stsci.edu/proper/adsProposalSearch/query)
def __init__(self):
self.errors = []
pass
def get_batch(self, api_token, api_url, **kwargs):
get_header = {'apiKey': api_token, 'Accept': 'text/plain',
'Content-type': 'application/json'}
buff = requests.get(api_url, headers=get_header, params=kwargs).json()
return buff
def get_records(self, url, **kwargs):
if url.find('adsProposalSearch') == -1:
raise URLError("This parser is only for the HST adsProposalSearch search.")
# if not kwargs.has_key('api_key'):
if 'api_key' not in kwargs:
raise RequestError('No API key provided to query the HST API.')
token = kwargs['api_key']
del kwargs['api_key']
buff = {}
# Store the value of maxRecords, if this was set
maxrecs = kwargs.get('maxRecords', 200)
# First get 1 record to determine the total amount of records
kwargs['maxRecords'] = 1
# Do the first query
try:
batch = self.get_batch(token, url, **kwargs)
except Exception as err:
raise URLError("Request to HST blew up: %s" % err)
# How many records are there?
totrecs = batch['query']['total']
# Store the first batch of records
records = batch['programs']
# How often do we need to paginate to get them all?
num_paginates = int(math.ceil(old_div(totrecs, (1.0 * maxrecs))))
# If we run in test mode, do not paginate
if kwargs.get('test'):
num_paginates = 0
# We harvested the first record to get the total number of records,
# so we continue with the 2nd
offset = 1
kwargs['maxRecords'] = maxrecs
for i in range(num_paginates):
kwargs['offset'] = offset
try:
batch = self.get_batch(token, url, **kwargs)
except Exception as err:
raise URLError("Request to HST blew up: %s" % err)
records += batch['programs']
offset += maxrecs
return records
def is_complete(self, rec):
required_fields = ['bibstem', 'title', 'authorNames', 'date', 'link', 'comment', 'journalCode', 'affiliations', 'authorOrcidIdentifiers']
return all(elem in list(rec.keys()) for elem in required_fields)
def add_orcids(self, affs, orcids):
if len(affs) != len(orcids):
raise DataError('Affiliation and ORCID arrays have different lengths!')
afflist = []
for i in range(len(affs)):
if orcids[i]:
afflist.append('%s <ID system="ORCID">%s</ID>' % (affs[i], orcids[i].replace('http://orcid.org/', '')))
else:
afflist.append(affs[i])
return afflist
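    # Illustrative result of add_orcids (hypothetical values):
    #   add_orcids(['Inst A', 'Inst B'],
    #              ['http://orcid.org/0000-0001-2345-6789', ''])
    #   -> ['Inst A <ID system="ORCID">0000-0001-2345-6789</ID>', 'Inst B']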
def parse(self, url, **kwargs):
        hst_props = []  # one dict per complete proposal record
# retrieve data from HST API
data = self.get_records(url, **kwargs)
# process the new records
for d in data:
if self.is_complete(d):
# The "journal field" is a composite from the "journalCode" and "comment" fields:
# 1. journalCode: expression of mission cycle ('HST Proposal. Cycle NN' or 'JWST Proposal. Cycle N')
# 2. comment: preformatted as 'HST Proposal#xxxx' or 'JWST Proposal#xxxx'
# What will go into the 'publication' field will have the form: HST Proposal. Cycle 26, ID. #15676
journal = "%s, ID. #%s" % (d['journalCode'], d['comment'].split('#')[-1])
# The ORCID information will have to be inserted into the affiliation information
try:
affils = self.add_orcids(d['affiliations'], d['authorOrcidIdentifiers'])
except DataError:
sys.stderr.write('Found misaligned affiliation/ORCID arrays: %s\n' % d['bibstem'])
self.errors.append('Found misaligned affiliation/ORCID arrays: %s' % d['bibstem'])
affils = d['affiliations']
hst_props.append({'bibcode': d['bibstem'],
'authors': d['authorNames'],
'affiliations': affils,
'title': d['title'],
'pubdate': d['date'],
'publication': journal,
                                  'abstract': d.get('abstract', ''),  # 'abstract' is not in required_fields, so guard against it being absent
'properties': {'data': d['link']}})
else:
recid = d.get('comment') or d.get('bibstem')
sys.stderr.write('Found record with missing data: %s\n' % recid)
self.errors.append('Found record with missing data: %s' % recid)
continue
return hst_props
|
mit
| 8,031,092,040,686,377,000
| 40.448
| 145
| 0.550087
| false
| 3.997685
| false
| false
| false
|
radical-cybertools/radical.ensemblemd
|
src/radical/entk/execman/mock/resource_manager.py
|
1
|
3425
|
__copyright__ = 'Copyright 2017-2018, http://radical.rutgers.edu'
__author__ = 'Vivek Balasubramanian <vivek.balasubramaniana@rutgers.edu>'
__license__ = 'MIT'
# pylint: disable=unused-argument
from ..base.resource_manager import Base_ResourceManager
import radical.utils as ru
# ------------------------------------------------------------------------------
#
class ResourceManager(Base_ResourceManager):
'''
A resource manager takes the responsibility of placing resource requests on
    different, possibly multiple, DCIs. This ResourceManager mocks an
    implementation by doing nothing; it is only usable for testing.
:arguments:
:resource_desc: dictionary with details of the resource request and
access credentials of the user
:example: resource_desc = {
| 'resource' : 'xsede.stampede',
| 'walltime' : 120,
| 'cpus' : 64,
| 'project' : 'TG-abcxyz',
| 'queue' : 'abc', # optional
| 'access_schema' : 'ssh' # optional}
'''
# --------------------------------------------------------------------------
#
def __init__(self, resource_desc, sid, rts_config):
super(ResourceManager, self).__init__(resource_desc=resource_desc,
sid=sid,
rts='mock',
rts_config=rts_config)
# --------------------------------------------------------------------------
#
def get_resource_allocation_state(self):
'''
**Purpose**: get the state of the resource allocation
'''
try:
ru.raise_on(tag='resource_fail')
return 'Running'
except:
return 'Final'
# --------------------------------------------------------------------------
#
def get_completed_states(self):
'''
        **Purpose**: return the list of states that count as completed
'''
return ['Final']
# --------------------------------------------------------------------------
#
def _validate_resource_desc(self):
'''
**Purpose**: validate the provided resource description
'''
return True
# --------------------------------------------------------------------------
#
def _populate(self):
'''
**Purpose**: evaluate attributes provided in the resource description
'''
return None
# --------------------------------------------------------------------------
#
def submit_resource_request(self, *args):
'''
        **Purpose**: Create a resource as per the provided resource description
'''
return None
# --------------------------------------------------------------------------
#
def get_rts_info(self):
return None
# --------------------------------------------------------------------------
#
def _terminate_resource_request(self):
'''
**Purpose**: Cancel the resource
'''
return None
# ------------------------------------------------------------------------------
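# Usage sketch (hypothetical session id, empty RTS config; resource_desc follows the
# example in the class docstring):
#   rm = ResourceManager(resource_desc={'resource': 'xsede.stampede',
#                                       'walltime': 120,
#                                       'cpus': 64,
#                                       'project': 'TG-abcxyz'},
#                        sid='entk.session.0000', rts_config={})
#   rm.get_resource_allocation_state()   # 'Running', or 'Final' once a failure is induced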
|
mit
| -5,283,086,850,979,547,000
| 29.309735
| 80
| 0.38219
| false
| 5.765993
| false
| false
| false
|
pratheekms/nlp-class
|
unit_test_5/feature_functions.py
|
1
|
8956
|
'''
feature_functions.py
Implements the feature generation mechanism
Author: Anantharaman Narayana Iyer
Date: 21 Nov 2014
'''
from nltk import sent_tokenize, word_tokenize
import nltk
import json
import numpy
import pickle
import datetime
from MyMaxEnt import MyMaxEnt
phones = ["phone", "phones", "smartphone", "smartphones", "mobile"]
org_list = ["Google", "Samsung", "HTC", "Sony", "Apple", "Micromax"]
class FeatureFunctions(object):
def __init__(self, wmap, tag_list):
self.wmap = wmap
self.supported_tags = tag_list
self.flist = [self.f1, self.f2, self.f3, self.f4, self.f5, self.f6, self.f7, self.f8, self.f9, self.f10, self.f11, self.f12, self.f13]
return
def f1(self, h, tag):
if (tag == 'Version' and h[1] == 'OS'):
return 1
else:
return 0
def f2(self, h, tag):
if (tag == 'Version' and h[1] == 'Other'):
return 1
else:
return 0
def f3(self, h, tag):
if (tag == 'Phone' and h[1] == 'Other'):
return 1
else:
return 0
def f4(self, h, tag):
if (tag == 'Org' and h[1] == 'Other'):
return 1
else:
return 0
def f5(self, h, tag):
if (tag == 'Date' and h[1] == 'Other'):
return 1
else:
return 0
def f6(self, h, tag):
if (tag == 'Location' and h[1] == 'Other'):
return 1
else:
return 0
def f7(self, h, tag):
if (tag == 'Size' and h[1] == 'Other'):
return 1
else:
return 0
def f8(self, h, tag):
if (tag == 'App' and h[1] == 'Other'):
return 1
else:
return 0
def f9(self, h, tag):
if (tag == 'Family' and h[1] == 'Other'):
return 1
else:
return 0
def f10(self, h, tag):
if (tag == 'Family' and h[1] == 'Org'):
return 1
else:
return 0
def f11(self, h, tag):
if (tag == 'Price' and h[1] == 'Other'):
return 1
else:
return 0
def f12(self, h, tag):
if (tag == 'Phone' and h[1] == 'Org'):
return 1
else:
return 0
def f13(self, h, tag):
if (tag == 'Phone' and h[1] == 'OS'):
return 1
else:
return 0
def f14(self, h, tag):
if (tag == 'App' and h[1] == 'App'):
return 1
else:
return 0
def f15(self, h, tag):
if (tag == 'Price' and h[1] == 'Price'):
return 1
else:
return 0
def f16(self, h, tag):
if (tag == 'Version' and h[1] == 'OS'):
return 1
else:
return 0
def f17(self, h, tag):
if (tag == 'Version' and h[1] == 'OS'):
return 1
else:
return 0
def f18(self, h, tag):
if (tag == 'Family' and h[0] == 'Org'):
return 1
else:
return 0
def f19(self, h, tag):
if (tag == 'Model' and h[0] == 'Org'):
return 1
else:
return 0
def f20(self, h, tag):
if (tag == 'Other' and h[0] == 'Other'):
return 1
else:
return 0
def f21(self, h, tag):
if (tag == 'Other' and h[1] == 'Other'):
return 1
else:
return 0
def f22(self, h, tag):
if (tag == 'Version' and h[0] == 'Org'):
return 1
else:
return 0
def f23(self, h, tag):
if (tag == 'Other' and h[0] == 'Date'):
return 1
else:
return 0
def f24(self, h, tag):
if (tag == 'Other' and h[0] == 'Place'):
return 1
else:
return 0
def f25(self, h, tag):
if (tag == 'Size' and h[0] == 'Other'):
return 1
else:
return 0
def f26(self, h, tag):
if (tag == 'Price' and h[0] == 'Other'):
return 1
else:
return 0
def f27(self, h, tag):
if (tag == 'Location' and h[0] == 'Other'):
return 1
else:
return 0
def f28(self, h, tag):
if (tag == 'Price' and h[0] == 'Date'):
return 1
else:
return 0
def f29(self, h, tag):
if (tag == 'Model' and h[0] == 'Other'):
return 1
else:
return 0
def f30(self, h, tag):
if (tag == 'OS' and h[0] == 'Org'):
return 1
else:
return 0
def f31(self, h, tag):
if (tag == 'Other' and h[0] == 'OS'):
return 1
else:
return 0
def f32(self, h, tag):
if (tag == 'Place' and h[0] == 'Version'):
return 1
else:
return 0
def f33(self, h, tag):
if (tag == 'Price' and h[0] == 'Version'):
return 1
else:
return 0
def f34(self, h, tag):
if (tag == 'Family' and h[0] == 'Date'):
return 1
else:
return 0
def f35(self, h, tag):
if (tag == 'Size' and h[0] == 'Phone'):
return 1
else:
return 0
def evaluate(self, xi, tag):
feats = []
for f in self.flist:
feats.append(int(f(xi, tag)))
return feats
def build_history(data_list, supported_tags):
history_list = [] # list of all histories
words_map = {}
count = 0
for data in data_list: # data is the inputs entered by a given student
data1 = data['data']
for rec in data1:
updates = rec['updates']
sent = rec['sentence']
words = []
for i in range(len(updates)):
words.append(updates[i]['word'])
#------------------------------------------------------------------------------------------------
# NOTE: below code is a temporary hack to build the MAxEnt for just 2 tags - we will change this later
if (updates[i]['tag'] not in supported_tags):
updates[i]['tag'] = "Other"
#------------------------------------------------------------------------------------------------
words_map[count] = {'words': words, 'pos_tags': nltk.pos_tag(words)}
for i in range(len(updates)):
history = {}
history["i"] = i
if i == 0:
history["ta"] = "*" # special tag
history["tb"] = "*" # special tag
elif i == 1:
history["ta"] = "*" # special tag
history["tb"] = updates[i - 1]['tag']
else:
history["ta"] = updates[i - 2]['tag']
history["tb"] = updates[i - 1]['tag']
history["wn"] = count
history_list.append((history, updates[i]['tag'], ))
count += 1
return (history_list, words_map)
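# Each element of history_list is a (history, tag) pair, where history is a dict:
#   {'i': word index in the sentence, 'ta': tag two positions back,
#    'tb': previous tag, 'wn': sentence index into words_map}.
# For example, the third word of the first sentence, tagged 'Org' after the
# preceding tags ('Other', 'OS'), yields
#   ({'i': 2, 'ta': 'Other', 'tb': 'OS', 'wn': 0}, 'Org').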
def test(clf, history_list):
result = []
for history in history_list:
mymap = wmap[history[0]["wn"]]
words = mymap['words']
tags = mymap['pos_tags']
index = history[0]["i"]
val = clf.classify(history[0])
result.append({'predicted': val, 'word': words[index], 'expected': history[1]})
return result
if __name__ == "__main__":
#----- REPLACE THESE PATHS FOR YOUR SYSTEM ---------------------
json_file = r"C:\home\ananth\research\pesit\nlp\ner\all_data.json"
pickle_file = r"C:\home\ananth\research\pesit\nlp\ner\all_data.p"
# ----------------------------------------------------------------
TRAIN = int(raw_input("Enter 1 for Train, 0 to use pickeled file: "))
supported_tags = ["Org", "OS", "Version", "Other"]
tag_set = {"Org": 0, "Other": 1}
dims = 9
trg_data_x = []
trg_data_y = []
trg_data = {'Org': [], 'Other': []}
data = json.loads(open(json_file).read())['root']
print "num stu = ", len(data)
(history_list, wmap) = build_history(data, supported_tags)
print "After build_history"
func_obj = FeatureFunctions(wmap, supported_tags)
clf = MyMaxEnt(history_list, func_obj, reg_lambda = 0.001, pic_file = pickle_file)
print clf.model
if TRAIN == 1:
clf.train()
result = test(clf, history_list[-500:])
for r in result:
print r['word'], r['predicted'], r['expected']
|
mit
| -8,602,308,351,966,660,000
| 26.252366
| 142
| 0.423515
| false
| 3.76936
| false
| false
| false
|
PW-Sat2/PWSat2OBC
|
integration_tests/emulator/rtc.py
|
1
|
1580
|
from datetime import timedelta, datetime
import wx
from wx import xrc
from devices import RTCDevice
from .base import ModuleBase, bind
class RTCModule(ModuleBase):
GridPos = (1, 0)
def __init__(self, system):
self._system = system
self._rtc = system.rtc # type: RTCDevice
self.title = 'RTC'
self.grid_pos = (1, 2)
self.grid_span = (1, 1)
def load(self, res, parent):
self._panel = res.LoadPanel(parent, 'RTCModule')
self.bind_handlers()
self._time = xrc.XRCCTRL(self._panel, 'rtc_time')
def root(self):
return self._panel
def update(self):
t = self._rtc.response_time()
self._time.SetLabel('RTC time: \n' + t.strftime('%Y-%m-%d %H:%M:%S'))
@bind('rtc_start', wx.EVT_BUTTON)
def _on_start(self, evt):
self._rtc.start_running()
@bind('rtc_stop', wx.EVT_BUTTON)
def _on_stop(self, evt):
self._rtc.stop_running()
@bind('rtc_advance_5min', wx.EVT_BUTTON, args=(timedelta(minutes=5),))
def _on_advance(self, evt, interval):
self._rtc.advance_by(interval)
@bind('rtc_system_time', wx.EVT_BUTTON,)
def _on_use_system_time(self, evt):
self._rtc._current_time = datetime.now()
@bind('rtc_advance_value', wx.EVT_TEXT)
def _on_advance_time_value_changed(self, evt):
new_text = evt.EventObject.GetValue()
new_value = 1000
try:
new_value = int(new_text)
except:
pass
self._rtc._advance_time_interval = timedelta(milliseconds=new_value)
|
agpl-3.0
| 8,938,563,817,151,320,000
| 27.214286
| 77
| 0.593671
| false
| 3.291667
| false
| false
| false
|
lxml/lxml
|
versioninfo.py
|
1
|
2210
|
import io
import os
import re
import sys
__LXML_VERSION = None
def version():
global __LXML_VERSION
if __LXML_VERSION is None:
with open(os.path.join(get_base_dir(), 'src', 'lxml', '__init__.py')) as f:
__LXML_VERSION = re.search(r'__version__\s*=\s*"([^"]+)"', f.read(250)).group(1)
assert __LXML_VERSION
return __LXML_VERSION
def branch_version():
return version()[:3]
def is_pre_release():
version_string = version()
return "a" in version_string or "b" in version_string
def dev_status():
_version = version()
if 'a' in _version:
return 'Development Status :: 3 - Alpha'
elif 'b' in _version or 'c' in _version:
return 'Development Status :: 4 - Beta'
else:
return 'Development Status :: 5 - Production/Stable'
def changes():
"""Extract part of changelog pertaining to version.
"""
_version = version()
with io.open(os.path.join(get_base_dir(), "CHANGES.txt"), 'r', encoding='utf8') as f:
lines = []
for line in f:
if line.startswith('====='):
if len(lines) > 1:
break
if lines:
lines.append(line)
elif line.startswith(_version):
lines.append(line)
return ''.join(lines[:-1])
def create_version_h():
"""Create lxml-version.h
"""
lxml_version = version()
# make sure we have a triple part version number
parts = lxml_version.split('-')
while parts[0].count('.') < 2:
parts[0] += '.0'
lxml_version = '-'.join(parts).replace('a', '.alpha').replace('b', '.beta')
file_path = os.path.join(get_base_dir(), 'src', 'lxml', 'includes', 'lxml-version.h')
# Avoid changing file timestamp if content didn't change.
if os.path.isfile(file_path):
with open(file_path, 'r') as version_h:
if ('"%s"' % lxml_version) in version_h.read(100):
return
with open(file_path, 'w') as version_h:
version_h.write('''\
#ifndef LXML_VERSION_STRING
#define LXML_VERSION_STRING "%s"
#endif
''' % lxml_version)
def get_base_dir():
return os.path.abspath(os.path.dirname(sys.argv[0]))
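# Rough illustration (not from the original file) of the normalization create_version_h()
# applies before writing lxml-version.h; the example version strings are hypothetical:
#   "5.0"         -> padded to "5.0.0"
#   "5.0.0a1"     -> "5.0.0.alpha1"        ('a' becomes '.alpha', 'b' becomes '.beta')
#   "5.0.0b2-dev" -> "5.0.0.beta2-dev"     (only the part before '-' is padded with '.0')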
|
bsd-3-clause
| -7,693,295,235,261,800,000
| 26.283951
| 92
| 0.567873
| false
| 3.480315
| false
| false
| false
|
exic/spade2
|
spade/pubsub.py
|
1
|
9884
|
from Behaviour import MessageTemplate, OneShotBehaviour
from xmpp.protocol import *
from xmpp.simplexml import Node
import uuid
def gen_id():
return str(uuid.uuid4())
#def PubSubMessageTemplate():
# msgs = []
# for ns in (NS_PUBSUB, NS_PUBSUB_OWNER):
# msg = Iq()
# msg.addChild(name='pubsub', namespace=ns)
# msgs.append(msg)
# return reduce(lambda a,b: a | b, map(lambda msg: MessageTemplate(msg), msgs))
#class XMPPIdTemplate(MessageTemplate):
# def __init__(self, id):
# iq = Iq()
# iq.setID(id)
# MessageTemplate.__init__(self, iq)
#TODO: Implementar retrieve nodes y discovery
class PubSub(object):
def __init__(self, agent): #, msgrecv):
self._client = agent.getAID().getName()
#self.msgrecv = msgrecv
self.myAgent = agent
self._server = agent.server
def _sendAndReceive(self, iq, getContents):
id = gen_id()
t = MessageTemplate(Iq(attrs={'id':id}))
iq.setID(id)
b = self._sendAndReceiveBehav(iq,getContents)
if self.myAgent._running:
self.myAgent.addBehaviour(b,t)
b.join()
else:
self.myAgent.runBehaviourOnce(b,t)
return b.result
class _sendAndReceiveBehav(OneShotBehaviour):
def __init__(self,iq,getContents):
OneShotBehaviour.__init__(self)
self.iq = iq
self.getContents = getContents
self.timeout = 15
self.result = (None,None)
def _process(self):
#print 'Sending ', str(self.iq)
self.myAgent.send(self.iq)
#Wait for the answer
msg = self._receive(block=True,timeout=self.timeout)
#print 'Received ', str(msg)
if msg is None:
#Timeout
self.result = ('error',['timeout'])
return
if msg['type'] == 'error':
errors = []
for error in msg.getTag('error').getChildren():
if error.getName() == 'text': continue
errors.append(error.getName())
self.result = ('error',errors)
return
if msg['type'] == 'result':
self.result = ('ok',self.getContents(msg))
return
self.result = ('error',['unknown'])
return
def publish(self, node, event=None):
"""
Publishes an item to a given node.
XXX: 'node' here is not an XML node, but the attribute for <publish>
@type node: string
@param node: The ID of the pubsub node to publish
@type event: Event
@param event: Content to publish
@rtype: (string , list[string])
@return: A tuple with the type of answer ('ok','error') and information
about the answer. In case of 'error', a list with the errors. In case of
'ok' the name of the created node.
"""
iq = Iq(
typ='set',
queryNS=None,
attrs={},
frm=self._client
)
pubsub_node = Node(tag='pubsub', attrs={'xmlns':NS_PUBSUB})
publish_node = Node(tag='publish', attrs={'node':node})
item_node = Node(tag='item')
if event is not None:
item_node.addChild(node=event)
publish_node.addChild(node=item_node)
pubsub_node.addChild(node=publish_node)
iq.addChild(node=pubsub_node)
def getContents(msg):
node_publish = msg.getTag('pubsub').getTag('publish')
            #XXX: Server implementation always returns the item id, but XEP-60 does
            # not require it
return [node_publish['node'],node_publish.getTag('item')['id']]
return self._sendAndReceive(iq, getContents)
def subscribe(self, node, server=None, jid=None):
"""
Subscribes to the selected node
@type node: string
@param node: id of the node to delete
@type server: string
@param server: PubSub server
@rtype: (string , list[string])
@return: A tuple with the type of answer ('ok','error') and information
about the answer. In case of 'error', a list with the errors. In case of
'ok', an empty list.
"""
if server is None:
server = self._server
if jid is None:
jid = self._client
iq = Iq(
typ='set',
queryNS=None,
attrs={},
frm=self._client,
to=server
)
pubsub_node = Node(tag='pubsub', attrs={'xmlns':NS_PUBSUB})
subscribe_node = Node(tag='subscribe', attrs={'node':node, 'jid':jid})
pubsub_node.addChild(node=subscribe_node)
iq.addChild(node=pubsub_node)
return self._sendAndReceive(iq, lambda msg: [])
def unsubscribe(self, node, server=None, jid=None):
"""
Unsubscribe from the selected node
@type node: string
@param node: id of the node to unsubscribe
@type server: string
@param server: PubSub server
@rtype: (string , list[string])
@return: A tuple with the type of answer ('ok','error') and information
about the answer. In case of 'error', a list with the errors. In case of
'ok' an empty list.
"""
if server is None:
server = self._server
if jid is None:
jid = self._client
iq = Iq(
typ='set',
queryNS=None,
attrs={},
frm=self._client,
to=server
)
pubsub_node = Node(tag='pubsub', attrs={'xmlns':NS_PUBSUB_OWNER})
unsubscribe_node = Node(tag='unsubscribe', attrs={'node':node, 'jid':jid})
pubsub_node.addChild(node=unsubscribe_node)
iq.addChild(node=pubsub_node)
return self._sendAndReceive(iq, lambda msg: [])
def createNode(self, node, server=None, type='leaf', parent=None, access=None):
"""
Creates a node with the specified parameters.
@type node: string
@param node: The ID of the node to create
@type server: string
@param server: PubSub server
@type type: string
@param type: Type of the node: 'leaf' or 'collection'
@type parent: string
@param parent: id of the parent node. None if parent is root
@type access: string
@param acccess: Access model of the node
@rtype: (string , list[string])
@return: A tuple with the type of answer ('ok','error') and information
about the answer. In case of 'error', a list with the errors. In case of
'ok' the name of the created node.
"""
#TODO: Add suport for node configuration (RECOMMENDED in XEP-60)
if server is None:
server = self._server
iq = Iq(
typ='set',
queryNS=None,
attrs={},
frm=self._client,
to=server
)
pubsub_node = Node(tag='pubsub', attrs={'xmlns':NS_PUBSUB})
create_node = Node(tag='create', attrs={} if node is None else {'node':node})
pubsub_node.addChild(node=create_node)
iq.addChild(node=pubsub_node)
if parent is not None or type=='collection' or access is not None:
field_nodes=[]
configure_node = Node(tag='configure')
field_nodes.append(DataField('FORM_TYPE', NS_PUBSUB+'#node_config','hidden'))
if parent is not None:
field_nodes.append(DataField('pubsub#collection',parent))
# <field var='pubsub#collection'><value>announcements</value></field>
if type == 'collection':
field_nodes.append(DataField('pubsub#node_type','collection'))
if access is not None:
field_nodes.append(DataField('pubsub#access_model',access))
x_node = DataForm(typ='submit',data=field_nodes)
configure_node.addChild(x_node)
pubsub_node.addChild(configure_node)
return self._sendAndReceive(iq, lambda msg:[msg.getTag('pubsub').getTag('create')['node']])
def createInstantNode(self, server=None, type='leaf', parent=None, access=None):
"""
Creates an instant node without a name. The server will generate id.
"""
if server is None:
server = self._server
        return self.createNode(None, server, type, parent, access)
def deleteNode(self, node, server=None):
"""
Deletes the selected node.
@type node: string
@param node: id of the node to delete
@type server: string
@param server: PubSub server
@rtype: (string , list[string])
@return: A tuple with the type of answer ('ok','error') and information
about the answer. In case of 'error', a list with the errors. In case of
'ok' an empty list.
"""
#TODO: A method to redirect the subscriptions to the node to another one COULD be implemented
if server is None:
server = self._server
iq = Iq(
typ='set',
queryNS=None,
attrs={},
frm=self._client,
to=server,
)
pubsub_node = Node(tag='pubsub', attrs={'xmlns':NS_PUBSUB_OWNER})
pubsub_node.addChild(name='delete', attrs={'node':node})
iq.addChild(node=pubsub_node)
return self._sendAndReceive(iq, lambda msg: [])
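# Minimal usage sketch (added for illustration, not part of the original module); the agent
# object and node names are hypothetical and only show the call sequence the class expects:
#   pubsub = PubSub(agent)                        # agent: a spade Agent with .server set
#   pubsub.createNode('news', type='leaf')        # -> ('ok', ['news']) on success
#   pubsub.subscribe('news')                      # -> ('ok', []) or ('error', [reasons])
#   pubsub.publish('news', event=some_node)       # -> ('ok', [node_id, item_id])
#   pubsub.deleteNode('news')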
|
lgpl-2.1
| 4,256,735,416,115,895,300
| 31.620462
| 101
| 0.542493
| false
| 4.108063
| true
| false
| false
|
xiaocong/remote-task-http-server
|
devices.py
|
1
|
4107
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import Bottle, request, static_file, abort
import re
import time
import os
import subprocess
from io import BytesIO
try:
import PIL.Image as Image
except:
from PIL import Image
from jobs import lock
import adb
app = Bottle()
@app.get("/")
def devices():
result = {'android': []}
good_devices = adb.devices(status='good')
for se, name in adb.devices(status=request.params.get("status", "all")).items():
device = {'adb': {'serial': se, 'device': name}}
if se in good_devices:
props = adb.getprop(se)
device.update({
'product': {
'brand': props.get('ro.product.brand'),
'manufacturer': props.get('ro.product.manufacturer'),
'model': props.get('ro.product.model'),
'board': props.get('ro.product.board'),
'device': props.get('ro.product.device')
},
'locale': {
'language': props.get('ro.product.locale.language'),
'region': props.get('ro.product.locale.region')
},
'build': {
'fingerprint': props.get('ro.build.fingerprint'),
'type': props.get('ro.build.type'),
'date_utc': props.get('ro.build.date.utc'),
'display_id': props.get('ro.build.display.id'),
'id': props.get('ro.build.id'),
'version': {
'incremental': props.get('ro.build.version.incremental'),
'release': props.get('ro.build.version.release'),
'sdk': props.get('ro.build.version.sdk'),
'codename': props.get('ro.build.version.codename')
}
}
})
result['android'].append(device)
return result
@app.route("/<serial>/adb/<cmds:path>")
def adb_cmd(serial, cmds):
return adb.cmd(['-s', serial] + cmds.split("/"), timeout=request.params.get("timeout", 10))
def meminfo(serial):
result = {}
for line in adb.cmd(['-s', serial, 'shell', 'cat', '/proc/meminfo'])['stdout'].splitlines():
item = [i.strip() for i in line.split(':')]
if len(item) == 2:
values = item[1].split()
result[item[0]] = int(values[0])*1024 if len(values) == 2 and values[1] == 'kB' else int(values[0])
return result
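# Illustrative only (not in the original source): a /proc/meminfo line such as
#   "MemTotal:        1892632 kB"
# is parsed by meminfo() into {'MemTotal': 1892632 * 1024}; values reported in kB are
# converted to bytes, while unit-less values are kept as plain integers.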
def top(serial):
result = {"processes": []}
out = adb.cmd(['-s', serial, 'shell', 'top', '-n', '1'])['stdout']
m = re.search(r'User\s*(\d+)%,\s*System\s*(\d+)%,\s*IOW\s*(\d+)%,\s*IRQ\s*(\d+)%', out)
if m:
result["CPU"] = {"User": int(m.group(1))/100., "System": int(m.group(2))/100., "IOW": int(m.group(3))/100., "IRQ": int(m.group(4))/100.}
for item in re.findall(r'(\d+)\s+(\d+)\s+(\d+)%\s+(\w+)\s+(\d+)\s+(\d+)K\s+(\d+)K\s+(fg|bg)?\s+(\S+)\s+(\S+)', out):
pid, pr, cpu, s, thr, vss, rss, pcy, uid, name = item
result["processes"].append({"pid": int(pid), "pr": int(pr), "cpu": int(cpu)/100., "s": s, "thr": int(thr), "vss": int(vss)*1024, "rss": int(rss)*1024, "pcy": pcy, "uid": uid, "name": name})
return result
@app.get("/<serial>/stat")
def stat(serial):
return {"meminfo": meminfo(serial), "top": top(serial)}
@app.get("/<serial>/screenshot")
@lock
def screenshot(serial):
size = (int(request.params.get('width', 480)), int(request.params.get('height', 480)))
thumbnail = '%s(%dx%d).thumbnail.png' % (serial, size[0], size[1])
if not os.path.exists('/tmp/%s' % thumbnail) or time.time() - os.stat('/tmp/%s' % thumbnail).st_mtime > 5:
p1 = subprocess.Popen(["adb", "-s", serial, "shell", "screencap", "-p"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["sed", "s/\r$//"], stdout=subprocess.PIPE, stdin=p1.stdout)
im = Image.open(BytesIO(p2.communicate()[0]))
im.thumbnail(size, Image.ANTIALIAS)
im.save('/tmp/%s' % thumbnail)
return static_file(thumbnail, root='/tmp')
|
mit
| 7,346,349,997,841,327,000
| 39.663366
| 197
| 0.528366
| false
| 3.369155
| false
| false
| false
|
bors-ltd/django-gitstorage
|
gitstorage/models.py
|
1
|
6291
|
# Copyright Bors LTD
# This file is part of django-gitstorage.
#
# Django-gitstorage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Django-gitstorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with django-gitstorage. If not, see <http://www.gnu.org/licenses/>.
import magic
from django.apps import apps as django_apps
from django.conf import settings
from django.contrib.auth import models as auth_models
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
from . import mimetypes
from . import utils
from . import validators
def get_blob_metadata_model():
"""
Returns the BlobMetadata model that is active in this project.
"""
try:
return django_apps.get_model(settings.GIT_STORAGE_BLOB_METADATA_MODEL)
except ValueError:
raise ImproperlyConfigured("GIT_STORAGE_BLOB_METADATA_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"GIT_STORAGE_BLOB_METADATA_MODEL refers to model '%s' that has not been installed" % (
settings.GIT_STORAGE_BLOB_METADATA_MODEL,
)
)
except AttributeError:
return BlobMetadata
def guess_mimetype(name=None, buffer=None):
mimetype = None
if name is not None:
mimetype = mimetypes.guess_type(name)[0]
    # Guessing from the name is not more accurate, but it is more easily extensible
if mimetype is None and buffer is not None:
mimetype = magic.from_buffer(buffer, mime=True).decode()
return mimetype
class BaseObjectMetadata(models.Model):
id = models.CharField(_("id"), primary_key=True, unique=True, db_index=True, editable=False, max_length=40)
class Meta:
abstract = True
class TreeMetadata(BaseObjectMetadata):
class Meta:
managed = False
def __str__(self):
return "{0.id}".format(self)
class BaseBlobMetadata(BaseObjectMetadata):
# Cached properties to avoid loading the blob
size = models.PositiveIntegerField(verbose_name=_(u"Size"))
# Extra properties that must be optional (they are filled after the initial creation)
mimetype = models.CharField(_("mimetype"), max_length=255, null=True, blank=True)
def fill(self, repository, name, blob, **kwargs):
"""Method called after creation of the object to fill extra properties: mimetype, ...
Override to fill your own extra fields and call this parent.
"""
if self.mimetype is None:
self.mimetype = guess_mimetype(name=name, buffer=blob.data)
class Meta:
verbose_name = _("blob metadata")
verbose_name_plural = _("blob metadata")
abstract = True
def __str__(self):
return "{0.id} type={0.mimetype}".format(self)
class BlobMetadata(BaseBlobMetadata):
class Meta:
swappable = 'GIT_STORAGE_BLOB_METADATA_MODEL'
class TreePermissionQuerySet(models.QuerySet):
def current_permissions(self, path, **kwargs):
return self.filter(parent_path=path.parent_path, name=path.name, **kwargs).select_related('user')
def allowed_names(self, user, parent_path, **kwargs):
if user:
if user.is_superuser:
# Reads as no restriction
return None
if not user.is_authenticated():
user = None
return self.filter(parent_path=parent_path, user=user, **kwargs).values_list('name', flat=True)
def allowed_paths(self, user):
if user:
if user.is_superuser:
# Reads as no restriction
return None
if not user.is_authenticated():
user = None
all_permissions = self.filter(user=user).values_list('parent_path', 'name')
return ["/".join(filter(None, segments)) for segments in all_permissions]
def for_user(self, user, path, **kwargs):
if user:
if not user.is_authenticated():
user = None
return self.filter(user=user, parent_path=path.parent_path, name=path.name, **kwargs)
def other_permissions(self, user, path, **kwargs):
if user:
if not user.is_authenticated():
user = None
return self.filter(user=user, parent_path=path.parent_path, **kwargs).exclude(name=path.name).exists()
def is_allowed(self, user, path, **kwargs):
if user:
if user.is_superuser:
return True
return self.for_user(user, path, **kwargs).exists()
def add(self, users, path):
for user in users:
self.get_or_create(parent_path=path.parent_path, name=path.name, user=user)
def remove(self, users, path):
# Does not work for [None]
if None in users:
for user in users:
self.filter(parent_path=path.parent_path, name=path.name, user=user).delete()
else:
self.filter(parent_path=path.parent_path, name=path.name, user__in=users).delete()
class TreePermission(models.Model):
parent_path = models.CharField(_("parent path"), max_length=2048, db_index=True, blank=True,
validators=[validators.path_validator])
name = models.CharField(_("name"), max_length=256, db_index=True, blank=True,
validators=[validators.name_validator])
user = models.ForeignKey(auth_models.User, null=True, blank=True) # For anonymous user
objects = TreePermissionQuerySet.as_manager()
class Meta:
verbose_name = _("tree permission")
verbose_name_plural = _("tree permissions")
def __str__(self):
path = utils.Path(self.parent_path).resolve(self.name)
return "{0} on {1}".format(self.user, path)
|
gpl-3.0
| 749,146,428,825,357,800
| 34.948571
| 112
| 0.650135
| false
| 4.098371
| false
| false
| false
|
MarkusHackspacher/unknown-horizons
|
horizons/util/python/weakmethod.py
|
1
|
2225
|
# ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# team@unknown-horizons.org
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
import types
import weakref
class WeakMethod:
def __init__(self, function):
assert callable(function)
if isinstance(function, types.MethodType) and function.__self__ is not None:
self.function = function.__func__
self.instance = weakref.ref(function.__self__)
else:
self.function = function
self.instance = None
def __call__(self, *args, **kwargs):
if self.instance is None:
return self.function(*args, **kwargs)
elif self.instance() is not None:
return self.function(self.instance(), *args, **kwargs)
else:
raise ReferenceError("Instance: {} Function: {} Function from module: {}"
.format(self.instance(), self.function, self.function.__module__))
def __eq__(self, other):
if isinstance(other, WeakMethod):
if self.function != other.function:
return False
# check also if either instance is None or else if instances are equal
if self.instance is None:
return other.instance is None
else:
return self.instance() == other.instance()
elif callable(other):
return self == WeakMethod(other)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.instance, self.function))
def __str__(self):
return str(self.function)
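# Short illustration (not part of the original file): WeakMethod keeps only a weak
# reference to the bound instance, so registered callbacks do not keep objects alive:
#   cb = WeakMethod(player.on_tick)   # 'player.on_tick' is a hypothetical bound method
#   cb(dt)                            # forwards to player.on_tick(dt) while player exists
#   del player                        # once collected, calling cb() raises ReferenceError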
|
gpl-2.0
| 867,174,227,849,065,700
| 32.208955
| 90
| 0.671461
| false
| 3.836207
| false
| false
| false
|
CSSIP-AIR/UMETRICS
|
collapse_persons/person_attribute.py
|
1
|
3929
|
################################################################################
# Copyright (c) 2013, AMERICAN INSTITUTES FOR RESEARCH
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
import common, os, sys
sys.path.append(os.path.abspath('../Shared'))
import __string__ as s
def get_person_attributes(database, person_id):
"""A person can have more than one attribute. Get all the attributes for the person indicated."""
query = """
select
a.AttributeId,
pa.RelationshipCode,
a.Attribute,
coalesce(pas.RelationshipCodeWeight, 0.0) RelationshipCodeWeight
from
Attribute a
inner join PersonAttribute pa on
pa.AttributeId = a.AttributeId and
pa.PersonId = %d
left outer join UMETRICSSupport.PersonAttributeStatistics pas on
pas.RelationshipCode = pa.RelationshipCode;
""" % person_id
person_attributes = []
rows = database.get_all_rows(query)
for row in rows:
person_attribute = PersonAttribute(row[0], row[1], row[2], row[3])
if person_attribute.is_populated:
person_attributes.append(person_attribute)
return person_attributes
def any_attributes_match(database, person_attributes, candidate_person_id):
"""Check to see if any of the candidate person's attributes match the current person's"""
candidate_person_attributes = get_person_attributes(database, candidate_person_id)
for person_attribute in person_attributes:
for candidate_person_attribute in candidate_person_attributes:
if person_attribute.is_similar(candidate_person_attribute):
return True
return False
class PersonAttribute:
"""Simplistic class to hold a PersonAttribute"""
def __init__(self, id, relationship, attribute, weight):
self.id = id
self.relationship = common.standardize_text(relationship)
self.attribute = common.standardize_text(attribute)
self.weight = weight
self.concat = s.nullify_blanks(s.make_string([self.relationship, self.attribute]))
self.is_populated = self.concat is not None
def is_similar(self, other_person_attribute):
if not self.is_populated or not other_person_attribute.is_populated:
return False
if self.relationship != other_person_attribute.relationship:
return False
if self.id == other_person_attribute.id:
return True
return (self.weight > 0.9 and self.attribute == other_person_attribute.attribute)
# TODO: Add more sophisticated matching
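# Illustrative note (added, not from the original source): two PersonAttribute records are
# treated as similar when their standardized relationships match and they share either the
# same AttributeId or an identical attribute string with RelationshipCodeWeight above 0.9,
# e.g. two "email" attributes holding the same address with a weight of 0.95.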
|
bsd-2-clause
| -7,868,506,971,949,554,000
| 54.338028
| 757
| 0.686689
| false
| 4.722356
| false
| false
| false
|
pcmoritz/Strada.jl
|
deps/src/caffe/python/apollocaffe/layers/caffe_layers.py
|
1
|
7272
|
"""
List of layer classes for building protobuf layer parameters from python
"""
from .layer_headers import Layer, LossLayer, DataLayer
from .layer_helpers import assign_proto, Filler
from apollocaffe.proto import caffe_pb2
class CapSequence(Layer):
def __init__(self, name, sequence_lengths, **kwargs):
super(CapSequence, self).__init__(self, name, kwargs)
for x in sequence_lengths:
self.p.rp.cap_sequence_param.sequence_lengths.append(x)
class Concat(Layer):
def __init__(self, name, **kwargs):
super(Concat, self).__init__(self, name, kwargs)
class Convolution(Layer):
def __init__(self, name, kernel_dim, num_output, weight_filler=None, bias_filler=None, **kwargs):
kwargs['kernel_h'] = kernel_dim[0]
kwargs['kernel_w'] = kernel_dim[1]
kwargs['num_output'] = num_output
super(Convolution, self).__init__(self, name, kwargs)
if weight_filler is None:
weight_filler = Filler('xavier')
self.p.convolution_param.weight_filler.CopyFrom(weight_filler.filler_param)
if bias_filler is None:
bias_filler = Filler('constant', 0.)
self.p.convolution_param.bias_filler.CopyFrom(bias_filler.filler_param)
class Data(DataLayer):
def __init__(self, name, source, batch_size, transform=None, **kwargs):
kwargs['source'] = source
kwargs['batch_size'] = batch_size
super(Data, self).__init__(self, name, kwargs)
self.p.data_param.backend = caffe_pb2.DataParameter.LMDB
if transform is not None:
self.p.transform_param.CopyFrom(transform.transform_param)
class Dropout(Layer):
def __init__(self, name, dropout_ratio, **kwargs):
kwargs['dropout_ratio'] = dropout_ratio
super(Dropout, self).__init__(self, name, kwargs)
class DummyData(DataLayer):
def __init__(self, name, shape, **kwargs):
super(DummyData, self).__init__(self, name, kwargs)
assert len(shape) == 4
self.p.dummy_data_param.num.append(shape[0])
self.p.dummy_data_param.channels.append(shape[1])
self.p.dummy_data_param.height.append(shape[2])
self.p.dummy_data_param.width.append(shape[3])
class Eltwise(Layer):
def __init__(self, name, operation, **kwargs):
super(Eltwise, self).__init__(self, name, kwargs)
if operation == 'MAX':
self.p.eltwise_param.operation = caffe_pb2.EltwiseParameter.MAX
elif operation == 'SUM':
self.p.eltwise_param.operation = caffe_pb2.EltwiseParameter.SUM
elif operation == 'PROD':
self.p.eltwise_param.operation = caffe_pb2.EltwiseParameter.PROD
else:
raise ValueError('Unknown Eltwise operator')
class EuclideanLoss(LossLayer):
def __init__(self, name, **kwargs):
super(EuclideanLoss, self).__init__(self, name, kwargs)
class HDF5Data(DataLayer):
def __init__(self, name, source, batch_size, transform=None, **kwargs):
kwargs['source'] = source
kwargs['batch_size'] = batch_size
super(HDF5Data, self).__init__(self, name, kwargs)
if transform is not None:
self.p.transform_param.CopyFrom(transform.transform_param)
class ImageData(DataLayer):
def __init__(self, name, source, batch_size, transform=None, **kwargs):
kwargs['source'] = source
kwargs['batch_size'] = batch_size
super(ImageData, self).__init__(self, name, kwargs)
        if transform is not None:
self.p.transform_param.CopyFrom(transform.transform_param)
class InnerProduct(Layer):
def __init__(self, name, num_output, weight_filler=None, bias_filler=None, **kwargs):
kwargs['num_output'] = num_output
super(InnerProduct, self).__init__(self, name, kwargs)
if weight_filler is None:
weight_filler = Filler('xavier')
self.p.inner_product_param.weight_filler.CopyFrom(weight_filler.filler_param)
if bias_filler is None:
bias_filler = Filler('constant', 0.)
self.p.inner_product_param.bias_filler.CopyFrom(bias_filler.filler_param)
class LRN(Layer):
def __init__(self, name, **kwargs):
super(LRN, self).__init__(self, name, kwargs)
class LstmUnit(Layer):
def __init__(self, name, num_cells, weight_filler=None, **kwargs):
super(LstmUnit, self).__init__(self, name, kwargs)
self.p.lstm_unit_param.num_cells = num_cells
if weight_filler is None:
weight_filler = Filler('uniform', 0.1)
self.p.lstm_unit_param.input_weight_filler.CopyFrom(
weight_filler.filler_param)
self.p.lstm_unit_param.input_gate_weight_filler.CopyFrom(
weight_filler.filler_param)
self.p.lstm_unit_param.forget_gate_weight_filler.CopyFrom(
weight_filler.filler_param)
self.p.lstm_unit_param.output_gate_weight_filler.CopyFrom(
weight_filler.filler_param)
class L1Loss(LossLayer):
def __init__(self, name, **kwargs):
super(L1Loss, self).__init__(self, name, kwargs)
class NumpyData(DataLayer):
def __init__(self, name, data, **kwargs):
super(NumpyData, self).__init__(self, name, kwargs)
from apollocaffe import make_numpy_data_param
import numpy as np
#self.p.rp.ParseFromString(make_numpy_data_param(np.array(data, dtype=np.float32)).SerializeToString())
self.p = make_numpy_data_param(self.p, np.array(data, dtype=np.float32))
class Pooling(Layer):
def __init__(self, name, pool='MAX', **kwargs):
super(Pooling, self).__init__(self, name, kwargs)
if pool is not None:
if pool == 'MAX':
self.p.pooling_param.pool = caffe_pb2.PoolingParameter.MAX
elif pool == 'AVE':
self.p.pooling_param.pool = caffe_pb2.PoolingParameter.AVE
elif pool == 'STOCHASTIC':
self.p.pooling_param.pool = caffe_pb2.PoolingParameter.STOCHASTIC
else:
raise ValueError('Unknown pooling method')
class Power(Layer):
def __init__(self, name, **kwargs):
super(Power, self).__init__(self, name, kwargs)
class ReLU(Layer):
def __init__(self, name, **kwargs):
super(ReLU, self).__init__(self, name, kwargs)
class Softmax(Layer):
def __init__(self, name, **kwargs):
super(Softmax, self).__init__(self, name, kwargs)
class SoftmaxWithLoss(LossLayer):
def __init__(self, name, **kwargs):
super(SoftmaxWithLoss, self).__init__(self, name, kwargs)
class Accuracy(Layer):
def __init__(self, name, **kwargs):
super(Accuracy, self).__init__(self, name, kwargs)
class Transpose(Layer):
def __init__(self, name, **kwargs):
super(Transpose, self).__init__(self, name, kwargs)
class Unknown(Layer):
def __init__(self, p):
self.p = p
class Wordvec(Layer):
def __init__(self, name, dimension, vocab_size, weight_filler=None, **kwargs):
kwargs['dimension'] = dimension
kwargs['vocab_size'] = vocab_size
super(Wordvec, self).__init__(self, name, kwargs)
if weight_filler is None:
weight_filler = Filler('uniform', 0.1)
self.p.wordvec_param.weight_filler.CopyFrom(weight_filler.filler_param)
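# Minimal sketch of how these wrappers are typically composed (illustrative only; the
# net.f() forward API and the 'bottoms' argument are assumed from apollocaffe, not defined here):
#   net.f(NumpyData('data', data=batch))
#   net.f(Convolution('conv1', (5, 5), 32, bottoms=['data']))
#   net.f(ReLU('relu1', bottoms=['conv1']))
#   net.f(InnerProduct('fc1', 10, bottoms=['relu1']))
#   net.f(SoftmaxWithLoss('loss', bottoms=['fc1', 'label']))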
|
bsd-2-clause
| -2,210,375,708,227,604,200
| 39.853933
| 111
| 0.630226
| false
| 3.476099
| false
| false
| false
|
mbiciunas/nix
|
test/config/test_config_tags.py
|
1
|
3679
|
# Nix
# Copyright (c) 2017 Mark Biciunas.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from config.config_tag import ConfigTag
from config.config_tags import ConfigTags
from utility.nix_error import NixError
class TestConfigTags:
_TAG_VALID_1 = "tag1"
_TAG_VALID_2 = "tag2"
_TAG_INVALID_1 = "bad_tag_1"
_TAG_INVALID_2 = "bad_tag_2"
_TAG_VALID_LIST = [_TAG_VALID_1, _TAG_VALID_2]
_TAG_INVALID_LIST = [_TAG_INVALID_1, _TAG_INVALID_2]
_TAG_MIX_LIST = [_TAG_INVALID_1, _TAG_VALID_1, _TAG_INVALID_2, _TAG_VALID_2]
def test_exist(self, config_valid):
_tags = config_valid.config().get_tags()
assert _tags.exist(self._TAG_VALID_1), "Tag not found: {}".format(self._TAG_VALID_1)
assert not _tags.exist(self._TAG_INVALID_1), "Non existing tag found: {}".format(self._TAG_INVALID_1)
def test_get_invalid_tags(self, config_valid):
_tags = config_valid.config().get_tags()
_result = _tags.get_invalid_tags(self._TAG_VALID_LIST)
assert len(_result) == 0, "Valid Tags found as invalid: {}".format(_result)
_result = _tags.get_invalid_tags(self._TAG_INVALID_LIST)
assert len(_result) == len(self._TAG_INVALID_LIST), "Invalid Tags found as valid: {}".format(_result)
_result = _tags.get_invalid_tags(self._TAG_MIX_LIST)
_correct = [x for x in self._TAG_MIX_LIST if x not in self._TAG_VALID_LIST]
assert len(_result) == len(_correct), "Mix of valid and invalid Tags wrong: {}".format(_result)
def test_insert(self, config_valid):
_tags = config_valid.config().get_tags()
_tag = _tags.insert()
assert type(_tag) is ConfigTag
        assert _tag.get_name() == "", "Tag name should be empty, contains: {}".format(_tag.get_name())
        assert _tag.get_desc() == "", "Tag description should be empty, contains: {}".format(_tag.get_desc())
def test_delete(self, config_valid):
_tags = config_valid.config().get_tags()
_tags.delete(self._TAG_VALID_1)
with pytest.raises(NixError):
_tags.delete(self._TAG_INVALID_1)
def test_list(self, config_valid):
_tags = config_valid.config().get_tags()
_tag_list = _tags.list()
assert len(_tag_list) == config_valid.get_count_tags()
def test_find(self, config_valid):
_tags = config_valid.config().get_tags()
_tag = _tags.find(self._TAG_VALID_1)
assert _tag.get_name() == self._TAG_VALID_1
with pytest.raises(NixError):
_tags.find(self._TAG_INVALID_1)
def test_export_data(self, config_valid):
_tags = config_valid.config().get_tags()
_export = _tags.export_data()
assert type(_export) == list
assert len(_export) == config_valid.get_count_tags()
def test_import_data(self, config_valid):
_tags = config_valid.config().get_tags()
_export = _tags.export_data()
_tags_new = ConfigTags()
_tags_new.import_data(_export)
assert len(_tags_new.list()) == config_valid.get_count_tags()
|
gpl-3.0
| -418,231,377,767,631,170
| 34.375
| 109
| 0.638761
| false
| 3.390783
| true
| false
| false
|
ethancaballero/neural-engineers-first-attempt
|
adaptive_attention.py
|
1
|
9391
|
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell, seq2seq
from utils import get_seq_length, _add_gradient_noise, _position_encoding, _xavier_weight_init, _last_relevant, batch_norm
#from https://github.com/DeNeutoy/act-rte-inference/blob/master/AdaptiveIAAModel.py
class Adaptive_Episodes_Config(object):
init_scale = 0.05
learning_rate = 0.001
max_grad_norm = 5
num_layers = 2
num_steps = 20
encoder_size = 128
inference_size = 256
max_epoch = 4
max_max_epoch = 3
keep_prob = 0.8
lr_decay = 0.8
batch_size = 32
vocab_size = 10000
bidirectional = False
embedding_size = 300
embedding_reg = 0.0001
train_embeddings = True
use_embeddings = False
eps = 0.1
max_computation = 20
step_penalty = 0.00001
#class AdaptiveIAAModel(object):
class Adaptive_Episodes(object):
""" Implements Iterative Alternating Attention for Machine Reading
http://arxiv.org/pdf/1606.02245v3.pdf """
def __init__(self, config, pretrained_embeddings=None,
update_embeddings=True, is_training=False):
self.config = config
def gate_mechanism(self, gate_input, scope):
with tf.variable_scope(scope):
if self.bidirectional:
size = 3*2*self.config.encoder_size + self.hidden_size
out_size = 2*self.config.encoder_size
else:
size = 3*self.config.encoder_size + self.hidden_size
out_size = self.config.encoder_size
hidden1_w = tf.get_variable("hidden1_w", [size, size])
hidden1_b = tf.get_variable("hidden1_b", [size])
hidden2_w = tf.get_variable("hidden2_w", [size, size])
hidden2_b = tf.get_variable("hidden2_b", [size])
sigmoid_w = tf.get_variable("sigmoid_w", [size, out_size])
sigmoid_b = tf.get_variable("sigmoid_b", [out_size])
if self.config.keep_prob < 1.0 and self.is_training:
gate_input = tf.nn.dropout(gate_input, self.config.keep_prob)
hidden1 = tf.nn.relu(tf.matmul(gate_input, hidden1_w) + hidden1_b)
if self.config.keep_prob < 1.0 and self.is_training:
hidden1 = tf.nn.dropout(hidden1, self.config.keep_prob)
hidden2 = tf.nn.relu(tf.matmul(hidden1, hidden2_w) + hidden2_b)
gate_output = tf.nn.sigmoid(tf.matmul(hidden2, sigmoid_w) + sigmoid_b)
return gate_output
def get_attention(self, prev_memory, fact_vec):
"""Use question vector and previous memory to create scalar attention for current fact"""
with tf.variable_scope("attention", reuse=True, initializer=_xavier_weight_init()):
W_1 = tf.get_variable("W_1")
b_1 = tf.get_variable("bias_1")
W_2 = tf.get_variable("W_2")
b_2 = tf.get_variable("bias_2")
features = [fact_vec*prev_memory, tf.abs(fact_vec - prev_memory)]
feature_vec = tf.concat(1, features)
attention = tf.matmul(tf.tanh(tf.matmul(feature_vec, W_1) + b_1), W_2) + b_2
return attention
def _attention_GRU_step(self, rnn_input, h, g):
"""Implement attention GRU as described by https://arxiv.org/abs/1603.01417"""
with tf.variable_scope("attention_gru", reuse=True, initializer=_xavier_weight_init()):
Wr = tf.get_variable("Wr")
Ur = tf.get_variable("Ur")
br = tf.get_variable("bias_r")
W = tf.get_variable("W")
U = tf.get_variable("U")
bh = tf.get_variable("bias_h")
r = tf.sigmoid(tf.matmul(rnn_input, Wr) + tf.matmul(h, Ur) + br)
h_hat = tf.tanh(tf.matmul(rnn_input, W) + r*tf.matmul(h, U) + bh)
rnn_output = g*h_hat + (1-g)*h
return rnn_output
#analogous to inference_step
def generate_episode(self, batch_mask, prob_compare, prob, counter, episode, fact_vecs, acc_states, counter_int, weight_container, bias_container):
"""Generate episode by applying attention to current fact vectors through a modified GRU"""
fact_vecs_t = tf.unpack(tf.transpose(fact_vecs, perm=[1,0,2]))
'''TRY REPLACING acc_states WITH episode AND SEE WHICH WORKS BETTER'''
attentions = [tf.squeeze(self.get_attention(acc_states, fv), squeeze_dims=[1]) for fv in fact_vecs_t]
attentions = tf.transpose(tf.pack(attentions))
softs = tf.nn.softmax(attentions)
softs = tf.split(1, self.max_input_len, softs)
gru_outputs = []
# set initial state to zero
h = tf.zeros((self.batch_size, self.hidden_size))
# use attention gru
for i, fv in enumerate(fact_vecs_t):
h = self._attention_GRU_step(fv, h, softs[i])
gru_outputs.append(h)
# extract gru outputs at proper index according to input_lens
gru_outputs = tf.pack(gru_outputs)
gru_outputs = tf.transpose(gru_outputs, perm=[1,0,2])
#analogous to output, new_state = self.inference_cell(input,state)
episode = _last_relevant(gru_outputs, self.input_len_placeholder)
''' # TARGET_SIDE ATTENTION
episode = self.generate_episode(prev_memory, fact_vecs, concat_all)
'''
p = tf.squeeze(tf.sigmoid(self.shared_linear_layer(episode, 1, True)))
new_batch_mask = tf.logical_and(tf.less(prob + p,self.one_minus_eps),batch_mask)
new_float_mask = tf.cast(new_batch_mask, tf.float32)
prob += p * new_float_mask
prob_compare += p * tf.cast(batch_mask, tf.float32)
'''based on github.com/tensorflow/tensorflow/issues/5608#issuecomment-260549420'''
#untied
Wt = weight_container.read(counter_int)
bt = bias_container.read(counter_int)
#tied
#Wt = weight_container.read(0)
#bt = bias_container.read(0)
counter_int+=1
def use_remainder():
remainder = tf.constant(1.0, tf.float32,[self.batch_size]) - prob
remainder_expanded = tf.expand_dims(remainder,1)
tiled_remainder = tf.tile(remainder_expanded,[1,self.hidden_size])
acc_state = tf.nn.relu(tf.matmul(tf.concat(1, [acc_states, episode * tiled_remainder]), Wt) + bt)
return acc_state
def normal():
p_expanded = tf.expand_dims(p * new_float_mask,1)
tiled_p = tf.tile(p_expanded,[1,self.hidden_size])
acc_state = tf.nn.relu(tf.matmul(tf.concat(1, [acc_states, episode * tiled_p]), Wt) + bt)
return acc_state
counter += tf.constant(1.0,tf.float32,[self.batch_size]) * new_float_mask
counter_condition = tf.less(counter,self.N)
condition = tf.reduce_any(tf.logical_and(new_batch_mask,counter_condition))
acc_state = tf.cond(condition, normal, use_remainder)
'''ADD MECHANISM TO INCREASE HALT PROB IF MULTIPLE SIMILAR ATTENTION MASKS IN A ROW;
would be the difference between consecutive attention masks
        based on this comment: reddit.com/r/MachineLearning/comments/59sfz8/research_learning_to_reason_with_adaptive/d9bgqxw/'''
return (new_batch_mask, prob_compare, prob, counter, episode, fact_vecs, acc_state, counter_int, weight_container, bias_container)
#analogous to do_inference_steps
def do_generate_episodes(self, prev_memory, fact_vecs, batch_size, hidden_size, max_input_len, input_len_placeholder, max_num_hops, epsilon, weight_container, bias_container):
self.batch_size = batch_size
self.hidden_size = hidden_size
self.max_input_len = max_input_len
self.input_len_placeholder = input_len_placeholder
counter_int=tf.constant(0)
self.shared_linear_layer = tf.make_template('shared_linear_layer', tf.nn.rnn_cell._linear)
self.one_minus_eps = tf.constant(1.0 - epsilon, tf.float32,[self.batch_size])
self.N = tf.constant(max_num_hops, tf.float32,[self.batch_size])
prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
self.counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
acc_states = tf.zeros_like(prev_memory, tf.float32, name="state_accumulator")
batch_mask = tf.constant(True, tf.bool,[self.batch_size])
# While loop stops when this predicate is FALSE.
# Ie all (probability < 1-eps AND counter < N) are false.
pred = lambda batch_mask, prob_compare, prob,\
counter, prev_memory, fact_vecs, acc_state, counter_int, weight_container, bias_container:\
tf.reduce_any(
tf.logical_and(
tf.less(prob_compare,self.one_minus_eps),
tf.less(counter,self.N)))
# only stop if all of the batch have passed either threshold
# Do while loop iterations until predicate above is false.
_,_,remainders,iterations,_,_,state,_,_,_ = \
tf.while_loop(pred, self.generate_episode,
[batch_mask, prob_compare, prob,
counter, prev_memory, fact_vecs, acc_states, counter_int, weight_container, bias_container])
return state, remainders, iterations
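# Rough usage sketch (added for illustration; the tensors and containers are hypothetical):
# the adaptive loop keeps attending over fact_vecs until the accumulated halting probability
# exceeds 1 - epsilon or max_num_hops is reached, in the spirit of Adaptive Computation Time:
#   gen = Adaptive_Episodes(Adaptive_Episodes_Config())
#   state, remainders, iterations = gen.do_generate_episodes(
#       prev_memory, fact_vecs, batch_size, hidden_size, max_input_len,
#       input_lens, max_num_hops=20, epsilon=0.1,
#       weight_container=W_ta, bias_container=b_ta)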
|
mit
| 8,711,310,250,242,839,000
| 39.658009
| 179
| 0.621127
| false
| 3.376843
| true
| false
| false
|
TAlonglong/trollduction-test
|
trollduction/tests/test_xml_read.py
|
1
|
4230
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Martin Raspaud
# Author(s):
# Martin Raspaud <martin.raspaud@smhi.se>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test xml_read.py
"""
import unittest
xmlstuff = """<?xml version="1.0" encoding='utf-8'?>
<?xml-stylesheet type="text/xsl" href="prodlist2.xsl"?>
<!-- This config is used by Trollduction.-->
<product_config>
<metadata>
<platform>noaa</platform>
<number>15</number>
</metadata>
<common>
<output_dir>/tmp</output_dir>
</common>
<variables>
<output_dir id="local_sir">/local_disk/data/sir</output_dir>
<output_dir id="sir">/local_disk/data/out/sir</output_dir>
<output_dir id="rgb">/local_disk/data/out/rgb</output_dir>
<output_dir id="tmp">/tmp</output_dir>
</variables>
<product_list>
<!-- dump to netcdf -->
<!-- calibrated, satellite projection -->
<dump>
<file output_dir="sir" format="netcdf4">{time:%Y%m%d_%H%M}_{platform}{satnumber}.nc</file>
</dump>
<area id="eurol" name="Europe_large">
<!-- Generate the product only if sun is above the horizon at the
defined longitude/latitude -->
<product id="overview" name="overview" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file>{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="natural" name="dnc" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file>{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="green_snow" name="green_snow" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file>{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="red_snow" name="red_snow" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file format="png">{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="cloudtop" name="cloudtop">
<file format="png">{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<!-- Generate only if the Sun is below the horizon -->
<product id="night_overview" name="night_overview" sunzen_night_minimum="90" sunzen_lonlat="25, 60">
<file format="png">{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="night_fog" name="night_fog" sunzen_night_minimum="90" sunzen_lonlat="25, 60">
<file>{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
</area>
</product_list>
</product_config>
"""
from trollduction.xml_read import ProductList
from StringIO import StringIO
class TestProductList(unittest.TestCase):
# def test_vars(self):
# pconfig = ProductList(StringIO(xmlstuff))
# self.assertEquals(pconfig.vars,
# {'output_dir': {'local_sir': '/local_disk/data/sir',
# 'rgb': '/local_disk/data/out/rgb',
# 'sir': '/local_disk/data/out/sir',
# 'tmp': '/tmp'}})
# dump_item = pconfig.prodlist.findall('./dump/file')[0]
# self.assertEquals(dump_item.attrib["output_dir"],
# '/local_disk/data/out/sir')
pass
def suite():
"""The suite for test_xml_read
"""
loader = unittest.TestLoader()
mysuite = unittest.TestSuite()
mysuite.addTest(loader.loadTestsFromTestCase(TestProductList))
return mysuite
|
gpl-3.0
| 3,548,981,223,696,637,000
| 34.546218
| 106
| 0.623404
| false
| 3.317647
| true
| false
| false
|
mbusb/multibootusb
|
scripts/config.py
|
1
|
2054
|
#!/usr/bin/env python3
# Name: config.py
# Purpose: Module to share important variables between various modules. Mainly included so as not to call many
# functions again and again
# Authors: Sundar
# Licence: This file is a part of multibootusb package. You can redistribute it or modify
# under the terms of GNU General Public License, v.2 or above
iso_link = ""
usb_disk = None
usb_mount = ""
usb_uuid = ""
usb_label = ""
usb_details = ''
image_path = None
persistence = 0
persistence_available = False
persistence_max_size = 0
distro = ""
status_text = ""
percentage = 0
syslinux_version = ''
uninstall_distro_dir_name = ""
uninstall_distro_dir_path = ""
iso_file_list = ''
iso_bin_dir = ''
process_exist = None
yes = False
cli_dd = False
cli_syslinux = False
usb_gpt = ''
imager_iso_link = ""
imager_usb_disk_selected = ""
imager_lock = ""
imager_percentage = ""
imager_status_text = ""
imager_return = ""
install_size = ""
editors_linux = ["xdg-open", "gedit", "kate", "kwrite"]
editors_win = ["notepad++.exe", "notepad.exe"]
imager_usb_disk = []
remounted_partitions = []
debug = False
# protected_drives = ['C:','D:','E:', '/dev/sda', '/dev/sdb', '/dev/sdc']
# If turned off, qemu will be sought at a few preset locations
# first before deciding to use the bundled exe.
# Set 'qemu_exe_path' to explicitly specify.
qemu_use_builtin = True # Relevant on Windows only
# qemu_exe_path = r"C:\pkgs\qemu\qemu-system-x86_64.exe"
# Relevant on Windows only
# Enable QEMU accelaration by Intel HAXM hypervisor.
# Bundled QEMU does not support this.
# See https://www.qemu.org/2017/11/22/haxm-usage-windows/ for setup.
qemu_use_haxm = not qemu_use_builtin # Relevant on Windows only
# qemu_use_kvm = False
# qemu_bios = 'OVMF.fd'
def update_usb_mount(new_usb_details):
global usb_mount, usb_details
usb_mount = new_usb_details['mount_point'].replace('\\x20', ' ')
usb_details = new_usb_details
def add_remounted(usb_disk):
if usb_disk not in remounted_partitions:
remounted_partitions.append(usb_disk)
|
gpl-2.0
| -479,208,830,022,669,200
| 26.756757
| 111
| 0.689387
| false
| 2.938484
| false
| false
| false
|
kirmani/lockman
|
MC/pio.py
|
1
|
3202
|
import RPi.GPIO as GPIO
import requests
import time
import threading
import os
import base64
import string
import random
import datetime
"GPIO.setmode(BOARD)"
def closeLock():
p = GPIO.PWM(12,50) #sets pin 12 to PWM and sends 50 signals per second
p.start(7.5) #starts by sending a pulse at 7.5% to center the servo
p.ChangeDutyCycle(4.5) #sends a 4.5% pulse to turn the servo CCW
time.sleep(2)
p.stop()
def openLock():
p = GPIO.PWM(12,50) #sets pin 12 to PWM and sends 50 signals per second
p.start(7.5) #starts by sending a pulse at 7.5% to center the servo
p.ChangeDutyCycle(10.5) #sends a 4.5% pulse to turn the servo CCW
time.sleep(2)
p.stop()
def id_generator(size=25, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def checkStatus():
open = False
r = requests.put("http://api.codered.kirmani.io/lock/state", data = {"open": open})
GPIO.setmode(GPIO.BOARD)
GPIO.setup(12,GPIO.OUT)
r = requests.get('http://api.codered.kirmani.io/lock/list')
while True:
if exit.is_set():
            return  # end this worker thread ('thread.exit()' referenced a module that is never imported)
list = r.json()["result"]
print list
for id in list:
url = "http://api.codered.kirmani.io/lock/id/"+id
r = requests.get(url)
if id == "OVERRIDE":
action = r.json()["result"]["action"]
if action == "open":
print "WOO"
r = requests.delete(url)
if not open:
openLock()
open = True
r = requests.put("http://api.codered.kirmani.io/lock/state", data = {"open": open})
if action == "close":
print "CLOSING"
r = requests.delete(url)
if open:
closeLock()
open = False
r = requests.put("http://api.codered.kirmani.io/lock/state", data = {"open": open})
else:
status = r.json()["result"]["approved"]
waiting = r.json()["result"]["waiting"]
if waiting == False:
if status == True:
print "WOO"
r = requests.delete(url)
if not open:
openLock()
open = True
r = requests.put("http://api.codered.kirmani.io/lock/state", data = {"open": open})
if status == False:
print "BOO"
r = requests.delete(url)
r = requests.get('http://api.codered.kirmani.io/lock/list')
def checkInput():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.IN)
input = GPIO.input(7);
while True:
if exit.is_set():
            return  # end this worker thread ('thread.exit()' referenced a module that is never imported)
input = GPIO.input(7);
while input == True:
input = GPIO.input(7);
#code to activate camera
timestamp = time.strftime("%d-%m-%Y_%H:%M:%S")
filename = "/home/pi/timerecord/" + timestamp + ".png"
os.system("fswebcam -d /dev/video0 -r 680x480 --no-banner " + filename)
encoded = base64.b64encode(open(filename, "rb").read())
random = id_generator()
r = requests.post("http://api.codered.kirmani.io/lock/id/" + random, data = {"image":encoded, "time": timestamp})
exit = threading.Event()
exit.clear()
status = threading.Thread(target=checkStatus)
input = threading.Thread(target=checkInput)
status.start()
input.start()
try:
while True:
x=1
except KeyboardInterrupt:
exit.set()
GPIO.cleanup()
|
mit
| 1,823,985,232,699,865,300
| 30.087379
| 115
| 0.625859
| false
| 2.932234
| false
| false
| false
|
beeftornado/sentry
|
src/sentry/shared_integrations/client.py
|
1
|
10459
|
from __future__ import absolute_import
import logging
import requests
import sentry_sdk
import six
from collections import OrderedDict
from django.core.cache import cache
from bs4 import BeautifulSoup
from django.utils.functional import cached_property
from requests.exceptions import ConnectionError, Timeout, HTTPError
from sentry.http import build_session
from sentry.utils import metrics, json
from sentry.utils.hashlib import md5_text
from sentry.utils.decorators import classproperty
from .exceptions import ApiHostError, ApiTimeoutError, ApiError, UnsupportedResponseType
class BaseApiResponse(object):
text = ""
def __init__(self, headers=None, status_code=None):
self.headers = headers
self.status_code = status_code
def __repr__(self):
return u"<%s: code=%s, content_type=%s>" % (
type(self).__name__,
self.status_code,
self.headers.get("Content-Type", "") if self.headers else "",
)
@cached_property
def rel(self):
if not self.headers:
return {}
link_header = self.headers.get("Link")
if not link_header:
return {}
return {item["rel"]: item["url"] for item in requests.utils.parse_header_links(link_header)}
@classmethod
def from_response(self, response, allow_text=False):
if response.request.method == "HEAD":
return BaseApiResponse(response.headers, response.status_code)
        # XXX(dcramer): this doesn't handle leading spaces, but they're not common
        # paths so it's ok
if response.text.startswith(u"<?xml"):
return XmlApiResponse(response.text, response.headers, response.status_code)
elif response.text.startswith("<"):
if not allow_text:
raise ValueError(u"Not a valid response type: {}".format(response.text[:128]))
elif response.status_code < 200 or response.status_code >= 300:
raise ValueError(
u"Received unexpected plaintext response for code {}".format(
response.status_code
)
)
return TextApiResponse(response.text, response.headers, response.status_code)
# Some APIs will return JSON with an invalid content-type, so we try
# to decode it anyways
if "application/json" not in response.headers.get("Content-Type", ""):
try:
data = json.loads(response.text, object_pairs_hook=OrderedDict)
except (TypeError, ValueError):
if allow_text:
return TextApiResponse(response.text, response.headers, response.status_code)
raise UnsupportedResponseType(
response.headers.get("Content-Type", ""), response.status_code
)
else:
data = json.loads(response.text, object_pairs_hook=OrderedDict)
if isinstance(data, dict):
return MappingApiResponse(data, response.headers, response.status_code)
elif isinstance(data, (list, tuple)):
return SequenceApiResponse(data, response.headers, response.status_code)
else:
raise NotImplementedError
class TextApiResponse(BaseApiResponse):
def __init__(self, text, *args, **kwargs):
self.text = text
super(TextApiResponse, self).__init__(*args, **kwargs)
class XmlApiResponse(BaseApiResponse):
def __init__(self, text, *args, **kwargs):
self.xml = BeautifulSoup(text, "xml")
super(XmlApiResponse, self).__init__(*args, **kwargs)
class MappingApiResponse(dict, BaseApiResponse):
def __init__(self, data, *args, **kwargs):
dict.__init__(self, data)
BaseApiResponse.__init__(self, *args, **kwargs)
@property
def json(self):
return self
class SequenceApiResponse(list, BaseApiResponse):
def __init__(self, data, *args, **kwargs):
list.__init__(self, data)
BaseApiResponse.__init__(self, *args, **kwargs)
@property
def json(self):
return self
class BaseApiClient(object):
base_url = None
allow_text = False
allow_redirects = None
integration_type = None
log_path = None
datadog_prefix = None
cache_time = 900
def __init__(self, verify_ssl=True, logging_context=None):
self.verify_ssl = verify_ssl
self.logging_context = logging_context
@cached_property
def logger(self):
return logging.getLogger(self.log_path)
@classproperty
def name_field(cls):
return u"%s_name" % cls.integration_type
@classproperty
def name(cls):
return getattr(cls, cls.name_field)
def get_cache_prefix(self):
return u"%s.%s.client:" % (self.integration_type, self.name)
def track_response_data(self, code, span, error=None, resp=None):
metrics.incr(
u"%s.http_response" % (self.datadog_prefix),
sample_rate=1.0,
tags={self.integration_type: self.name, "status": code},
)
try:
span.set_http_status(int(code))
except ValueError:
span.set_status(code)
span.set_tag(self.integration_type, self.name)
extra = {
self.integration_type: self.name,
"status_string": six.text_type(code),
"error": six.text_type(error)[:256] if error else None,
}
extra.update(getattr(self, "logging_context", None) or {})
self.logger.info(u"%s.http_response" % (self.integration_type), extra=extra)
def build_url(self, path):
if path.startswith("/"):
if not self.base_url:
raise ValueError(u"Invalid URL: {}".format(path))
return u"{}{}".format(self.base_url, path)
return path
def _request(
self,
method,
path,
headers=None,
data=None,
params=None,
auth=None,
json=True,
allow_text=None,
allow_redirects=None,
timeout=None,
):
if allow_text is None:
allow_text = self.allow_text
if allow_redirects is None:
allow_redirects = self.allow_redirects
if allow_redirects is None: # is still None
allow_redirects = method.upper() == "GET"
if timeout is None:
timeout = 30
full_url = self.build_url(path)
metrics.incr(
u"%s.http_request" % self.datadog_prefix,
sample_rate=1.0,
tags={self.integration_type: self.name},
)
try:
with sentry_sdk.configure_scope() as scope:
parent_span_id = scope.span.span_id
trace_id = scope.span.trace_id
except AttributeError:
parent_span_id = None
trace_id = None
with sentry_sdk.start_transaction(
op=u"{}.http".format(self.integration_type),
name=u"{}.http_response.{}".format(self.integration_type, self.name),
parent_span_id=parent_span_id,
trace_id=trace_id,
sampled=True,
) as span:
try:
with build_session() as session:
resp = getattr(session, method.lower())(
url=full_url,
headers=headers,
json=data if json else None,
data=data if not json else None,
params=params,
auth=auth,
verify=self.verify_ssl,
allow_redirects=allow_redirects,
timeout=timeout,
)
resp.raise_for_status()
except ConnectionError as e:
self.track_response_data("connection_error", span, e)
raise ApiHostError.from_exception(e)
except Timeout as e:
self.track_response_data("timeout", span, e)
raise ApiTimeoutError.from_exception(e)
except HTTPError as e:
resp = e.response
if resp is None:
self.track_response_data("unknown", span, e)
self.logger.exception(
"request.error", extra={self.integration_type: self.name, "url": full_url}
)
raise ApiError("Internal Error", url=full_url)
self.track_response_data(resp.status_code, span, e)
raise ApiError.from_response(resp, url=full_url)
self.track_response_data(resp.status_code, span, None, resp)
if resp.status_code == 204:
return {}
return BaseApiResponse.from_response(resp, allow_text=allow_text)
# subclasses should override ``request``
def request(self, *args, **kwargs):
return self._request(*args, **kwargs)
def delete(self, *args, **kwargs):
return self.request("DELETE", *args, **kwargs)
def get_cached(self, path, *args, **kwargs):
query = ""
if kwargs.get("params", None):
query = json.dumps(kwargs.get("params"), sort_keys=True)
key = self.get_cache_prefix() + md5_text(self.build_url(path), query).hexdigest()
result = cache.get(key)
if result is None:
result = self.request("GET", path, *args, **kwargs)
cache.set(key, result, self.cache_time)
return result
def get(self, *args, **kwargs):
return self.request("GET", *args, **kwargs)
def patch(self, *args, **kwargs):
return self.request("PATCH", *args, **kwargs)
def post(self, *args, **kwargs):
return self.request("POST", *args, **kwargs)
def put(self, *args, **kwargs):
return self.request("PUT", *args, **kwargs)
def head(self, *args, **kwargs):
return self.request("HEAD", *args, **kwargs)
def head_cached(self, path, *args, **kwargs):
query = ""
if kwargs.get("params", None):
query = json.dumps(kwargs.get("params"), sort_keys=True)
key = self.get_cache_prefix() + md5_text(self.build_url(path), query).hexdigest()
result = cache.get(key)
if result is None:
result = self.head(path, *args, **kwargs)
cache.set(key, result, self.cache_time)
return result
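# --- Illustrative sketch (not part of the original module) --------------------------
# A minimal subclass showing how the hooks above are typically filled in by an
# integration. The "example" names and URL are assumptions for illustration only.
class ExampleApiClient(BaseApiClient):
    integration_type = "example"
    example_name = "Example"  # resolved through the `name_field`/`name` classproperties
    base_url = "https://api.example.com"
    log_path = "sentry.integrations.example"
    datadog_prefix = "integrations.example"
    def request(self, method, path, **kwargs):
        return self._request(method, path, **kwargs)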
|
bsd-3-clause
| 5,743,440,093,390,676,000
| 32.522436
| 100
| 0.574816
| false
| 4.140538
| false
| false
| false
|
ChromiumWebApps/chromium
|
tools/telemetry/telemetry/core/backends/chrome/extension_dict_backend.py
|
1
|
2602
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import re
from telemetry.core import extension_page
from telemetry.core.backends.chrome import inspector_backend
class ExtensionNotFoundException(Exception):
pass
class ExtensionDictBackend(object):
def __init__(self, browser_backend):
self._browser_backend = browser_backend
# Maps extension ids to ExtensionPage objects.
self._extension_dict = {}
def __getitem__(self, extension_id):
extension_object = self._extension_dict.get(extension_id)
if not extension_object:
extension_object = self._CreateExtensionObject(extension_id)
assert extension_object
self._extension_dict[extension_id] = extension_object
return extension_object
def __contains__(self, extension_id):
return extension_id in self.GetExtensionIds()
@staticmethod
def _ExtractExtensionId(url):
m = re.match(r"(chrome-extension://)([^/]+)", url)
assert m
return m.group(2)
@staticmethod
def _GetExtensionId(extension_info):
if 'url' not in extension_info:
return None
return ExtensionDictBackend._ExtractExtensionId(extension_info['url'])
def _CreateExtensionObject(self, extension_id):
extension_info = self._FindExtensionInfo(extension_id)
if not extension_info or not 'webSocketDebuggerUrl' in extension_info:
raise ExtensionNotFoundException()
return extension_page.ExtensionPage(
extension_id,
extension_info['url'],
self._CreateInspectorBackendForDebuggerUrl(
extension_info['webSocketDebuggerUrl']))
def _CreateInspectorBackendForDebuggerUrl(self, debugger_url):
return inspector_backend.InspectorBackend(self._browser_backend.browser,
self._browser_backend,
debugger_url)
def _FindExtensionInfo(self, extension_id):
for extension_info in self.GetExtensionInfoList():
if self._GetExtensionId(extension_info) == extension_id:
return extension_info
return None
def GetExtensionInfoList(self, timeout=None):
data = self._browser_backend.Request('', timeout=timeout)
return self._FilterExtensions(json.loads(data))
def _FilterExtensions(self, all_pages):
return [page_info for page_info in all_pages
if page_info['url'].startswith('chrome-extension://')]
def GetExtensionIds(self):
return map(self._GetExtensionId, self.GetExtensionInfoList())
|
bsd-3-clause
| -441,002,597,549,063,600
| 33.693333
| 76
| 0.701768
| false
| 4.258592
| false
| false
| false
|
mishbahr/django-users2
|
users/views.py
|
1
|
6157
|
from django.contrib import messages
from django.contrib.auth import get_user_model, login
from django.urls import reverse
from django.shortcuts import redirect, resolve_url
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from .compat import urlsafe_base64_decode
from .conf import settings
from .signals import user_activated, user_registered
from .utils import EmailActivationTokenGenerator, send_activation_email
try:
from django.contrib.sites.shortcuts import get_current_site
except ImportError: # pragma: no cover
from django.contrib.sites.models import get_current_site
if settings.USERS_SPAM_PROTECTION: # pragma: no cover
from .forms import RegistrationFormHoneypot as RegistrationForm
else:
from .forms import RegistrationForm
@csrf_protect
@never_cache
def register(request,
template_name='users/registration_form.html',
activation_email_template_name='users/activation_email.html',
activation_email_subject_template_name='users/activation_email_subject.html',
activation_email_html_template_name=None,
registration_form=RegistrationForm,
registered_user_redirect_to=None,
post_registration_redirect=None,
activation_from_email=None,
current_app=None,
extra_context=None):
if registered_user_redirect_to is None:
registered_user_redirect_to = getattr(settings, 'LOGIN_REDIRECT_URL')
if request.user.is_authenticated:
return redirect(registered_user_redirect_to)
if not settings.USERS_REGISTRATION_OPEN:
return redirect(reverse('users_registration_closed'))
if post_registration_redirect is None:
post_registration_redirect = reverse('users_registration_complete')
if request.method == 'POST':
form = registration_form(request.POST)
if form.is_valid():
user = form.save()
if settings.USERS_AUTO_LOGIN_AFTER_REGISTRATION:
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
elif not user.is_active and settings.USERS_VERIFY_EMAIL:
opts = {
'user': user,
'request': request,
'from_email': activation_from_email,
'email_template': activation_email_template_name,
'subject_template': activation_email_subject_template_name,
'html_email_template': activation_email_html_template_name,
}
send_activation_email(**opts)
user_registered.send(sender=user.__class__, request=request, user=user)
return redirect(post_registration_redirect)
else:
form = registration_form()
current_site = get_current_site(request)
context = {
'form': form,
'site': current_site,
'site_name': current_site.name,
'title': _('Register'),
}
if extra_context is not None: # pragma: no cover
context.update(extra_context)
return TemplateResponse(request, template_name, context)
def registration_closed(request,
template_name='users/registration_closed.html',
current_app=None,
extra_context=None):
context = {
'title': _('Registration closed'),
}
if extra_context is not None: # pragma: no cover
context.update(extra_context)
return TemplateResponse(request, template_name, context)
def registration_complete(request,
template_name='users/registration_complete.html',
current_app=None,
extra_context=None):
context = {
'login_url': resolve_url(settings.LOGIN_URL),
'title': _('Registration complete'),
}
if extra_context is not None: # pragma: no cover
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@never_cache
def activate(request,
uidb64=None,
token=None,
template_name='users/activate.html',
post_activation_redirect=None,
current_app=None,
extra_context=None):
context = {
'title': _('Account activation '),
}
if post_activation_redirect is None:
post_activation_redirect = reverse('users_activation_complete')
UserModel = get_user_model()
assert uidb64 is not None and token is not None
token_generator = EmailActivationTokenGenerator()
try:
uid = urlsafe_base64_decode(uidb64)
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
user.activate()
user_activated.send(sender=user.__class__, request=request, user=user)
if settings.USERS_AUTO_LOGIN_ON_ACTIVATION:
user.backend = 'django.contrib.auth.backends.ModelBackend' # todo - remove this hack
login(request, user)
messages.info(request, 'Thanks for registering. You are now logged in.')
return redirect(post_activation_redirect)
else:
title = _('Email confirmation unsuccessful')
context = {
'title': title,
}
if extra_context is not None: # pragma: no cover
context.update(extra_context)
return TemplateResponse(request, template_name, context)
def activation_complete(request,
template_name='users/activation_complete.html',
current_app=None,
extra_context=None):
context = {
'title': _('Activation complete'),
}
if extra_context is not None: # pragma: no cover
context.update(extra_context)
return TemplateResponse(request, template_name, context)
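# Illustrative wiring sketch (not part of this module): the 'users_register' and
# 'users_activate' pattern names below are assumptions, shown only to illustrate how
# `activate` receives its uidb64/token arguments; the other names are those reverse()d
# in the views above.
#
#     # urls.py
#     from django.urls import path, re_path
#     from users import views
#     urlpatterns = [
#         path('register/', views.register, name='users_register'),
#         path('register/closed/', views.registration_closed, name='users_registration_closed'),
#         path('register/complete/', views.registration_complete, name='users_registration_complete'),
#         re_path(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$',
#                 views.activate, name='users_activate'),
#         path('activate/complete/', views.activation_complete, name='users_activation_complete'),
#     ]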
|
bsd-3-clause
| -3,139,041,058,954,845,000
| 35.431953
| 97
| 0.637486
| false
| 4.404149
| false
| false
| false
|
JustinTulloss/harmonize.fm
|
masterapp/masterapp/model/user.py
|
1
|
22317
|
# Justin Tulloss
#
# Putting user in its own file since it's huge
import logging
from pylons import cache, request, session, c
from pylons.templating import render
from decorator import decorator
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Table, sql
from sqlalchemy.sql import func, select, join, or_, and_
from sqlalchemy.orm import relation, join, synonym, aliased
from datetime import datetime
from . import (
metadata,
Session,
songs_table,
artists_table,
playlists_table,
spotlights_table,
Artist,
Album,
Song,
SongOwner,
RemovedOwner,
Playlist,
Spotlight,
SpotlightComment,
BlogEntry,
SongStat,
Recommendation
)
from facebook.wsgi import facebook
from facebook import FacebookError
from masterapp.lib import fblogin
from masterapp.lib.fbaccess import fbaccess
from operator import itemgetter, attrgetter
import time
log = logging.getLogger(__name__)
Base = declarative_base(metadata=metadata)
class User(Base):
"""
User class that abstracts away all information that deals with a user. It
pulls that data from whereever it might live, and takes care of all caching
and refetching of that data as well.
At the risk of being inconsistent, this is also the first mapped class to
take advantage of sqlalchemy's declarative extension, which is included with
sqlalchemy .5
"""
# Declarative constructs
__table__ = Table("users", Base.metadata, autoload=True)
__mapper_args__ = {'exclude_properties': ['nowplayingid', 'name']}
_nowplayingid = __table__.c.nowplayingid
_name = __table__.c.name
playlists = relation(Playlist, order_by=playlists_table.c.name)
fbid = None
fbinfo = None
listeningto = None
fbcache = None
fbfriendscache = None
fballfriendscache = None
present_mode = False
def __init__(self, fbid, **kws):
Base.__init__(self, **kws)
self.fbid = fbid
self.premium = False
def personal_cache(type=None, expiretime=None, addsession = False):
def wrapper(func, self, *args, **kwargs):
c = cache.get_cache('%s.%s' %
(func.__module__, func.__name__))
funcargs = {
'key': self.id,
'createfunc': lambda: func(self, *args, **kwargs)
}
if type:
funcargs['type'] = type
if expiretime:
funcargs['expiretime'] = expiretime
val = c.get_value(**funcargs)
if addsession:
if hasattr(val, '__iter__'):
for r in xrange(0, len(val)):
val[r] = Session.merge(val[r], dont_load=True)
else:
val = Session.merge(val, dont_load=True)
return val
return decorator(wrapper)
@decorator
def fbfriends(func, self, *args, **kwargs):
self._setup_fbfriends_cache()
self._fbfriends = self.fbfriendscache.get_value(
key = self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fbfriends
)
try:
return func(self, *args, **kwargs)
except:
# Try invalidating the cache
self.fbfriendscache.remove_value(self.fbid)
self._setup_fbfriends_cache()
self._fbfriends = self.fbfriendscache.get_value(self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fbfriends
)
return func(self, *args, **kwargs)
@decorator
def fballfriends(func, self, *args, **kwargs):
self._setup_fballfriends_cache()
self._fballfriends = self.fballfriendscache.get_value(self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fballfriends
)
try:
return func(self, *args, **kwargs)
except:
# Try invalidating the cache
self.fballfriendscache.remove_value(self.fbid)
self._setup_fballfriends_cache()
self._fballfriends = self.fballfriendscache.get_value(
key = self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fballfriends
)
return func(self, *args, **kwargs)
@decorator
def fbattr (func, self, *args, **kwargs):
self._setup_fbinfo_cache()
self.fbinfo = self.fbcache.get_value(
key = self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fbinfo
)
try:
return func(self, *args, **kwargs)
except:
self.fbcache.remove_value(self.fbid)
self.fbcache[self.fbid] = self._get_fbinfo()
self.fbinfo = self.fbcache.get_value(
key = self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fbinfo
)
return func(self, *args, **kwargs)
def _get_caches(self):
self.fbcache = cache.get_cache('fbprofile')
self.fbfriendscache = cache.get_cache('fbfriends')
self.fballfriendscache = cache.get_cache('fballfriends')
# Facebook session_key_expires is not set for some reason
#self._fbexpiration = facebook.session_key_expires - time.time()
self._fbexpiration = 24*60*60 #24 hours
def _setup_fbinfo_cache(self):
if not self.fbcache:
self._get_caches()
def _setup_fbfriends_cache(self):
if not self.fbfriendscache:
self._get_caches()
def _setup_fballfriends_cache(self):
if not self.fballfriendscache:
self._get_caches()
@fbaccess
def _get_fbinfo(self):
fields = [
'name',
'first_name',
'pic',
'pic_big',
'pic_square',
'music',
'sex',
'has_added_app'
]
info = facebook.users.getInfo(self.fbid, fields=fields)[0]
return info
@fbaccess
def _get_fbfriends(self):
olduid = facebook.uid
oldsession = facebook.session_key
if self.fbid != int(facebook.uid):
facebook.uid = unicode(self.fbid)
facebook.session_key = self.fbsession
log.debug("Querying for wrong user's friends, trying to sub in their session")
try:
try:
ids = facebook.friends.getAppUsers()
except FacebookError, e:
if e.code == 102:
if oldsession != facebook.session_key:
return [] #XXX: This is bad, but it fixes errors
if len(ids) == 0:
ids = []
if session.get('present') == True:
ids.extend([1909354, 1908861])
# I'm banking on caches in a big way here. I'm assuming that the vast
# majority of additional facebook information will be cached per user,
# so when we're actually accessing the attributes of these users 1 by 1,
# it won't be too expensive.
friendor = or_()
if ids:
for id in ids:
friendor.append(User.fbid == id)
users = Session.query(User).filter(friendor).order_by(User._name)
else:
return []
finally:
facebook.uid = olduid
facebook.session_key = oldsession
return users.all()
@fbaccess
def _get_fballfriends(self):
ids = facebook.friends.get()
users = facebook.users.getInfo(ids)
return sorted(users, key=itemgetter('name'))
@fbattr
def get_name(self):
if self._name != self.fbinfo['name']:
self._name = self.fbinfo['name']
Session.add(self)
Session.commit()
return self._name
name = property(get_name)
@fbattr
def get_firstname(self):
return self.fbinfo['first_name']
firstname = property(get_firstname)
@fbattr
def get_picture(self):
return self.fbinfo['pic']
picture = property(get_picture)
@fbattr
def get_bigpicture(self):
return self.fbinfo['pic_big']
bigpicture = property(get_bigpicture)
@fbattr
def get_swatch(self):
return self.fbinfo['pic_square']
swatch = property(get_swatch)
@fbattr
def get_musictastes(self):
return self.fbinfo['music']
musictastes = property(get_musictastes)
@fbattr
def get_sex(self):
return self.fbinfo['sex']
sex = property(get_sex)
@fbattr
def get_hasfbapp(self):
return self.fbinfo['has_added_app']
hasfbapp = property(get_hasfbapp)
def are_friends(self, user):
return user in self.friends
@fbfriends
def get_friends(self):
if self._fbfriends:
for i in xrange(0, len(self._fbfriends)):
self._fbfriends[i]= Session.merge(self._fbfriends[i], dont_load=True)
return self._fbfriends
else:
return []
friends = property(get_friends)
@fballfriends
def get_all_friends(self):
return self._fballfriends
allfriends = property(get_all_friends)
def is_friends_with(self, someguy):
"""
Tells you if a user is friends with another user.
"""
if isinstance(someguy, User):
if someguy.id == self.id:
return True
else:
for friend in self.friends:
if friend.id == someguy.id:
return True
return False
else:
if someguy['uid'] == self.fbid:
return True
else:
for friend in self.friends:
if friend.fbid == someguy['uid']:
return True
return False
@personal_cache(expiretime=600, type='memory')
def get_songcount(self):
count = Session.query(func.sum(AlbumCounts.songcount).label('songs')).\
filter(AlbumCounts.userid == self.id).first().songs
if count:
return int(count)
else:
return 0
songcount = property(get_songcount)
@personal_cache(expiretime=600, type='memory')
def get_albumcount(self):
return Session.query(func.count(AlbumCounts.albumid).label('albums')).\
filter(AlbumCounts.userid == self.id).first().albums
albumcount = property(get_albumcount)
def get_nowplaying(self):
return self._nowplaying
def set_nowplaying(self, song):
self._nowplayingid = song.id
stats = Session.query(SongStat).\
filter(SongStat.song == song).\
filter(SongStat.user == self)
if session.has_key('src'):
stats = stats.filter(SongStat.source == session['src'])
stats = stats.first()
if not stats:
stats = SongStat(user = self, song = song)
stats.playcount = stats.playcount + 1
stats.lastplayed = datetime.now()
if session.has_key('src'):
stats.source = session['src']
Session.add(stats)
nowplaying = property(get_nowplaying,set_nowplaying)
def get_url(self):
return 'http://%s/player#/people/profile/%d' % (request.host, self.id)
url = property(get_url)
def get_top_10_artists(self):
totalcount = Session.query(Artist.id, Artist.name,
func.sum(SongStat.playcount).label('totalcount')
)
totalcount = totalcount.join([Artist.songs, SongStat])
totalcount = totalcount.filter(SongStat.uid == self.id)
# this excludes any songs listened to on friend radio:
totalcount = totalcount.filter(or_(
SongStat.source == SongStat.FROM_OWN_LIBRARY,
SongStat.source == SongStat.FROM_BROWSE,
SongStat.source == SongStat.FROM_SPOTLIGHT,
SongStat.source == None))
totalcount = totalcount.group_by(Artist.id)
totalcount = totalcount.order_by(sql.desc('totalcount')).limit(10)
return totalcount.all()
top_10_artists = property(get_top_10_artists)
@personal_cache(expiretime=600, type='memory', addsession=True)
def get_feed_entries(self):
max_count=20
entries = Session.query(BlogEntry)[:max_count]
myor = or_()
for friend in self.friends:
myor.append(Spotlight.uid == friend.id)
if len(myor)>0:
entries.extend(Session.query(Spotlight).filter(
and_(myor, Spotlight.active==True)).\
order_by(sql.desc(Spotlight.timestamp))\
[:max_count])
commentor = or_()
spotlightor = or_()
for friend in self.friends:
commentor.append(SpotlightComment.uid == friend.id)
spotlightor.append(Spotlight.uid == friend.id)
if len(commentor)>0 and len(spotlightor)>0:
entries.extend(Session.query(SpotlightComment).\
join((Spotlight, SpotlightComment.spotlight)).\
filter(and_(
SpotlightComment.uid!=session['userid'],
or_(Spotlight.uid==session['userid'],
and_(commentor, spotlightor)),
Spotlight.active == True)).\
order_by(sql.desc(SpotlightComment.timestamp))[:max_count])
entries.extend(Session.query(Recommendation).\
filter(and_(
Recommendation.recommendeefbid == self.fbid,
Recommendation.active == True))[:max_count])
def sort_by_timestamp(x, y):
if x.timestamp == None:
if y.timestamp == None:
return 0
return 1
elif y.timestamp == None:
return -1
elif x.timestamp > y.timestamp:
return -1
elif x.timestamp == y.timestamp:
return 0
else:
return 1
entries.sort(sort_by_timestamp)
return entries[:max_count]
feed_entries = property(get_feed_entries)
def _build_song_query(self):
from masterapp.config.schema import dbfields
query = Session.query(SongOwner.uid.label('Friend_id'),
User._name.label('Friend_name'), *dbfields['song'])
query = query.join(Song.album).reset_joinpoint()
query = query.join(Song.artist).reset_joinpoint()
query = query.join(User).filter(SongOwner.uid == self.id)
return query
def get_song_query(self):
query = self._build_song_query()
return query.distinct()
song_query = property(get_song_query)
def get_song_count(self):
query = Session.query(SongOwner).filter(SongOwner.uid == self.id).count()
return query
song_count = property(get_song_count)
def get_album_query(self):
from masterapp.config.schema import dbfields
# Number of songs available on this album subquery
havesongs = Session.query(Album.id.label('albumid'),
func.count(Song.id).label('Album_havesongs'),
func.sum(Song.length).label('Album_length')
).join(Album.songs, SongOwner).filter(SongOwner.uid == self.id)
havesongs = havesongs.group_by(Album.id).subquery()
query = Session.query(SongOwner.uid.label('Friend_id'), havesongs.c.Album_havesongs,
havesongs.c.Album_length, User._name.label('Friend_name'),
*dbfields['album'])
joined = join(Album, havesongs, Album.id == havesongs.c.albumid)
query = query.select_from(joined)
query = query.join(Album.artist).reset_joinpoint()
query = query.join(Album.songs, SongOwner, SongOwner.user).filter(SongOwner.uid == self.id)
query = query.group_by(Album)
return query
album_query = property(get_album_query)
def get_playlist_query(self):
from masterapp.config.schema import dbfields
query = Session.query(Playlist.ownerid.label('Friend_id'),
*dbfields['playlist']).\
filter(Playlist.ownerid == self.id)
return query
playlist_query = property(get_playlist_query)
def get_artist_query(self):
from masterapp.config.schema import dbfields
# Build the main query
query = Session.query(SongOwner.uid.label('Friend_id'),
User._name.label('Friend_name'),
ArtistCounts.songcount.label('Artist_availsongs'),
ArtistCounts.albumcount.label('Artist_numalbums'),
*dbfields['artist'])
query = query.join(Artist.albums, Song, SongOwner, SongOwner.user).\
join((ArtistCounts, and_(
SongOwner.uid == ArtistCounts.userid,
Artist.id == ArtistCounts.artistid,
Artist.id == Album.artistid)))
query = query.filter(SongOwner.uid == self.id)
query = query.group_by(Artist)
return query
artist_query = property(get_artist_query)
def get_album_by_id(self, id):
qry = self.album_query
qry = qry.filter(Album.id == id)
return qry.first()
def get_active_spotlights(self):
return Session.query(Spotlight).filter(sql.and_(\
Spotlight.uid==self.id, Spotlight.active==True)).\
order_by(sql.desc(Spotlight.timestamp))
active_spotlights = property(get_active_spotlights)
def get_inactive_spotlights(self):
return Session.query(Spotlight).filter(sql.and_(
Spotlight.uid==self.id, Spotlight.active==False)).\
order_by(sql.desc(Spotlight.timestamp))
inactive_spotlights = property(get_inactive_spotlights)
def get_playlist_by_id(self, id):
qry = self.playlist_query
qry = qry.filter(Playlist.id == id)
return qry.first()
def get_song_by_id(self, id):
return self.song_query.filter(Song.id == id).first()
def add_song(self, song):
"""
Adds a song to this user's collection. Keeps counts up to date.
"""
# Add to collection
owner = SongOwner(song = song, user = self)
# Keep counts up to date
new_album = False
albumc = Session.query(AlbumCounts).get((song.album.id, self.id))
if albumc:
albumc.songcount += 1
else:
new_album = True
albumc = AlbumCounts(user = self, album = song.album, songcount=1)
artistc = Session.query(ArtistCounts).get((song.album.artistid, self.id))
if artistc:
artistc.songcount += 1
if new_album:
artistc.albumcount += 1
else:
artistc = ArtistCounts(
user=self, artist=song.album.artist, songcount=1, albumcount=1)
Session.add_all([owner, artistc, albumc])
Session.commit()
return owner
def remove_song(self, songrow):
"""
        Removes a song from the user's collection and updates the counts.
"""
# the passed song is a RowTuple, so we convert it so a Song object
song = Session.query(Song).get(songrow.Song_id)
movedowner = RemovedOwner(
song = song,
user = self
)
Session.add(movedowner)
owner = Session.query(SongOwner).\
filter(SongOwner.song == song).\
filter(SongOwner.user == self).first()
Session.delete(owner)
albumc = Session.query(AlbumCounts).get((song.albumid, self.id))
albumc.songcount -= 1
remove_album = False
if albumc.songcount == 0:
remove_album = True
artistc = Session.query(ArtistCounts).get((song.album.artistid, self.id))
artistc.songcount -= 1
if remove_album:
artistc.albumcount -= 1
Session.add(artistc)
return True
@fbaccess
def update_profile(self):
c.user = self
fbml = render('facebook/profile.mako.fbml')
facebook.profile.setFBML(fbml)
@fbaccess
def publish_spotlight(self, spot):
title_t = """
{actor} created
<fb:if-multiple-actors>Spotlights
<fb:else>a Spotlight </fb:else>
</fb:if-multiple-actors>
on {album} at
<a href="http://harmonize.fm" target="_blank">harmonize.fm</a>
"""
title_d = '{"album":"%s"}' % spot.title
r = facebook.feed.publishTemplatizedAction(
title_template=title_t,
title_data=title_d
)
return r
def add_spotlight(self, spotlight):
spotlight.user = self
Session.add(spotlight)
spotlight.unactivate_lru()
self.publish_spotlight(spotlight)
self.update_profile()
def add_me_to_friends(self):
for friend in self.friends:
try:
friend.friends.append(self)
friend.friends.sort(key=attrgetter('name'))
except:
# oh well, they'll find me eventually
logging.debug('Could not be added to %s', friend.id)
def update_friends_caches(self):
for friend in self.friends:
self.fbfriendscache.remove_value(friend.id)
def get_recommendations(self):
return Session.query(Recommendation).filter(
sql.and_(Recommendation.recommendeefbid == self.fbid,
Recommendation.active == True)).\
order_by(sql.desc(Recommendation.timestamp))
recommendations = property(get_recommendations)
class ArtistCounts(Base):
__table__ = Table('counts_artist', metadata, autoload=True)
key = [__table__.c.artistid, __table__.c.userid]
__mapper_args__ = {'primary_key': key}
artist = relation(Artist)
user = relation(User)
class AlbumCounts(Base):
__table__ = Table('counts_album', metadata, autoload=True)
key = [__table__.c.albumid, __table__.c.userid]
__mapper_args__ = {'primary_key': key}
album = relation(Album)
user = relation(User)
|
mit
| 8,144,348,351,670,925,000
| 32.916413
| 99
| 0.577049
| false
| 3.847759
| false
| false
| false
|
ARM-software/trappy
|
tests/test_utils.py
|
1
|
2217
|
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
import unittest
from trappy import utils
import pandas
from pandas.util.testing import assert_series_equal
class TestUtils(unittest.TestCase):
def test_handle_duplicate_index(self):
"""Test Util Function: handle_duplicate_index
"""
# Refer to the example in the function doc string
values = [0, 1, 2, 3, 4]
index = [0.0, 1.0, 1.0, 6.0, 7.0]
series = pandas.Series(values, index=index)
new_index = [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0]
with self.assertRaises(ValueError):
series.reindex(new_index)
max_delta = 0.001
expected_index = [0.0, 1.0, 1 + max_delta, 6.0, 7.0]
expected_series = pandas.Series(values, index=expected_index)
series = utils.handle_duplicate_index(series, max_delta)
assert_series_equal(series, expected_series)
# Make sure that the reindex doesn't raise ValueError any more
series.reindex(new_index)
def test_handle_duplicate_index_duplicate_end(self):
"""handle_duplicate_index copes with duplicates at the end of the series"""
max_delta = 0.001
values = [0, 1, 2, 3, 4]
index = [0.0, 1.0, 2.0, 6.0, 6.0]
expected_index = index[:]
expected_index[-1] += max_delta
series = pandas.Series(values, index=index)
expected_series = pandas.Series(values, index=expected_index)
series = utils.handle_duplicate_index(series, max_delta)
assert_series_equal(series, expected_series)
|
apache-2.0
| -5,604,032,030,369,191,000
| 35.344262
| 83
| 0.666216
| false
| 3.664463
| true
| false
| false
|
RazerM/pg_grant
|
tests/conftest.py
|
1
|
2475
|
from pathlib import Path
import pytest
import testing.postgresql
from sqlalchemy import create_engine, text
from sqlalchemy.engine.url import make_url
from testcontainers.postgres import PostgresContainer as _PostgresContainer
tests_dir = Path(__file__).parents[0].resolve()
test_schema_file = Path(tests_dir, 'data', 'test-schema.sql')
SUPERUSER_NAME = 'alice'
DB_NAME = 'db1'
Postgresql = testing.postgresql.PostgresqlFactory(
initdb_args='-U postgres -A trust',
database=DB_NAME,
)
class PostgresContainer(_PostgresContainer):
POSTGRES_USER = 'postgres'
POSTGRES_DB = DB_NAME
def pytest_addoption(parser):
parser.addoption(
'--no-container', action='store_true',
help='Use temporary PostgreSQL cluster without a container.')
def pytest_runtest_setup(item):
if 'nocontainer' in item.keywords and not item.config.getoption('--no-container'):
pytest.skip('Use --no-container to execute this test.')
@pytest.fixture(scope='session')
def postgres_url(request):
no_container = request.config.getoption("--no-container")
if no_container:
postgresql = Postgresql()
# Use superuser to create new superuser, then yield new connection URL
url = make_url(postgresql.url())
engine = create_engine(url)
engine.execute('CREATE ROLE {} WITH SUPERUSER LOGIN'.format(SUPERUSER_NAME))
engine.dispose()
url.username = SUPERUSER_NAME
yield str(url)
else:
postgres_container = PostgresContainer("postgres:latest")
with postgres_container as postgres:
# Use superuser to create new superuser, then yield new connection URL
url = make_url(postgres.get_connection_url())
engine = create_engine(url)
engine.execute(
text(
'CREATE ROLE {} WITH SUPERUSER LOGIN PASSWORD '
':password'.format(SUPERUSER_NAME)
),
password=postgres_container.POSTGRES_PASSWORD,
)
engine.dispose()
url.username = SUPERUSER_NAME
yield str(url)
@pytest.fixture(scope='session')
def engine(postgres_url):
return create_engine(postgres_url)
@pytest.fixture(scope='session')
def pg_schema(engine):
with test_schema_file.open() as fp:
engine.execute(fp.read())
@pytest.fixture
def connection(engine, pg_schema):
with engine.connect() as conn:
yield conn
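# Illustrative sketch (not part of this conftest): a test module alongside it can
# simply request the fixtures defined above by name, e.g.
#
#     def test_superuser_can_query(connection):
#         assert connection.execute('SELECT 1').scalar() == 1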
|
mit
| 4,059,386,316,065,879,600
| 28.117647
| 86
| 0.657778
| false
| 4.057377
| true
| false
| false
|
raymondnoonan/Mpropulator
|
MPropulator/write_tab.py
|
1
|
1656
|
from MPropulator import helpers
import string
def write_tab(sheet, table_data, xls_startcell, skiprows, skipcols):
"""Writes the data for a particular table to the corresponding
Excel spreadsheet.
sheet: openpyxl worksheet to which you're writing
table_data: pandas data frame containing data to write
xls_startcell: cell in the sheet at which you will begin writing
skiprows: list of rows in Excel spreadsheet to skip
skipcols: list of columns in Excel spreadsheet to skip
"""
num_rows = table_data.shape[0]
num_cols = table_data.shape[1]
# We subtract one to remain 0-indexed
start_row = int(xls_startcell.translate(None, string.ascii_letters)) - 1
start_col = helpers.col_to_number(xls_startcell.translate(None,
string.digits))
num_skipcols = [helpers.col_to_number(col) for col in skipcols]
total_rows = start_row + num_rows + len(skiprows)
table_rows_to_write = [row for row in range(start_row, total_rows) if
row not in skiprows]
total_cols = start_col + num_cols + len(skipcols)
table_cols_to_write = [col for col in range(start_col, total_cols) if
col not in num_skipcols]
for row_idx, row in enumerate(table_rows_to_write):
for col_idx, col in enumerate(table_cols_to_write):
current_cell = helpers.cell_name(row, col)
value = table_data.iloc[row_idx, col_idx]
try:
value = float(value)
except ValueError:
pass
sheet[current_cell].value = value
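if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). The workbook and
    # the data frame below are made up purely to demonstrate the call signature.
    import openpyxl
    import pandas as pd
    wb = openpyxl.Workbook()
    demo = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
    # Write starting at cell B3, skipping no rows or columns of the target sheet.
    write_tab(wb.active, demo, xls_startcell="B3", skiprows=[], skipcols=[])
    print(wb.active["B3"].value)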
|
mit
| 5,847,719,726,850,679,000
| 37.511628
| 77
| 0.624396
| false
| 3.842227
| false
| false
| false
|
johnny555/2d3g
|
viz.py
|
1
|
5005
|
__author__ = 'Admin'
def group_bands(depth, coal_labels, holeID):
coal_label_list = ['RU', 'R', 'R1', 'R2', 'RO', 'RL', 'MU', 'MM', 'MML', 'LN', 'TR', 'TRL', 'PS', 'PSL', 'P2', 'P2U',
'P2LA', 'P2LB', 'BA', 'G1', 'G2', 'G34', 'G3', 'G4', 'G56', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10',
                       'G11', 'BGA', 'BGB', 'BGC', 'BG', 'HEU', 'HEL', 'CN', 'FH', 'FL', 'MAC', 'PX', 'PU', 'PM', 'P',
'PL', 'AQ', 'AQL', 'T1UA', 'T1UB', 'T1U', 'T1M', 'T1L', 'T2', 'C1U', 'C1', 'C1L', 'CM', 'CM',
                       'CS', 'C2', 'GUS', 'GU', 'GC', 'GL', 'BN']
deltaD = depth[1]-depth[0]
dist_from_last_coal_seam = float('inf')
seam_prop = []
seam_list = []
for i, label in enumerate(coal_labels):
if label in coal_label_list:
            if (dist_from_last_coal_seam == float('inf')) or (dist_from_last_coal_seam != 0):
dist_from_last_coal_seam = 0
seam_prop.append(depth[i])
elif (label not in coal_labels) and (dist_from_last_coal_seam == 0):
seam_prop.append(depth[i])
seam_list.append(seam_prop)
seam_prop = []
dist_from_last_coal_seam += deltaD
print seam_list
allowable_dist = 20
group_no = 1
nSeam = len(seam_list)
group_list = [group_no]
for iSeam in range(nSeam-1):
if seam_list[iSeam+1][0] - seam_list[iSeam][1] > allowable_dist:
group_no += 1
group_list.append(group_no)
print group_list
out_list = []
for i, seam in enumerate(seam_list):
out_dict = {}
out_dict['top'] = seam[0]
out_dict['bot'] = seam[1]
out_dict['type'] = group_list[i]
out_list.append(out_dict)
import json
with open('%s_seaminfo.json'%holeID,'w') as fp:
json.dump(out_list, fp)
return seam_list
def display_acoustic(df, holeID, useful_features = ['ADEN', 'BRDU', 'CADE', 'CODE', 'DENB', 'DENL', 'GRDE', 'LSDU']):
import matplotlib.pyplot as plt
feature_list = df.columns
# print feature_list
accoustic_features = []
for feature in feature_list:
if 'ATV_AMP' in feature:
accoustic_features.append(feature)
# print accoustic_features
accoustic_scan = df[accoustic_features].values
coal_label_list = ['RU', 'R', 'R1', 'R2', 'RO', 'RL', 'MU', 'MM', 'MML', 'LN', 'TR', 'TRL', 'PS', 'PSL', 'P2', 'P2U',
'P2LA', 'P2LB', 'BA', 'G1', 'G2', 'G34', 'G3', 'G4', 'G56', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10',
                       'G11', 'BGA', 'BGB', 'BGC', 'BG', 'HEU', 'HEL', 'CN', 'FH', 'FL', 'MAC', 'PX', 'PU', 'PM', 'P',
'PL', 'AQ', 'AQL', 'T1UA', 'T1UB', 'T1U', 'T1M', 'T1L', 'T2', 'C1U', 'C1', 'C1L', 'CM', 'CM',
                       'CS', 'C2', 'GUS', 'GU', 'GC', 'GL', 'BN']
# useful_features = ['ADEN', 'BRDU', 'CADE', 'CODE', 'DENB', 'DENL', 'GRDE', 'LSDU']
nPlots = len(useful_features) + 2
iPlot = 1
for feature in useful_features:
plt.subplot(1,nPlots,iPlot)
plt.plot(df[feature].values, df['DEPTH'].values)
plt.ylim(min(df['DEPTH'].values), max(df['DEPTH'].values))
# plt.title(feature)
plt.gca().invert_yaxis()
plt.axis('off')
iPlot += 1
plt.subplot(1,nPlots,iPlot)
plt.imshow(accoustic_scan, aspect='auto')
plt.ylim(1, len(accoustic_scan))
plt.title('Acoustic scan')
plt.gca().invert_yaxis()
iPlot += 1
plt.subplot(1,nPlots,iPlot)
# plt.plot([l in coal_label_list for l in df['LABELS'].values], df['DEPTH'].values)
x1 = [l in coal_label_list for l in df['LABELS'].values]
# x2 = [2 if x == True else 0 for x in x1]
x2 = [0]*len(x1)
y1 = df['DEPTH'].values
y2 = y1
plt.plot((x1, x2), (y1, y2), 'k-')
plt.ylim(min(df['DEPTH'].values), max(df['DEPTH'].values))
plt.title('Label')
plt.gca().invert_yaxis()
iPlot += 1
# plt.imsave('%s.png'%holeID)
plt.savefig('%s.png'%holeID)
group_bands(df['DEPTH'].values, df['LABELS'].values, holeID = holeID)
plt.show()
if __name__ == '__main__':
import pandas as pd
import matplotlib.pyplot as plt
# holeID = [ 'DD1102']
#
# # extract_seams(bore_id = holeID, seam_list = hole_boundaries)
# [extract_seams(bore_id=h) for h in holeID]
holeID = 'DD1097'
holeID = 'DD1098'
holeID = 'DD1099'
# holeID = 'DD1100'
# holeID = 'DD1102'
shit = [ 'DD1101','DD1106' ]
done = [ ]
holeId = ['DD1097',
'DD1098',
'DD1099',
'DD1100',
'DD1102',
'DD1104', 'DD1105' ,'DD1107', 'DD1108','DD1103',
'DD0541',
'DD0542',
'DD0551',
'DD0980A',
'DD0989',
'DD0991',
'DD0992',
'DD1000',
'DD1005',
'DD1006',
'DD1010',
'DD1012',
'DD1013',
'DD1014']
for holeID in holeId:
df = pd.read_csv('dats/%s_cleandata.csv'%holeID)
display_acoustic(df, holeID)
|
bsd-2-clause
| 1,808,144,811,737,757,200
| 24.804124
| 121
| 0.507493
| false
| 2.613577
| false
| false
| false
|
letsmeet-click/letsmeet.click
|
letsmeet/events/migrations/0001_initial.py
|
1
|
2446
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-28 00:55
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_extensions.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('communities', '0003_auto_20151227_2241'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('name', models.CharField(max_length=64, unique=True)),
('slug', models.SlugField(max_length=64, unique=True)),
('begin', models.DateTimeField(default=django.utils.timezone.now)),
('end', models.DateTimeField(default=django.utils.timezone.now)),
('community', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='communities.Community')),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='EventRSVP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('coming', models.BooleanField()),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rsvps', to='events.Event')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rsvps', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='eventrsvp',
unique_together=set([('event', 'user')]),
),
]
|
mit
| -1,845,259,420,887,350,000
| 45.150943
| 145
| 0.616926
| false
| 4.246528
| false
| false
| false
|
teonlamont/mne-python
|
mne/datasets/brainstorm/bst_auditory.py
|
4
|
1920
|
# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
#
# License: BSD (3-clause)
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
_data_path_doc)
has_brainstorm_data = partial(has_dataset, name='brainstorm')
_description = u"""
URL: http://neuroimage.usc.edu/brainstorm/DatasetAuditory
- One subject, two acquisition runs of 6 minutes each
- Subject stimulated binaurally with intra-aural earphones
(air tubes+transducers)
- Each run contains:
- 200 regular beeps (440Hz)
- 40 easy deviant beeps (554.4Hz, 4 semitones higher)
- Random inter-stimulus interval: between 0.7s and 1.7s seconds, uniformly
distributed
- The subject presses a button when detecting a deviant with the right
index finger
- Auditory stimuli generated with the Matlab Psychophysics toolbox
"""
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='brainstorm',
download=download, archive_name='bst_auditory.tar.gz')
_data_path_doc = _data_path_doc.format(name='brainstorm',
conf='MNE_DATASETS_BRAINSTORM_DATA'
'_PATH')
_data_path_doc = _data_path_doc.replace('brainstorm dataset',
'brainstorm (bst_auditory) dataset')
data_path.__doc__ = _data_path_doc
def get_version(): # noqa: D103
return _get_version('brainstorm')
get_version.__doc__ = _version_doc.format(name='brainstorm')
def description():
"""Get description of brainstorm (bst_auditory) dataset."""
for desc in _description.splitlines():
print(desc)
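# Illustrative usage sketch (not part of the original module; calling data_path()
# downloads the dataset on first use, so it is shown here as a comment only):
#
#     from mne.datasets.brainstorm import bst_auditory
#     data_dir = bst_auditory.data_path()   # local path to the (possibly downloaded) data
#     print(bst_auditory.get_version())
#     bst_auditory.description()            # prints the summary defined above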
|
bsd-3-clause
| -4,784,413,848,918,143,000
| 33.285714
| 78
| 0.639063
| false
| 3.50365
| false
| false
| false
|
dtudares/hello-world
|
yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
|
1
|
3858
|
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for
# yardstick.benchmark.scenarios.networking.netperf_node.NetperfNode
import mock
import unittest
import os
import json
from yardstick.benchmark.scenarios.networking import netperf_node
@mock.patch('yardstick.benchmark.scenarios.networking.netperf_node.ssh')
class NetperfNodeTestCase(unittest.TestCase):
def setUp(self):
self.ctx = {
'host': {
'ip': '192.168.10.10',
'user': 'root',
'password': 'root'
},
'target': {
'ip': '192.168.10.11',
'user': 'root',
'password': 'root'
}
}
def test_netperf_node_successful_setup(self, mock_ssh):
p = netperf_node.NetperfNode({}, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
p.setup()
self.assertIsNotNone(p.server)
self.assertIsNotNone(p.client)
self.assertEqual(p.setup_done, True)
def test_netperf_node_successful_no_sla(self, mock_ssh):
options = {}
args = {'options': options}
result = {}
p = netperf_node.NetperfNode(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
p.host = mock_ssh.SSH()
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
expected_result = json.loads(sample_output)
p.run(result)
self.assertEqual(result, expected_result)
def test_netperf_node_successful_sla(self, mock_ssh):
options = {}
args = {
'options': options,
'sla': {'mean_latency': 100}
}
result = {}
p = netperf_node.NetperfNode(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
p.host = mock_ssh.SSH()
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
expected_result = json.loads(sample_output)
p.run(result)
self.assertEqual(result, expected_result)
def test_netperf_node_unsuccessful_sla(self, mock_ssh):
options = {}
args = {
'options': options,
'sla': {'mean_latency': 5}
}
result = {}
p = netperf_node.NetperfNode(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
p.host = mock_ssh.SSH()
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
self.assertRaises(AssertionError, p.run, result)
def test_netperf_node_unsuccessful_script_error(self, mock_ssh):
options = {}
args = {'options': options}
result = {}
p = netperf_node.NetperfNode(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
p.host = mock_ssh.SSH()
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, p.run, result)
def _read_sample_output(self):
curr_path = os.path.dirname(os.path.abspath(__file__))
output = os.path.join(curr_path, 'netperf_sample_output.json')
with open(output) as f:
sample_output = f.read()
return sample_output
def main():
unittest.main()
if __name__ == '__main__':
main()
|
apache-2.0
| -2,371,470,591,946,177,000
| 29.377953
| 78
| 0.554951
| false
| 3.720347
| true
| false
| false
|
uogbuji/Library.Link
|
pylib/resource.py
|
1
|
2994
|
'''
'''
import re
import http
import asyncio
from itertools import *
from versa.driver import memory
from versa import I, VERSA_BASEIRI, ORIGIN, RELATIONSHIP, TARGET, ATTRIBUTES
from versa.reader import rdfalite
from versa.reader.rdfalite import RDF_NS, SCHEMAORG_NS
from versa import util as versautil
#from bibframe import BFZ, BL
#from bibframe.zextra import LL
#from rdflib import URIRef, Literal
#from rdflib import BNode
from amara3 import iri
from amara3.uxml import tree
from amara3.uxml import xml
from amara3.uxml.treeutil import *
from amara3.uxml import html5
RDFTYPE = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'
SCHEMAORG = 'http://schema.org/'
LL_RESOURCE_BASE = 'http://library.link/resource/'
LL_ISBN_STEMPLATE = 'http://library.link/id/isbn/{isbn}/brief.json'
OPENLIBRARY_TITLESEARCHBASE = 'http://openlibrary.org/search.json'
async def BUSTED(title, session=None, max_retries=1):
'''
Async helper to get information from isbn.nu for a title query
Returns a JSON object
>>> from amara3.asynctools import go_async
>>> from librarylink.util import rdfa_from_page
>>> from versa import util as versautil
>>> url = "http://library.link/resource/2_8BKlrtCTI/brief.json"
>>> obj = go_async(network_resource_content(url))
'''
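    # NOTE: this coroutine appears incomplete -- it references names that are not defined
    # anywhere in this module (c14n_isbns, compute_ean13_check, tasks, isbns_seen,
    # filtered_isbns, group_label, ll_super_list), so calling it as-is raises NameError.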
for isbn in [ compute_ean13_check(i) for i in c14n_isbns]:
task = network_isbn_info(isbn)
tasks.append(task)
ll_result_sets = await asyncio.gather(*tasks)
for isbn, result in ll_result_sets:
#print(isbn, result)
if result and isbn not in isbns_seen:
filtered_isbns.append(isbn)
isbns_seen.add(isbn)
if filtered_isbns:
ll_super_list.append({'label': group_label, 'isbns': filtered_isbns})
async def network_isbn_info(isbn, session=None, max_retries=1):
'''
Async helper to get JSON content from network resource page
Returns a JSON object
>>> from amara3.asynctools import go_async
>>> from librarylink.resource import network_isbn_info
>>> obj = go_async(network_isbn_info(9780871290861))
>>> obj['workExample'][0].get('holdings_count')
19
'''
retry_count = 0
url = LL_ISBN_STEMPLATE.format(**{'isbn': isbn})
#print('processing', url, file=sys.stderr)
while True:
model = memory.connection()
try:
if session == None:
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
obj = await response.json()
return obj
else:
async with session.get(url) as response:
obj = await response.json()
return obj
except Exception as e:
#print(url, f'[EXCEPTION {e}], context: {context}', file=sys.stderr)
retry_count += 1
if retry_count >= max_retries:
return None
await asyncio.sleep(0.2)
|
apache-2.0
| 7,825,348,551,972,483,000
| 31.193548
| 80
| 0.641283
| false
| 3.386878
| false
| false
| false
|
al8/raspberrypi-photosensor
|
photosensor.py
|
1
|
2036
|
#!/usr/bin/env python
from __future__ import print_function
# Example for RC timing reading for Raspberry Pi
# Must be used with GPIO 0.3.1a or later - earlier verions
# are not fast enough!
import argparse
import time
import sys
import atexit
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
g_RCpin = None
def RCtime (RCpin, sleep, maxvalue):
global g_RCpin
g_RCpin = RCpin
reading = 0
GPIO.setup(RCpin, GPIO.OUT)
GPIO.output(RCpin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(RCpin, GPIO.IN)
# This takes about 1 millisecond per loop cycle
while (GPIO.input(RCpin) == GPIO.LOW):
reading += 1
time.sleep(sleep)
if reading >= maxvalue: break
return reading
@atexit.register
def setread():
if g_RCpin is None:
return
GPIO.setup(g_RCpin, GPIO.IN)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='photosensor, resistor/capacitor timer method. larger numbers are darker, default values tuned for 3uF capacitor.')
parser.add_argument("--pin", type=int, default=18, help="gpio pin used")
parser.add_argument("--div", type=int, default=1, help="divide final value by this")
parser.add_argument("--sleep", type=float, default=0.04, help="sleep between counter in counting")
parser.add_argument("--maxvalue", type=int, default=50, help="max 'darkness' to be detected")
parser.add_argument("--outfile", "-o", type=str, default="/tmp/photosensor.value")
parser.add_argument("--debug", action="store_true")
options = parser.parse_args()
if options.debug:
print("using pin %d" % options.pin)
while True:
reading = RCtime(options.pin, options.sleep, options.maxvalue) / options.div # Read RC timing using pin #18
if options.debug:
print("%s: " % time.asctime(), file=sys.stderr, end='')
print(reading)
with open(options.outfile, "wb") as f:
f.write("%d" % reading)
time.sleep(0.5)
|
mit
| -7,217,418,351,592,861,000
| 31.83871
| 164
| 0.641454
| false
| 3.480342
| false
| false
| false
|
oseledets/pybtex
|
pybtex/style/labels/alpha.py
|
1
|
6453
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
if sys.version_info < (2, 7):
from counter import Counter
else:
from collections import Counter
import re
import string
import unicodedata
from pybtex.style.labels import BaseLabelStyle
_nonalnum_pattern = re.compile('[^A-Za-z0-9]+', re.UNICODE)
def _strip_accents(s):
return u''.join(
(c for c in unicodedata.normalize('NFD', s)
if not unicodedata.combining(c)))
def _strip_nonalnum(parts):
"""Strip all non-alphanumerical characters from a list of strings.
>>> print _strip_nonalnum([u"ÅA. B. Testing 12+}[.@~_", u" 3%"])
AABTesting123
"""
s = u''.join(parts)
return _nonalnum_pattern.sub(u'', _strip_accents(s))
class LabelStyle(BaseLabelStyle):
def format_labels(self, sorted_entries):
labels = [self.format_label(entry) for entry in sorted_entries]
count = Counter(labels)
counted = Counter()
for label in labels:
if count[label] == 1:
yield label
else:
yield label + chr(ord('a') + counted[label])
counted.update([label])
# note: this currently closely follows the alpha.bst code
# we should eventually refactor it
def format_label(self, entry):
# see alpha.bst calc.label
if entry.type == "book" or entry.type == "inbook":
label = self.author_editor_key_label(entry)
elif entry.type == "proceedings":
label = self.editor_key_organization_label(entry)
elif entry.type == "manual":
label = self.author_key_organization_label(entry)
else:
label = self.author_key_label(entry)
if "year" in entry.fields:
return label + entry.fields["year"][-2:]
else:
return label
# bst additionally sets sort.label
def author_key_label(self, entry):
# see alpha.bst author.key.label
if not "author" in entry.persons:
if not "key" in entry.fields:
return entry.key[:3] # entry.key is bst cite$
else:
# for entry.key, bst actually uses text.prefix$
return entry.fields["key"][:3]
else:
return self.format_lab_names(entry.persons["author"])
def author_editor_key_label(self, entry):
# see alpha.bst author.editor.key.label
if not "author" in entry.persons:
if not "editor" in entry.persons:
if not "key" in entry.fields:
return entry.key[:3] # entry.key is bst cite$
else:
# for entry.key, bst actually uses text.prefix$
return entry.fields["key"][:3]
else:
return self.format_lab_names(entry.persons["editor"])
else:
return self.format_lab_names(entry.persons["author"])
def author_key_organization_label(self, entry):
if not "author" in entry.persons:
if not "key" in entry.fields:
if not "organization" in entry.fields:
return entry.key[:3] # entry.key is bst cite$
else:
result = entry.fields["organization"]
if result.startswith("The "):
result = result[4:]
return result
else:
return entry.fields["key"][:3]
else:
return self.format_lab_names(entry.persons["author"])
def editor_key_organization_label(self, entry):
if not "editor" in entry.persons:
if not "key" in entry.fields:
if not "organization" in entry.fields:
return entry.key[:3] # entry.key is bst cite$
else:
result = entry.fields["organization"]
if result.startswith("The "):
result = result[4:]
return result
else:
return entry.fields["key"][:3]
else:
return self.format_lab_names(entry.persons["editor"])
def format_lab_names(self, persons):
# see alpha.bst format.lab.names
# s = persons
numnames = len(persons)
if numnames > 1:
if numnames > 4:
namesleft = 3
else:
namesleft = numnames
result = ""
nameptr = 1
while namesleft:
person = persons[nameptr - 1]
if nameptr == numnames:
if unicode(person) == "others":
result += "+"
else:
result += _strip_nonalnum(
person.prelast(abbr=True) + person.last(abbr=True))
else:
result += _strip_nonalnum(
person.prelast(abbr=True) + person.last(abbr=True))
nameptr += 1
namesleft -= 1
if numnames > 4:
result += "+"
else:
person = persons[0]
result = _strip_nonalnum(
person.prelast(abbr=True) + person.last(abbr=True))
if len(result) < 2:
result = _strip_nonalnum(person.last(abbr=False))[:3]
return result
|
mit
| 3,171,189,926,441,258,500
| 36.511628
| 79
| 0.566491
| false
| 4.195059
| false
| false
| false
|
alisheykhi/SocialPDA
|
Degree_Anonimity.py
|
1
|
2374
|
from graph_util import ReadGraph
from micro_pda import MicroPDA
from particle_pda1 import ParticlePDA
from swarm_pda import SwarmPDA
from Measurments import measurmnets
import time
import datetime
import pymysql
import json
# avg , std , best for 30 times run,
# file name = datasetName_Beta_k_Delta_l
# levels = low, medium, high, and critical
#graph_caida = ReadGraph("caida.txt",6)
# graph_caida = ReadGraph("polblogs.gml",1)
# #graph_caida = ReadGraph("polbooks.gml",level=3)
# optimal_omega_cluster = MicroPDA(graph_caida.sorted_degree_sequence)
# particle_pda = ParticlePDA(omega_clusters=optimal_omega_cluster.omega_clusters,beta= 0.01,
# removed_omega_clusters=optimal_omega_cluster.removed_omega_clusters)
# particle_pda.plotResults()
# anonymizedcluster = particle_pda.clusterWithAvg()
# swarm_pda = SwarmPDA(omega_clusters=particle_pda.clusters_avg_embedded,graph_G= graph_caida.G)
# sol = swarm_pda.run_swarm()
# measurment = measurmnets(graph_caida.G, sol['original'], sol['modified'])
# measure = measurment.calculate_measures()
# for key,value in measure.iteritems():
# print key , '----->' , value
graph_name = 'caida.txt'
level = 6
beta = 0.5
l = 30
run = 30
graph = ReadGraph(graph_name,level)
db = pymysql.connect(host="localhost",
user="root",
passwd="",
db="SocialPda")
connection = db.cursor()
optimal_omega_cluster = MicroPDA(graph.sorted_degree_sequence)
cluster = json.dumps(optimal_omega_cluster.omega_clusters)
print cluster
# for i in range(1,run+1):
# optimal_omega_cluster = MicroPDA(graph.sorted_degree_sequence)
# cluster = json.dumps(optimal_omega_cluster.omega_clusters)
# #insert into micropda
# ts = time.time()
# timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
#
# sql = "INSERT INTO `SocialPDA`.`micropda` (`dataset`, `Beta`, `l`, `date`, `k`, `delta`, `omega_cluster`, `run`) VALUES (%s,%s,%s,%s,%s,%s,%r,%s)"
# connection.execute(sql, (graph_name, beta, l, timestamp, '1', '1',cluster , i))
# connection.commit()
# connection.close()
# particle_pda = ParticlePDA(omega_clusters=optimal_omega_cluster.omega_clusters,beta= beta,
# removed_omega_clusters=optimal_omega_cluster.removed_omega_clusters)
#insert into particlepda
|
apache-2.0
| -1,964,974,860,067,055,400
| 33.926471
| 152
| 0.680286
| false
| 3.103268
| false
| false
| false
|
DoubleGremlin181/Scripting
|
Wikipedia.py
|
1
|
1131
|
#A script to check if any two wikipedia pages have any common wiki links
#TODO: Check on multiple levels, i.e. check if any of the linked wikis on the pages have a common wiki link
import requests
import wikipedia
from bs4 import BeautifulSoup
def get_list(page):
links_page=["/wiki/"+page.split("/")[4]]
data = requests.get(page).content
soup = BeautifulSoup(data, "lxml")
for temp in soup.find_all("a"):
if temp.parent.name =="p":
links_page.append(temp["href"])
return links_page
def compare(list1,list2):
common=[]
for i in list1:
if i in list2:
common.append("https://en.wikipedia.org"+i)
return common
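# Design note: compare() is a quadratic membership scan, which is fine for the
# few dozen links on a typical article. For larger inputs the same result
# (order aside) could be computed with a set intersection, e.g.:
# common = ["https://en.wikipedia.org" + i for i in set(list1) & set(list2)]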
def main():
page1 = raw_input("Enter the url of the first page\n>")
page2 = raw_input("Enter the url of the second page\n>")
links_page1=get_list(page1)
links_page2=get_list(page2)
common = compare(links_page1,links_page2)
print "\nThe pages are directly linked through the following wikis:\n"
print '\n'.join(['%i: %s' % (n+1, common[n]) for n in xrange(len(common))])
if __name__ == '__main__':
main()
|
gpl-3.0
| -1,919,865,110,978,478,600
| 25.302326
| 107
| 0.641026
| false
| 3.396396
| false
| false
| false
|
f-prettyland/angr
|
angr/exploration_techniques/tracer.py
|
1
|
16414
|
import os
import logging
import claripy
from . import ExplorationTechnique, Cacher
from .. import BP_BEFORE
from ..calling_conventions import SYSCALL_CC
from ..errors import AngrTracerError, SimMemoryError, SimEngineError
from ..storage.file import SimFile
l = logging.getLogger("angr.exploration_techniques.tracer")
class Tracer(ExplorationTechnique):
"""
An exploration technique that follows an angr path with a concrete input.
The tracing result is the state after executing the last basic block of the
given trace and can be found in 'traced' stash.
If the given concrete input makes the program crash, the last correct
states that you might want are kept in the 'predecessors' list. The crashed
state can be found with CrashMonitor exploration technique.
"""
def __init__(self, trace=None, resiliency=True, use_cache=True, dump_syscall=False, keep_predecessors=1):
"""
:param trace : The basic block trace.
:param resiliency : Should we continue to step forward even if qemu and angr disagree?
:param use_cache : True if we want to use caching system.
:param dump_syscall : True if we want to dump the syscall information.
:param keep_predecessors: Number of states before the final state we should preserve.
Default 1, must be greater than 0.
"""
super(Tracer, self).__init__()
self._trace = trace
self._resiliency = resiliency
self._dump_syscall = dump_syscall
# keep track of the last basic block we hit
if keep_predecessors < 1:
raise ValueError("Must have keep_predecessors >= 1")
self.predecessors = [None] * keep_predecessors
# whether we should follow the trace
self._no_follow = self._trace is None
        # initialize the syscall statistics if the flag is on
if self._dump_syscall:
self._syscalls = []
self._use_cache = use_cache
def setup(self, simgr):
self.project = simgr._project
s = simgr.active[0]
# initialize the basic block counter to 0
s.globals['bb_cnt'] = 0
if self._dump_syscall:
s.inspect.b('syscall', when=BP_BEFORE, action=self._syscall)
elif self.project.loader.main_object.os.startswith('UNIX'):
# Step forward until we catch up with QEMU
if self._trace and s.addr != self._trace[0]:
simgr = simgr.explore(find=self.project.entry)
simgr = simgr.drop(stash="unsat")
simgr = simgr.unstash(from_stash="found",to_stash="active")
if self.project.loader.main_object.os == 'cgc':
if self._use_cache:
cache_file = os.path.join("/tmp", "%(name)s-%(binhash)s.tcache")
cacher = Cacher(when=self._tracer_cache_cond,
container=cache_file,
dump_func=self._tracer_dump,
load_func=self._tracer_load)
simgr.use_technique(cacher)
# If we're restoring from a cache, we preconstrain. If we're not restoring from a cache,
# the cacher will preconstrain.
# If we're restoring from a cache, we can safely remove the cacher
# right after.
if os.path.exists(cacher.container):
simgr.one_active.preconstrainer.preconstrain_state()
simgr.remove_tech(cacher)
else:
simgr.one_active.preconstrainer.preconstrain_state()
def complete(self, simgr):
all_paths = simgr.active + simgr.deadended
if not len(simgr.active) or all_paths[0].globals['bb_cnt'] >= len(self._trace):
# this is a concrete trace, there should only be ONE path
if len(all_paths) != 1:
raise AngrTracerError("Program did not behave correctly, expected only one path.")
# the caller is responsible for removing preconstraints
simgr.stash(from_stash='active', to_stash='traced')
simgr.stash(from_stash='deadended', to_stash='traced')
return True
return False
def step(self, simgr, stash, **kwargs):
if stash != 'active':
raise Exception("TODO: tracer doesn't work with stashes other than active")
if len(simgr.active) == 1:
current = simgr.active[0]
if current.history.recent_block_count > 1:
                # unicorn executed several blocks in one step; fix up bb_cnt accordingly
current.globals['bb_cnt'] += current.history.recent_block_count - 1 - current.history.recent_syscall_count
if not self._no_follow:
# termination condition: we exhausted the dynamic trace log
if current.globals['bb_cnt'] >= len(self._trace):
return simgr
# now, we switch through several ways that the dynamic and symbolic traces can interact
# basic, convenient case: the two traces match
if current.addr == self._trace[current.globals['bb_cnt']]:
current.globals['bb_cnt'] += 1
# angr will count a syscall as a step, qemu will not. they will sync next step.
elif current.history.jumpkind.startswith("Ijk_Sys"):
pass
# handle library calls and simprocedures
elif self.project.is_hooked(current.addr) \
or self.project.simos.is_syscall_addr(current.addr) \
or not self._address_in_binary(current.addr):
# If dynamic trace is in the PLT stub, update bb_cnt until it's out
while current.globals['bb_cnt'] < len(self._trace) and self._addr_in_plt(self._trace[current.globals['bb_cnt']]):
current.globals['bb_cnt'] += 1
# handle hooked functions
# TODO: this branch is totally missed by the test cases
elif self.project.is_hooked(current.history.addr) \
and current.history.addr in self.project._sim_procedures:
l.debug("ending hook for %s", self.project.hooked_by(current.history.addr))
l.debug("previous addr %#x", current.history.addr)
l.debug("bb_cnt %d", current.globals['bb_cnt'])
                    # we need to step to the return
current_addr = current.addr
while current.globals['bb_cnt'] < len(self._trace) and current_addr != self._trace[current.globals['bb_cnt']]:
current.globals['bb_cnt'] += 1
# step 1 more for the normal step that would happen
current.globals['bb_cnt'] += 1
l.debug("bb_cnt after the correction %d", current.globals['bb_cnt'])
if current.globals['bb_cnt'] >= len(self._trace):
return simgr
else:
l.error( "the dynamic trace and the symbolic trace disagreed")
l.error("[%s] dynamic [0x%x], symbolic [0x%x]",
self.project.filename,
self._trace[current.globals['bb_cnt']],
current.addr)
l.error("inputs was %r", current.preconstrainer.input_content)
if self._resiliency:
l.error("TracerMisfollowError encountered")
l.warning("entering no follow mode")
self._no_follow = True
else:
raise AngrTracerError
# maintain the predecessors list
self.predecessors.append(current)
self.predecessors.pop(0)
# Basic block's max size in angr is greater than the one in Qemu
# We follow the one in Qemu
if current.globals['bb_cnt'] >= len(self._trace):
bbl_max_bytes = 800
else:
y2 = self._trace[current.globals['bb_cnt']]
y1 = self._trace[current.globals['bb_cnt'] - 1]
bbl_max_bytes = y2 - y1
if bbl_max_bytes <= 0:
bbl_max_bytes = 800
# detect back loops (a block jumps back to the middle of itself) that have to be differentiated from the
            # case where the max block sizes don't match.
# this might still break for huge basic blocks with back loops, but it seems unlikely.
try:
bl = self.project.factory.block(self._trace[current.globals['bb_cnt']-1],
backup_state=current)
back_targets = set(bl.vex.constant_jump_targets) & set(bl.instruction_addrs)
if current.globals['bb_cnt'] < len(self._trace) and self._trace[current.globals['bb_cnt']] in back_targets:
target_to_jumpkind = bl.vex.constant_jump_targets_and_jumpkinds
if target_to_jumpkind[self._trace[current.globals['bb_cnt']]] == "Ijk_Boring":
bbl_max_bytes = 800
except (SimMemoryError, SimEngineError):
bbl_max_bytes = 800
# drop the missed stash before stepping, since driller needs missed paths later.
simgr.drop(stash='missed')
simgr._one_step(stash, size=bbl_max_bytes)
# if our input was preconstrained we have to keep on the lookout for unsat paths.
if current.preconstrainer._preconstrain_input:
simgr.stash(from_stash='unsat', to_stash='active')
simgr.drop(stash='unsat')
# if we stepped to a point where there are no active paths, return the simgr.
if len(simgr.active) == 0:
# possibly we want to have different behaviour if we're in crash mode.
return simgr
if len(simgr.active) > 1:
# if we get to this point there's more than one active path
# if we have to ditch the trace we use satisfiability
# or if a split occurs in a library routine
a_paths = simgr.active
if self._no_follow or all(map( lambda p: not self._address_in_binary(p.addr), a_paths)):
simgr.prune(to_stash='missed')
else:
l.debug("bb %d / %d", current.globals['bb_cnt'], len(self._trace))
if current.globals['bb_cnt'] < len(self._trace):
simgr.stash_not_addr(self._trace[current.globals['bb_cnt']], to_stash='missed')
if len(simgr.active) > 1: # rarely we get two active paths
simgr.prune(to_stash='missed')
if len(simgr.active) > 1: # might still be two active
simgr.stash(to_stash='missed', filter_func=lambda x: x.jumpkind == "Ijk_EmWarn")
# make sure we only have one or zero active paths at this point
assert len(simgr.active) < 2
# something weird... maybe we hit a rep instruction?
# qemu and vex have slightly different behaviors...
if not simgr.active[0].se.satisfiable():
l.info("detected small discrepancy between qemu and angr, "
"attempting to fix known cases")
# Have we corrected it?
corrected = False
# did our missed branch try to go back to a rep?
target = simgr.missed[0].addr
if self.project.arch.name == 'X86' or self.project.arch.name == 'AMD64':
                    # does it look like a rep? rep ret doesn't count!
if self.project.factory.block(target).bytes.startswith("\xf3") and \
not self.project.factory.block(target).bytes.startswith("\xf3\xc3"):
l.info("rep discrepency detected, repairing...")
# swap the stashes
simgr.move('missed', 'chosen')
simgr.move('active', 'missed')
simgr.move('chosen', 'active')
corrected = True
if not corrected:
l.warning("Unable to correct discrepancy between qemu and angr.")
return simgr
def _syscall(self, state):
syscall_addr = state.se.eval(state.ip)
args = None
# 0xa000008 is terminate, which we exclude from syscall statistics.
if self.project.loader.main_object.os == 'cgc' and syscall_addr != 0xa000008:
args = SYSCALL_CC['X86']['CGC'](self.project.arch).get_args(state, 4)
else:
            args = SYSCALL_CC[self.project.arch.name]['Linux'](self.project.arch).get_args(state, 4)
if args is not None:
d = {'addr': syscall_addr}
for i in xrange(4):
d['arg_%d' % i] = args[i]
d['arg_%d_symbolic' % i] = args[i].symbolic
self._syscalls.append(d)
def _address_in_binary(self, addr):
"""
Determine if address @addr is in the binary being traced.
:param addr: the address to test
:return: True if the address is in between the binary's min and max addresses.
"""
mb = self.project.loader.main_object
return mb.min_addr <= addr and addr < mb.max_addr
def _addr_in_plt(self, addr):
"""
Check if an address is inside the plt section
"""
plt = self.project.loader.main_object.sections_map.get('.plt', None)
return False if plt is None else addr >= plt.min_addr and addr <= plt.max_addr
@staticmethod
def _tracer_cache_cond(state):
if state.history.jumpkind.startswith('Ijk_Sys'):
sys_procedure = state.project.simos.syscall(state)
if sys_procedure.display_name == 'receive' and state.se.eval(state.posix.files[0].pos) == 0:
return True
return False
@staticmethod
def _tracer_load(container, simgr):
preconstrainer = simgr.one_active.preconstrainer
if type(preconstrainer.input_content) == str:
fs = {'/dev/stdin': SimFile("/dev/stdin", "r", size=len(preconstrainer.input_content))}
else:
fs = preconstrainer.input_content.stdin
project = simgr._project
cached_project = project.load_function(container)
if cached_project is not None:
cached_project.analyses = project.analyses
cached_project.surveyors = project.surveyors
cached_project.store_function = project.store_function
cached_project.load_function = project.load_function
state = cached_project.storage['cached_states'][0]
state.globals['bb_cnt'] = cached_project.storage['bb_cnt']
claripy.ast.base.var_counter = cached_project.storage['var_cnt']
cached_project.storage = None
# Setting up the cached state
state.project = cached_project
simgr._project = cached_project
# Hookup the new files
for name in fs:
fs[name].set_state(state)
for fd in state.posix.files:
if state.posix.files[fd].name == name:
state.posix.files[fd] = fs[name]
break
state.register_plugin('preconstrainer', preconstrainer)
state.history.recent_block_count = 0
# Setting the cached state to the simgr
simgr.stashes['active'] = [state]
else:
l.error("Something went wrong during Project unpickling for Tracer...")
@staticmethod
def _tracer_dump(container, simgr, stash):
if stash != 'active':
raise Exception("TODO: tracer doesn't work with stashes other than active")
s = simgr.stashes[stash][0]
project = s.project
s.project = None
s.history.trim()
project.storage['cached_states'] = [s]
project.storage['bb_cnt'] = s.globals['bb_cnt']
project.storage['var_cnt'] = claripy.ast.base.var_counter
project.store_function(container)
s.project = project
# Add preconstraints to state
s.preconstrainer.preconstrain_state()
|
bsd-2-clause
| 2,718,564,388,932,743,000
| 42.42328
| 133
| 0.569148
| false
| 4.130347
| false
| false
| false
|
antoine-de/navitia
|
source/jormungandr/jormungandr/interfaces/v1/Coord.py
|
1
|
3363
|
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from flask.ext.restful import abort, marshal
from jormungandr import i_manager
from jormungandr.interfaces.v1.ResourceUri import ResourceUri
from jormungandr.interfaces.v1.fields import address
from navitiacommon.type_pb2 import _NAVITIATYPE
from collections import OrderedDict
import datetime
class Coord(ResourceUri):
def get(self, region=None, lon=None, lat=None, id=None, *args, **kwargs):
if id is not None:
splitted = id.split(";")
if len(splitted) != 2:
abort(404, message='invalid coords [{}], should be <lon:lon>;<lat:lat>'.format(id))
lon, lat = splitted
try:
lon = float(lon)
lat = float(lat)
except ValueError:
abort(404, message='invalid coords [{}], should be <lon:lon>;<lat:lat>'.format(id))
if region is None:
regions = i_manager.get_regions("", lon, lat)
else:
regions = [region]
args = {
"uri": "coord:{}:{}".format(lon, lat),
"count": 1,
"distance": 200,
"type[]": ["address"],
"depth": 1,
"start_page": 0,
"filter": "",
"_current_datetime": datetime.datetime.utcnow()
}
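        # In effect this asks places_nearby for the single closest address
        # within 200 metres of the requested coordinate (depth 1, addresses only).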
self._register_interpreted_parameters(args)
result = OrderedDict()
for r in regions:
self.region = r
result.update(regions=[r])
pb_result = i_manager.dispatch(args, "places_nearby", instance_name=r)
if len(pb_result.places_nearby) > 0:
e_type = pb_result.places_nearby[0].embedded_type
if _NAVITIATYPE.values_by_name["ADDRESS"].number == e_type:
new_address = marshal(pb_result.places_nearby[0].address,
address)
result.update(address=new_address)
return result, 200
result.update(regions=regions)
result.update(message="No address for these coords")
return result, 404
|
agpl-3.0
| 307,073,991,383,605,900
| 37.655172
| 99
| 0.627119
| false
| 3.915017
| false
| false
| false
|
stdlib-js/stdlib
|
lib/node_modules/@stdlib/math/base/special/riemann-zeta/benchmark/python/scipy/benchmark.py
|
1
|
2194
|
#!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark scipy.special.zeta."""
from __future__ import print_function
import timeit
NAME = "zeta"
REPEATS = 3
ITERATIONS = 100000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = ITERATIONS / elapsed
print(" ---")
print(" iterations: " + str(ITERATIONS))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from scipy.special import zeta; from random import random;"
stmt = "y = zeta(random()*56.0 + 1.1)"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in range(REPEATS):
print("# python::scipy::" + NAME)
elapsed = t.timeit(number=ITERATIONS)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(REPEATS, REPEATS)
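# The helpers above emit TAP-style output; a run looks roughly like this
# (timing values are illustrative only):
#
#   TAP version 13
#   # python::scipy::zeta
#     ---
#     iterations: 100000
#     elapsed: 0.13
#     rate: 769230.77
#     ...
#   ok 1 benchmark finished
#   ...
#   1..3
#   # total 3
#   # pass 3
#   # ok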
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
|
apache-2.0
| 1,624,637,104,917,445,400
| 21.618557
| 74
| 0.628532
| false
| 3.662771
| false
| false
| false
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/lib-tk/tkColorChooser.py
|
1
|
1064
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: tkColorChooser.py
from tkCommonDialog import Dialog
class Chooser(Dialog):
"""Ask for a color"""
command = 'tk_chooseColor'
def _fixoptions(self):
try:
color = self.options['initialcolor']
if isinstance(color, tuple):
self.options['initialcolor'] = '#%02x%02x%02x' % color
except KeyError:
pass
def _fixresult(self, widget, result):
if not result or not str(result):
return (None, None)
else:
r, g, b = widget.winfo_rgb(result)
return (
(
r / 256, g / 256, b / 256), str(result))
def askcolor(color=None, **options):
"""Ask for a color"""
if color:
options = options.copy()
options['initialcolor'] = color
return Chooser(**options).show()
if __name__ == '__main__':
print 'color', askcolor()
|
unlicense
| 8,246,888,322,872,728,000
| 26.307692
| 70
| 0.56485
| false
| 3.57047
| false
| false
| false
|
hilljb/pyclojure
|
pyclojure/clojure.py
|
1
|
2708
|
#!/usr/bin/python2
""" A Python module wrapping nrepl to interact with a Leiningen Clojure REPL.
Some notes:
- All return values are unicode objects inside a dictionary.
- You need to explicitly kill the repl before exiting.
Example:
>>> import clojure
>>> repl = clojure.NREPL()
nREPL server started on port 57041 on host 127.0.0.1 - nrepl://127.0.0.1:57041
>>> repl.eval('(defn f [a b] (+ a b))')
{u'session': u'658b32e6-ee3f-4a44-aa24-06ce375e4fb4', u'ns': u'user', u'value': u"#'user/f"}
>>> repl.eval('(f 1 2)')
{u'session': u'32ca0012-0fc1-4170-977c-6d480f678766', u'ns': u'user', u'value': u'3'}
"""
__author__ = "Jason B. Hill"
__email__ = "jason@jasonbhill.com"
import os
import subprocess
import nrepl
class NREPL(object):
""" Create a Leiningen NREPL and interact with it.
"""
def __init__(self, port=None):
""" Initiate a Leiningen NREPL.
INPUT
-----
port : int : optional
The port to use for the NREPL server.
"""
# Make sure the port is a positive integer
if port:
if not isinstance(port, (int, long)):
raise TypeError("NREPL port must be an integer: %s given" % port)
if port < 1:
raise ValueError("NREPL port must be greater than zero: %s given" % port)
self.port = port
self.host = 'localhost'
# Form the command to execute
cmd = "lein repl :headless"
if self.port:
cmd += " :port %s" % self.port
# Execute the command
proc = subprocess.Popen(
cmd.split(),
stdout=subprocess.PIPE,
stderr=open(os.devnull,'w'),
stdin=open(os.devnull,'w')
)
# Get the return string and parse the port
retport = None
while not retport:
retline = proc.stdout.readline()
if 'server started' in retline:
print retline
retport = retline.split('port')[1].split()[0]
if retport:
self.port = retport
def eval(self, cmd):
""" Evaluate a command using the attached NREPL.
INPUT
-----
cmd : str
The command to execute.
OUTPUT
------
        A dictionary with u'session', u'ns', and u'value', printed to stdout.
"""
host_string = 'nrepl://' + str(self.host) + ':' + str(self.port)
c = nrepl.connect(host_string)
c.write({"op": "eval", "code": cmd})
print "%s" % c.read()
def exit(self):
""" Shut down the NREPL server.
This method should be called so the NREPL server is not zombied.
"""
self.eval('(System/exit 0)')
|
epl-1.0
| -8,702,635,536,927,137,000
| 26.08
| 92
| 0.553176
| false
| 3.563158
| false
| false
| false
|
aewallin/opencamlib
|
examples/python/waterline/waterline_2_tux_adapt.py
|
1
|
3113
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawLoops(myscreen, loops, loopcolor):
nloop = 0
for lop in loops:
n = 0
N = len(lop)
first_point=ocl.Point(-1,-1,5)
previous=ocl.Point(-1,-1,5)
for p in lop:
if n==0: # don't draw anything on the first iteration
previous=p
first_point = p
elif n== (N-1): # the last point
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) ) # the normal line
# and a line from p to the first point
myscreen.addActor( camvtk.Line(p1=(p.x,p.y,p.z),p2=(first_point.x,first_point.y,first_point.z),color=loopcolor) )
else:
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) )
previous=p
n=n+1
print("rendered loop ",nloop, " with ", len(lop), " points")
nloop = nloop+1
if __name__ == "__main__":
print(ocl.version())
myscreen = camvtk.VTKScreen()
#stl = camvtk.STLSurf("../../stl/demo.stl")
stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
#stl = camvtk.STLSurf("../../stl/waterline1.stl")
myscreen.addActor(stl)
stl.SetWireframe() # render tux as wireframe
#stl.SetSurface() # render tux as surface
stl.SetColor(camvtk.cyan)
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print("STL surface read,", s.size(), "triangles")
zh=1.75145
diam = 1.4
length = 500
loops = []
#cutter = ocl.CylCutter( diam , length )
cutter = ocl.BallCutter( diam , length )
#cutter = ocl.BullCutter( diam , diam/5, length )
wl = ocl.Waterline()
wl.setSTL(s)
wl.setCutter(cutter)
wl.setZ(zh)
wl.setSampling(0.5)
#wl.setThreads(5)
t_before = time.time()
wl.run()
t_after = time.time()
calctime = t_after-t_before
print(" Waterline done in ", calctime," s")
cutter_loops = wl.getLoops()
for l in cutter_loops:
loops.append(l)
aloops = []
awl = ocl.AdaptiveWaterline()
awl.setSTL(s)
awl.setCutter(cutter)
awl.setZ(zh)
awl.setSampling(0.1)
awl.setMinSampling(0.01)
#wl.setThreads(5)
t_before = time.time()
awl.run()
t_after = time.time()
calctime = t_after-t_before
print(" AdaptiveWaterline done in ", calctime," s")
acutter_loops = awl.getLoops()
for l in acutter_loops:
aloops.append(l)
print("All waterlines done. Got", len(loops)," loops in total.")
# draw the loops
drawLoops(myscreen, loops, camvtk.yellow)
drawLoops(myscreen, aloops, camvtk.red)
print("done.")
myscreen.camera.SetPosition(15, 13, 7)
myscreen.camera.SetFocalPoint(5, 5, 0)
camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
camvtk.drawOCLtext(myscreen)
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
|
lgpl-2.1
| -864,676,445,629,931,900
| 30.444444
| 138
| 0.592033
| false
| 2.858586
| false
| false
| false
|
hackfestca/cnb
|
cnb/modAvailable/CNBMMChuckKukDo.py
|
1
|
1398
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
CNB Matrix Module - chuckkukdo
'''
from cnb.cnbMatrixModule import CNBMatrixModule
class CNBMMChuckKukDo(CNBMatrixModule):
"""
"""
name = 'chuckkukdo'
usage = 'chuckkukdo'
desc = 'Print the Chuck Kuk Do (http://en.wikipedia.org/wiki/Chun_Kuk_Do)'
aliases = []
def __init__(self,log):
CNBMatrixModule.__init__(self,log)
def __del__(self):
pass
def processCmd(self, oMsg):
result = " \n \
1- I will develop myself to the maximum of my potential in all ways.\n \
2- I will forget the mistakes of the past and press on to greater achievements.\n \
3- I will continually work at developing love, happiness and loyalty in my family.\n \
4- I will look for the good in all people and make them feel worthwhile.\n \
5- If I have nothing good to say about a person, I will say nothing.\n \
6- I will always be as enthusiastic about the success of others as I am about my own.\n \
7- I will maintain an attitude of open-mindedness.\n \
8- I will maintain respect for those in authority and demonstrate this respect at all times.\n \
9- I will always remain loyal to my God, my country, family and my friends.\n \
10- I will remain highly goal-oriented throughout my life because that positive attitude helps my family, my country and myself.\n \
"
return result
|
gpl-3.0
| 5,698,115,309,628,994,000
| 33.097561
| 133
| 0.679542
| false
| 3.312796
| false
| false
| false
|
krishnab-datakind/mining-data-acquisition
|
data_gather/PackageConstants.py
|
1
|
4082
|
#!/usr/bin/python
"""
Package constants
"""
## MIT License
##
## Copyright (c) 2017, krishna bhogaonker
## Permission is hereby granted, free of charge, to any person obtaining a ## copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'krishna bhogaonker'
__copyright__ = 'copyright 2017'
__credits__ = ['krishna bhogaonker']
__license__ = "MIT"
__version__ = '0.1.0'
__maintainer__ = 'krishna bhogaonker'
__email__ = 'cyclotomiq@gmail.com'
__status__ = 'pre-alpha'
from aenum import Enum
class RequestTypes(Enum):
SIMPLEPOINTIMAGERY = 1
DIVAGIS = 2
COMPOSITEDPOINTIMAGERY = 3
class RequestStatusCodes(Enum):
CLOSED = 0
CREATED = 1
QUEUED = 2
PROCESSING = 3
COMPLETED = 4
REJECTED = 5
ERROR = 6
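# NOTE: ImageCollection is referenced below but neither imported nor defined in
# this module. Judging from the call sites it bundles (asset id, band list,
# start date, end date, resolution in metres); a minimal stand-in, offered as an
# assumption rather than the original class, could be:
#
# from collections import namedtuple
# ImageCollection = namedtuple(
#     'ImageCollection', ['asset_id', 'bands', 'start_date', 'end_date', 'resolution'])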
imgCollections = {'Landsat8' : ImageCollection('LANDSAT/LC08/C01/T1',
['B1','B2','B3','B4','B5','B6','B7','B8','B9','B10','B11','BQA'],
'04/13/2011',
'10/07/2017',
30),
'Landsat7' : ImageCollection('LANDSAT/LE07/C01/T1',
['B1','B2','B3','B4','B5','B6','B7'],
'01/01/1999',
'09/17/2017',
30),
'Landsat5' : ImageCollection('LANDSAT/LT05/C01/T1',
['B1','B2','B3','B4','B5','B6','B7'],
'01/01/1984',
'05/05/2012',
30),
'Sentinel2msi' : ImageCollection('COPERNICUS/S2',
['B1','B2','B3','B4','B5','B6','B7','B8','B8A','B9','B10','B11','QA10','QA20','QA60'],
'01/23/2015',
'10/20/2017',
30),
'Sentinel2sar' : ImageCollection('COPERNICUS/S1_GRD',
['VV', 'HH',['VV', 'VH'], ['HH','HV']],
'10/03/2014',
'10/20/2017',
30),
'ModisThermalAnomalies' : ImageCollection('MODIS/006/MOD14A1',
['FireMask', 'MaxFRP','sample', 'QA'],
'02/18/2000',
'10/23/2017',
30)
}
if __name__ == "__main__":
print('set of package constants.')
|
mit
| -3,745,854,114,414,277,600
| 48.780488
| 463
| 0.430671
| false
| 4.622877
| false
| false
| false
|
popazerty/dvbapp-gui2
|
skin.py
|
1
|
33432
|
from Tools.Profile import profile
profile("LOAD:ElementTree")
import xml.etree.cElementTree
import os
profile("LOAD:enigma_skin")
from enigma import eSize, ePoint, eRect, gFont, eWindow, eLabel, ePixmap, eWindowStyleManager, addFont, gRGB, eWindowStyleSkinned, getDesktop
from Components.config import ConfigSubsection, ConfigText, config
from Components.Sources.Source import ObsoleteSource
from Tools.Directories import resolveFilename, SCOPE_SKIN, SCOPE_SKIN_IMAGE, SCOPE_FONTS, SCOPE_ACTIVE_SKIN, SCOPE_ACTIVE_LCDSKIN, SCOPE_CURRENT_SKIN, SCOPE_CONFIG, fileExists
from Tools.Import import my_import
from Tools.LoadPixmap import LoadPixmap
from Components.RcModel import rc_model
colorNames = {}
# Predefined fonts, typically used in built-in screens and for components like
# the movie list and so on.
fonts = {
"Body": ("Regular", 18, 22, 16),
"ChoiceList": ("Regular", 20, 24, 18),
}
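# Each alias maps to (face, point size, line height, character width); the last
# two fields are what the "h" and "w" units in parseCoordinate() below refer to.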
def dump(x, i=0):
print " " * i + str(x)
try:
for n in x.childNodes:
dump(n, i + 1)
except:
None
class SkinError(Exception):
def __init__(self, message):
self.msg = message
def __str__(self):
return "{%s}: %s. Please contact the skin's author!" % (config.skin.primary_skin.value, self.msg)
class DisplaySkinError(Exception):
def __init__(self, message):
self.msg = message
def __str__(self):
return "{%s}: %s. Please contact the skin's author!" % (config.skin.display_skin.value, self.msg)
dom_skins = [ ]
def addSkin(name, scope = SCOPE_SKIN):
# read the skin
filename = resolveFilename(scope, name)
if fileExists(filename):
mpath = os.path.dirname(filename) + "/"
file = open(filename, 'r')
dom_skins.append((mpath, xml.etree.cElementTree.parse(file).getroot()))
file.close()
return True
return False
# get own skin_user_skinname.xml file, if exist
def skin_user_skinname():
name = "skin_user_" + config.skin.primary_skin.value[:config.skin.primary_skin.value.rfind('/')] + ".xml"
filename = resolveFilename(SCOPE_CONFIG, name)
if fileExists(filename):
return name
return None
# we do our best to always select the "right" value
# skins are loaded in order of priority: skin with
# highest priority is loaded last, usually the user-provided
# skin.
# currently, loadSingleSkinData (colors, bordersets etc.)
# are applied one-after-each, in order of ascending priority.
# the dom_skin will keep all screens in descending priority,
# so the first screen found will be used.
# example: loadSkin("nemesis_greenline/skin.xml")
config.skin = ConfigSubsection()
DEFAULT_SKIN = "a4you/skin.xml"
if not fileExists(resolveFilename(SCOPE_SKIN, DEFAULT_SKIN)):
# in that case, fallback to Magic (which is an SD skin)
DEFAULT_SKIN = "skin.xml"
config.skin.primary_skin = ConfigText(default=DEFAULT_SKIN)
DEFAULT_DISPLAY_SKIN = "skin_display.xml"
config.skin.display_skin = ConfigText(default=DEFAULT_DISPLAY_SKIN)
profile("LoadSkin")
try:
name = skin_user_skinname()
if name is not None:
addSkin(name, SCOPE_CONFIG)
else:
addSkin('skin_user.xml', SCOPE_CONFIG)
except (SkinError, IOError, AssertionError), err:
print "not loading user skin: ", err
# some boxes lie about their dimensions
addSkin('skin_box.xml')
# add optional discrete second infobar
addSkin('skin_second_infobar.xml')
display_skin_id = 1
try:
if not addSkin(os.path.join('display', config.skin.display_skin.value)):
raise DisplaySkinError, "display skin not found"
except Exception, err:
print "SKIN ERROR:", err
skin = DEFAULT_DISPLAY_SKIN
if config.skin.display_skin.value == skin:
skin = 'skin_display.xml'
print "defaulting to standard display skin...", skin
config.skin.display_skin.value = skin
skin = os.path.join('display', skin)
addSkin(skin)
del skin
addSkin('skin_subtitles.xml')
try:
if not addSkin(config.skin.primary_skin.value):
raise SkinError, "primary skin not found"
except Exception, err:
print "SKIN ERROR:", err
skin = DEFAULT_SKIN
if config.skin.primary_skin.value == skin:
skin = 'skin.xml'
print "defaulting to standard skin...", skin
config.skin.primary_skin.value = skin
addSkin(skin)
del skin
addSkin('skin_default.xml')
profile("LoadSkinDefaultDone")
def parseCoordinate(s, e, size=0, font=None):
s = s.strip()
if s == "center":
val = (e - size)/2
elif s == '*':
return None
else:
		if s[0] == 'e':
val = e
s = s[1:]
		elif s[0] == 'c':
val = e/2
s = s[1:]
else:
val = 0
if s:
			if s[-1] == '%':
val += e * int(s[:-1]) / 100
			elif s[-1] == 'w':
val += fonts[font][3] * int(s[:-1])
			elif s[-1] == 'h':
val += fonts[font][2] * int(s[:-1])
else:
val += int(s)
if val < 0:
val = 0
return val
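# Informal examples of the grammar handled above, assuming a 720 px wide parent:
# "center" with size 100 -> 310, "e-40" -> 680, "c+10" -> 370, "13%" -> 93,
# and "2w" adds twice the character width of the aliased font passed in 'font'.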
def getParentSize(object, desktop):
size = eSize()
if object:
parent = object.getParent()
# For some widgets (e.g. ScrollLabel) the skin attributes are applied to
# a child widget, instead of to the widget itself. In that case, the parent
# we have here is not the real parent, but it is the main widget.
# We have to go one level higher to get the actual parent.
# We can detect this because the 'parent' will not have a size yet
# (the main widget's size will be calculated internally, as soon as the child
# widget has parsed the skin attributes)
if parent and parent.size().isEmpty():
parent = parent.getParent()
if parent:
size = parent.size()
elif desktop:
#widget has no parent, use desktop size instead for relative coordinates
size = desktop.size()
return size
def parsePosition(s, scale, object = None, desktop = None, size = None):
x, y = s.split(',')
parentsize = eSize()
if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')):
parentsize = getParentSize(object, desktop)
xval = parseCoordinate(x, parentsize.width(), size and size.width())
yval = parseCoordinate(y, parentsize.height(), size and size.height())
return ePoint(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])
def parseSize(s, scale, object = None, desktop = None):
x, y = s.split(',')
parentsize = eSize()
if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')):
parentsize = getParentSize(object, desktop)
xval = parseCoordinate(x, parentsize.width())
yval = parseCoordinate(y, parentsize.height())
return eSize(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])
def parseFont(s, scale):
try:
f = fonts[s]
name = f[0]
size = f[1]
except:
name, size = s.split(';')
return gFont(name, int(size) * scale[0][0] / scale[0][1])
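# parseFont() accepts either an alias from the fonts dict above (e.g. "Body") or
# an inline "Name;Size" spec such as "Regular;22"; both are returned as a gFont
# scaled by the skin's horizontal scale factor.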
def parseColor(s):
if s[0] != '#':
try:
return colorNames[s]
except:
raise SkinError("color '%s' must be #aarrggbb or valid named color" % s)
return gRGB(int(s[1:], 0x10))
def collectAttributes(skinAttributes, node, context, skin_path_prefix=None, ignore=(), filenames=frozenset(("pixmap", "pointer", "seek_pointer", "backgroundPixmap", "selectionPixmap", "sliderPixmap", "scrollbarbackgroundPixmap"))):
# walk all attributes
size = None
pos = None
font = None
for attrib, value in node.items():
if attrib not in ignore:
if attrib in filenames:
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, value, path_prefix=skin_path_prefix)
if fileExists(resolveFilename(SCOPE_ACTIVE_LCDSKIN, value, path_prefix=skin_path_prefix)):
pngfile = resolveFilename(SCOPE_ACTIVE_LCDSKIN, value, path_prefix=skin_path_prefix)
value = pngfile
# Bit of a hack this, really. When a window has a flag (e.g. wfNoBorder)
# it needs to be set at least before the size is set, in order for the
# window dimensions to be calculated correctly in all situations.
# If wfNoBorder is applied after the size has been set, the window will fail to clear the title area.
# Similar situation for a scrollbar in a listbox; when the scrollbar setting is applied after
# the size, a scrollbar will not be shown until the selection moves for the first time
if attrib == 'size':
size = value.encode("utf-8")
elif attrib == 'position':
pos = value.encode("utf-8")
elif attrib == 'font':
font = value.encode("utf-8")
skinAttributes.append((attrib, font))
else:
skinAttributes.append((attrib, value.encode("utf-8")))
if pos is not None:
pos, size = context.parse(pos, size, font)
skinAttributes.append(('position', pos))
if size is not None:
skinAttributes.append(('size', size))
def morphRcImagePath(value):
if rc_model.rcIsDefault() is False:
		if 'rc.png' in value or 'oldrc.png' in value:
value = rc_model.getRcLocation() + 'rc.png'
return value
def loadPixmap(path, desktop):
option = path.find("#")
if option != -1:
path = path[:option]
ptr = LoadPixmap(morphRcImagePath(path), desktop)
if ptr is None:
raise SkinError("pixmap file %s not found!" % path)
return ptr
class AttributeParser:
def __init__(self, guiObject, desktop, scale = ((1,1),(1,1))):
self.guiObject = guiObject
self.desktop = desktop
self.scale = scale
def applyOne(self, attrib, value):
try:
getattr(self, attrib)(value)
except AttributeError:
print "[Skin] Attribute not implemented:", attrib, "value:", value
except SkinError, ex:
print "[Skin] Error:", ex
def applyAll(self, attrs):
for attrib, value in attrs:
try:
getattr(self, attrib)(value)
except AttributeError:
print "[Skin] Attribute not implemented:", attrib, "value:", value
except SkinError, ex:
print "[Skin] Error:", ex
def conditional(self, value):
pass
def position(self, value):
if isinstance(value, tuple):
self.guiObject.move(ePoint(*value))
else:
self.guiObject.move(parsePosition(value, self.scale, self.guiObject, self.desktop, self.guiObject.csize()))
def size(self, value):
if isinstance(value, tuple):
self.guiObject.resize(eSize(*value))
else:
self.guiObject.resize(parseSize(value, self.scale, self.guiObject, self.desktop))
def title(self, value):
self.guiObject.setTitle(_(value))
def text(self, value):
self.guiObject.setText(_(value))
def font(self, value):
self.guiObject.setFont(parseFont(value, self.scale))
def zPosition(self, value):
self.guiObject.setZPosition(int(value))
def itemHeight(self, value):
self.guiObject.setItemHeight(int(value))
def pixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setPixmap(ptr)
def backgroundPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setBackgroundPicture(ptr)
def selectionPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setSelectionPicture(ptr)
def sliderPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setSliderPicture(ptr)
def scrollbarbackgroundPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setScrollbarBackgroundPicture(ptr)
def alphatest(self, value):
self.guiObject.setAlphatest(
{ "on": 1,
"off": 0,
"blend": 2,
}[value])
def scale(self, value):
self.guiObject.setScale(1)
def orientation(self, value): # used by eSlider
try:
self.guiObject.setOrientation(*
{ "orVertical": (self.guiObject.orVertical, False),
"orTopToBottom": (self.guiObject.orVertical, False),
"orBottomToTop": (self.guiObject.orVertical, True),
"orHorizontal": (self.guiObject.orHorizontal, False),
"orLeftToRight": (self.guiObject.orHorizontal, False),
"orRightToLeft": (self.guiObject.orHorizontal, True),
}[value])
except KeyError:
print "oprientation must be either orVertical or orHorizontal!, not %s. Please contact the skin's author!" % value
def valign(self, value):
try:
self.guiObject.setVAlign(
{ "top": self.guiObject.alignTop,
"center": self.guiObject.alignCenter,
"bottom": self.guiObject.alignBottom
}[value])
except KeyError:
print "valign must be either top, center or bottom!, not %s. Please contact the skin's author!" % value
def halign(self, value):
try:
self.guiObject.setHAlign(
{ "left": self.guiObject.alignLeft,
"center": self.guiObject.alignCenter,
"right": self.guiObject.alignRight,
"block": self.guiObject.alignBlock
}[value])
except KeyError:
print "halign must be either left, center, right or block!, not %s. Please contact the skin's author!" % value
def textOffset(self, value):
x, y = value.split(',')
self.guiObject.setTextOffset(ePoint(int(x) * self.scale[0][0] / self.scale[0][1], int(y) * self.scale[1][0] / self.scale[1][1]))
def flags(self, value):
flags = value.split(',')
for f in flags:
try:
fv = eWindow.__dict__[f]
self.guiObject.setFlag(fv)
except KeyError:
print "illegal flag %s!" % f
def backgroundColor(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def backgroundColorSelected(self, value):
self.guiObject.setBackgroundColorSelected(parseColor(value))
def foregroundColor(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def foregroundColorSelected(self, value):
self.guiObject.setForegroundColorSelected(parseColor(value))
def foregroundNotCrypted(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def backgroundNotCrypted(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def foregroundCrypted(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def backgroundCrypted(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def foregroundEncrypted(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def backgroundEncrypted(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def shadowColor(self, value):
self.guiObject.setShadowColor(parseColor(value))
def selectionDisabled(self, value):
self.guiObject.setSelectionEnable(0)
def transparent(self, value):
self.guiObject.setTransparent(int(value))
def borderColor(self, value):
self.guiObject.setBorderColor(parseColor(value))
def borderWidth(self, value):
self.guiObject.setBorderWidth(int(value))
def scrollbarMode(self, value):
self.guiObject.setScrollbarMode(getattr(self.guiObject, value))
# { "showOnDemand": self.guiObject.showOnDemand,
# "showAlways": self.guiObject.showAlways,
# "showNever": self.guiObject.showNever,
# "showLeft": self.guiObject.showLeft
# }[value])
def enableWrapAround(self, value):
self.guiObject.setWrapAround(True)
def itemHeight(self, value):
self.guiObject.setItemHeight(int(value))
def pointer(self, value):
(name, pos) = value.split(':')
pos = parsePosition(pos, self.scale)
ptr = loadPixmap(name, self.desktop)
self.guiObject.setPointer(0, ptr, pos)
def seek_pointer(self, value):
(name, pos) = value.split(':')
pos = parsePosition(pos, self.scale)
ptr = loadPixmap(name, self.desktop)
self.guiObject.setPointer(1, ptr, pos)
def shadowOffset(self, value):
self.guiObject.setShadowOffset(parsePosition(value, self.scale))
def noWrap(self, value):
self.guiObject.setNoWrap(1)
def applySingleAttribute(guiObject, desktop, attrib, value, scale = ((1,1),(1,1))):
# Someone still using applySingleAttribute?
AttributeParser(guiObject, desktop, scale).applyOne(attrib, value)
def applyAllAttributes(guiObject, desktop, attributes, scale):
AttributeParser(guiObject, desktop, scale).applyAll(attributes)
def loadSingleSkinData(desktop, skin, path_prefix):
"""loads skin data like colors, windowstyle etc."""
assert skin.tag == "skin", "root element in skin must be 'skin'!"
for c in skin.findall("output"):
id = c.attrib.get('id')
if id:
id = int(id)
else:
id = 0
if id == 0: # framebuffer
for res in c.findall("resolution"):
get_attr = res.attrib.get
xres = get_attr("xres")
if xres:
xres = int(xres)
else:
xres = 720
yres = get_attr("yres")
if yres:
yres = int(yres)
else:
yres = 576
bpp = get_attr("bpp")
if bpp:
bpp = int(bpp)
else:
bpp = 32
#print "Resolution:", xres,yres,bpp
from enigma import gMainDC
gMainDC.getInstance().setResolution(xres, yres)
desktop.resize(eSize(xres, yres))
if bpp != 32:
# load palette (not yet implemented)
pass
for skininclude in skin.findall("include"):
filename = skininclude.attrib.get("filename")
if filename:
skinfile = resolveFilename(SCOPE_ACTIVE_SKIN, filename, path_prefix=path_prefix)
if not fileExists(skinfile):
skinfile = resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)
if fileExists(skinfile):
print "[SKIN] loading include:", skinfile
loadSkin(skinfile)
for c in skin.findall("colors"):
for color in c.findall("color"):
get_attr = color.attrib.get
name = get_attr("name")
color = get_attr("value")
if name and color:
colorNames[name] = parseColor(color)
#print "Color:", name, color
else:
raise SkinError("need color and name, got %s %s" % (name, color))
for c in skin.findall("fonts"):
for font in c.findall("font"):
get_attr = font.attrib.get
filename = get_attr("filename", "<NONAME>")
name = get_attr("name", "Regular")
scale = get_attr("scale")
if scale:
scale = int(scale)
else:
scale = 100
is_replacement = get_attr("replacement") and True or False
render = get_attr("render")
if render:
render = int(render)
else:
render = 0
resolved_font = resolveFilename(SCOPE_FONTS, filename, path_prefix=path_prefix)
if not fileExists(resolved_font): #when font is not available look at current skin path
resolved_font = resolveFilename(SCOPE_ACTIVE_SKIN, filename)
if fileExists(resolveFilename(SCOPE_CURRENT_SKIN, filename)):
resolved_font = resolveFilename(SCOPE_CURRENT_SKIN, filename)
elif fileExists(resolveFilename(SCOPE_ACTIVE_LCDSKIN, filename)):
resolved_font = resolveFilename(SCOPE_ACTIVE_LCDSKIN, filename)
addFont(resolved_font, name, scale, is_replacement, render)
#print "Font: ", resolved_font, name, scale, is_replacement
for alias in c.findall("alias"):
get = alias.attrib.get
try:
name = get("name")
font = get("font")
size = int(get("size"))
height = int(get("height", size)) # to be calculated some day
width = int(get("width", size))
global fonts
fonts[name] = (font, size, height, width)
except Exception, ex:
print "[SKIN] bad font alias", ex
for c in skin.findall("subtitles"):
from enigma import eSubtitleWidget
scale = ((1,1),(1,1))
for substyle in c.findall("sub"):
get_attr = substyle.attrib.get
font = parseFont(get_attr("font"), scale)
col = get_attr("foregroundColor")
if col:
foregroundColor = parseColor(col)
haveColor = 1
else:
foregroundColor = gRGB(0xFFFFFF)
haveColor = 0
col = get_attr("borderColor")
if col:
borderColor = parseColor(col)
else:
borderColor = gRGB(0)
borderwidth = get_attr("borderWidth")
if borderwidth is None:
# default: use a subtitle border
borderWidth = 3
else:
borderWidth = int(borderwidth)
face = eSubtitleWidget.__dict__[get_attr("name")]
eSubtitleWidget.setFontStyle(face, font, haveColor, foregroundColor, borderColor, borderWidth)
for windowstyle in skin.findall("windowstyle"):
style = eWindowStyleSkinned()
style_id = windowstyle.attrib.get("id")
if style_id:
style_id = int(style_id)
else:
style_id = 0
# defaults
font = gFont("Regular", 20)
offset = eSize(20, 5)
for title in windowstyle.findall("title"):
get_attr = title.attrib.get
offset = parseSize(get_attr("offset"), ((1,1),(1,1)))
font = parseFont(get_attr("font"), ((1,1),(1,1)))
style.setTitleFont(font)
style.setTitleOffset(offset)
#print " ", font, offset
for borderset in windowstyle.findall("borderset"):
bsName = str(borderset.attrib.get("name"))
for pixmap in borderset.findall("pixmap"):
get_attr = pixmap.attrib.get
bpName = get_attr("pos")
filename = get_attr("filename")
if filename and bpName:
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, filename, path_prefix=path_prefix)
if fileExists(resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)):
pngfile = resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)
png = loadPixmap(pngfile, desktop)
style.setPixmap(eWindowStyleSkinned.__dict__[bsName], eWindowStyleSkinned.__dict__[bpName], png)
#print " borderset:", bpName, filename
for color in windowstyle.findall("color"):
get_attr = color.attrib.get
colorType = get_attr("name")
color = parseColor(get_attr("color"))
try:
style.setColor(eWindowStyleSkinned.__dict__["col" + colorType], color)
except:
raise SkinError("Unknown color %s" % colorType)
#pass
#print " color:", type, color
x = eWindowStyleManager.getInstance()
x.setStyle(style_id, style)
for margin in skin.findall("margin"):
style_id = margin.attrib.get("id")
if style_id:
style_id = int(style_id)
else:
style_id = 0
r = eRect(0,0,0,0)
v = margin.attrib.get("left")
if v:
r.setLeft(int(v))
v = margin.attrib.get("top")
if v:
r.setTop(int(v))
v = margin.attrib.get("right")
if v:
r.setRight(int(v))
v = margin.attrib.get("bottom")
if v:
r.setBottom(int(v))
# the "desktop" parameter is hardcoded to the UI screen, so we must ask
# for the one that this actually applies to.
getDesktop(style_id).setMargins(r)
dom_screens = {}
def loadSkin(name, scope = SCOPE_SKIN):
# Now a utility for plugins to add skin data to the screens
global dom_screens, display_skin_id
filename = resolveFilename(scope, name)
if fileExists(filename):
path = os.path.dirname(filename) + "/"
file = open(filename, 'r')
for elem in xml.etree.cElementTree.parse(file).getroot():
if elem.tag == 'screen':
name = elem.attrib.get('name', None)
if name:
sid = elem.attrib.get('id', None)
if sid and (sid != display_skin_id):
# not for this display
elem.clear()
continue
if name in dom_screens:
# Clear old versions, save memory
dom_screens[name][0].clear()
dom_screens[name] = (elem, path)
else:
elem.clear()
else:
elem.clear()
file.close()
def loadSkinData(desktop):
# Kinda hackish, but this is called once by mytest.py
global dom_skins
skins = dom_skins[:]
skins.reverse()
for (path, dom_skin) in skins:
loadSingleSkinData(desktop, dom_skin, path)
for elem in dom_skin:
if elem.tag == 'screen':
name = elem.attrib.get('name', None)
if name:
sid = elem.attrib.get('id', None)
if sid and (sid != display_skin_id):
# not for this display
elem.clear()
continue
if name in dom_screens:
# Kill old versions, save memory
dom_screens[name][0].clear()
dom_screens[name] = (elem, path)
else:
# without name, it's useless!
elem.clear()
else:
# non-screen element, no need for it any longer
elem.clear()
# no longer needed, we know where the screens are now.
#del dom_skins
#Developer Mode
dom_skins = []
class additionalWidget:
def __init__(self):
pass
# Class that makes a tuple look like something else. Some plugins just assume
# that size is a string and try to parse it. This class makes that work.
class SizeTuple(tuple):
def split(self, *args):
return str(self[0]), str(self[1])
def strip(self, *args):
return '%s,%s' % self
def __str__(self):
return '%s,%s' % self
class SkinContext:
def __init__(self, parent=None, pos=None, size=None, font=None):
if parent is not None:
if pos is not None:
pos, size = parent.parse(pos, size, font)
self.x, self.y = pos
self.w, self.h = size
else:
self.x = None
self.y = None
self.w = None
self.h = None
def __str__(self):
return "Context (%s,%s)+(%s,%s) " % (self.x, self.y, self.w, self.h)
def parse(self, pos, size, font):
if pos == "fill":
pos = (self.x, self.y)
size = (self.w, self.h)
self.w = 0
self.h = 0
else:
w,h = size.split(',')
w = parseCoordinate(w, self.w, 0, font)
h = parseCoordinate(h, self.h, 0, font)
if pos == "bottom":
pos = (self.x, self.y + self.h - h)
size = (self.w, h)
self.h -= h
elif pos == "top":
pos = (self.x, self.y)
size = (self.w, h)
self.h -= h
self.y += h
elif pos == "left":
pos = (self.x, self.y)
size = (w, self.h)
self.x += w
self.w -= w
elif pos == "right":
pos = (self.x + self.w - w, self.y)
size = (w, self.h)
self.w -= w
else:
size = (w, h)
pos = pos.split(',')
pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font))
return SizeTuple(pos), SizeTuple(size)
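# Informal example of how SkinContext.parse() carves up a parent: in a 1280x720
# context, a widget with position="top" and size="*,80" gets placed at the top
# with size (1280, 80) and leaves a 1280x640 context for the widgets that
# follow, while position="fill" consumes whatever space is still left.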
class SkinContextStack(SkinContext):
# A context that stacks things instead of aligning them
def parse(self, pos, size, font):
if pos == "fill":
pos = (self.x, self.y)
size = (self.w, self.h)
else:
w,h = size.split(',')
w = parseCoordinate(w, self.w, 0, font)
h = parseCoordinate(h, self.h, 0, font)
if pos == "bottom":
pos = (self.x, self.y + self.h - h)
size = (self.w, h)
elif pos == "top":
pos = (self.x, self.y)
size = (self.w, h)
elif pos == "left":
pos = (self.x, self.y)
size = (w, self.h)
elif pos == "right":
pos = (self.x + self.w - w, self.y)
size = (w, self.h)
else:
size = (w, h)
pos = pos.split(',')
pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font))
return SizeTuple(pos), SizeTuple(size)
def readSkin(screen, skin, names, desktop):
if config.atemio.skindevelopermode.value:
		# it's not elegant and has low performance... but for skin development it is great!
addSkin(config.skin.primary_skin.value)
loadSkinData(desktop)
if not isinstance(names, list):
names = [names]
	# try all skins; the first existing one has priority
global dom_screens
for n in names:
myscreen, path = dom_screens.get(n, (None,None))
if myscreen is not None:
# use this name for debug output
name = n
break
else:
name = "<embedded-in-'%s'>" % screen.__class__.__name__
# otherwise try embedded skin
if myscreen is None:
myscreen = getattr(screen, "parsedSkin", None)
# try uncompiled embedded skin
if myscreen is None and getattr(screen, "skin", None):
skin = screen.skin
print "[SKIN] Parsing embedded skin", name
if isinstance(skin, tuple):
for s in skin:
candidate = xml.etree.cElementTree.fromstring(s)
if candidate.tag == 'screen':
sid = candidate.attrib.get('id', None)
if (not sid) or (int(sid) == display_skin_id):
myscreen = candidate
break
else:
print "[SKIN] Hey, no suitable screen!"
else:
myscreen = xml.etree.cElementTree.fromstring(skin)
if myscreen:
screen.parsedSkin = myscreen
if myscreen is None:
print "[SKIN] No skin to read..."
myscreen = screen.parsedSkin = xml.etree.cElementTree.fromstring("<screen></screen>")
screen.skinAttributes = [ ]
skin_path_prefix = getattr(screen, "skin_path", path)
context = SkinContextStack()
s = desktop.bounds()
context.x = s.left()
context.y = s.top()
context.w = s.width()
context.h = s.height()
del s
collectAttributes(screen.skinAttributes, myscreen, context, skin_path_prefix, ignore=("name",))
context = SkinContext(context, myscreen.attrib.get('position'), myscreen.attrib.get('size'))
screen.additionalWidgets = [ ]
screen.renderer = [ ]
visited_components = set()
# now walk all widgets and stuff
def process_none(widget, context):
pass
def process_widget(widget, context):
get_attr = widget.attrib.get
# ok, we either have 1:1-mapped widgets ('old style'), or 1:n-mapped
# widgets (source->renderer).
wname = get_attr('name')
wsource = get_attr('source')
if wname is None and wsource is None:
print "widget has no name and no source!"
return
if wname:
#print "Widget name=", wname
visited_components.add(wname)
# get corresponding 'gui' object
try:
attributes = screen[wname].skinAttributes = [ ]
except:
raise SkinError("component with name '" + wname + "' was not found in skin of screen '" + name + "'!")
# assert screen[wname] is not Source
collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('name',))
elif wsource:
# get corresponding source
#print "Widget source=", wsource
while True: # until we found a non-obsolete source
				# parse our current "wsource", which might specify a "related screen" before the dot,
# for example to reference a parent, global or session-global screen.
scr = screen
# resolve all path components
path = wsource.split('.')
while len(path) > 1:
scr = screen.getRelatedScreen(path[0])
if scr is None:
#print wsource
#print name
raise SkinError("specified related screen '" + wsource + "' was not found in screen '" + name + "'!")
path = path[1:]
# resolve the source.
source = scr.get(path[0])
if isinstance(source, ObsoleteSource):
# however, if we found an "obsolete source", issue warning, and resolve the real source.
print "WARNING: SKIN '%s' USES OBSOLETE SOURCE '%s', USE '%s' INSTEAD!" % (name, wsource, source.new_source)
print "OBSOLETE SOURCE WILL BE REMOVED %s, PLEASE UPDATE!" % source.removal_date
if source.description:
print source.description
wsource = source.new_source
else:
# otherwise, use that source.
break
if source is None:
raise SkinError("source '" + wsource + "' was not found in screen '" + name + "'!")
wrender = get_attr('render')
if not wrender:
raise SkinError("you must define a renderer with render= for source '%s'" % wsource)
for converter in widget.findall("convert"):
ctype = converter.get('type')
assert ctype, "'convert'-tag needs a 'type'-attribute"
#print "Converter:", ctype
try:
parms = converter.text.strip()
except:
parms = ""
#print "Params:", parms
converter_class = my_import('.'.join(("Components", "Converter", ctype))).__dict__.get(ctype)
c = None
for i in source.downstream_elements:
if isinstance(i, converter_class) and i.converter_arguments == parms:
c = i
if c is None:
c = converter_class(parms)
c.connect(source)
source = c
renderer_class = my_import('.'.join(("Components", "Renderer", wrender))).__dict__.get(wrender)
renderer = renderer_class() # instantiate renderer
renderer.connect(source) # connect to source
attributes = renderer.skinAttributes = [ ]
collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('render', 'source'))
screen.renderer.append(renderer)
def process_applet(widget, context):
try:
codeText = widget.text.strip()
widgetType = widget.attrib.get('type')
code = compile(codeText, "skin applet", "exec")
except Exception, ex:
raise SkinError("applet failed to compile: " + str(ex))
if widgetType == "onLayoutFinish":
screen.onLayoutFinish.append(code)
else:
raise SkinError("applet type '%s' unknown!" % widgetType)
def process_elabel(widget, context):
w = additionalWidget()
w.widget = eLabel
w.skinAttributes = [ ]
collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
screen.additionalWidgets.append(w)
def process_epixmap(widget, context):
w = additionalWidget()
w.widget = ePixmap
w.skinAttributes = [ ]
collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
screen.additionalWidgets.append(w)
def process_screen(widget, context):
for w in widget.getchildren():
conditional = w.attrib.get('conditional')
if conditional and not [i for i in conditional.split(",") if i in screen.keys()]:
continue
p = processors.get(w.tag, process_none)
try:
p(w, context)
except SkinError, e:
print "[Skin] SKIN ERROR in screen '%s' widget '%s':" % (name, w.tag), e
def process_panel(widget, context):
n = widget.attrib.get('name')
if n:
try:
s = dom_screens[n]
except KeyError:
print "[SKIN] Unable to find screen '%s' referred in screen '%s'" % (n, name)
else:
process_screen(s[0], context)
layout = widget.attrib.get('layout')
if layout == 'stack':
cc = SkinContextStack
else:
cc = SkinContext
try:
c = cc(context, widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'))
except Exception, ex:
raise SkinError("Failed to create skincontext (%s,%s,%s) in %s: %s" % (widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'), context, ex) )
process_screen(widget, c)
processors = {
None: process_none,
"widget": process_widget,
"applet": process_applet,
"eLabel": process_elabel,
"ePixmap": process_epixmap,
"panel": process_panel
}
try:
context.x = 0 # reset offsets, all components are relative to screen
context.y = 0 # coordinates.
process_screen(myscreen, context)
except Exception, e:
print "[Skin] SKIN ERROR in %s:" % name, e
from Components.GUIComponent import GUIComponent
nonvisited_components = [x for x in set(screen.keys()) - visited_components if isinstance(x, GUIComponent)]
assert not nonvisited_components, "the following components in %s don't have a skin entry: %s" % (name, ', '.join(nonvisited_components))
# This may look pointless, but it unbinds 'screen' from the nested scope. A better
# solution is to avoid the nested scope above and use the context object to pass
# things around.
screen = None
visited_components = None
|
gpl-2.0
| 5,488,487,470,184,430,000
| 32.465465
| 231
| 0.679559
| false
| 3.049252
| true
| false
| false
|
filannim/ManTIME
|
mantime/settings.py
|
1
|
1226
|
#!/usr/bin/env python
#
# Copyright 2014 Michele Filannino
#
# gnTEAM, School of Computer Science, University of Manchester.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU General Public License.
#
# author: Michele Filannino
# email: filannim@cs.man.ac.uk
#
# For details, see www.cs.man.ac.uk/~filannim/
'''This file contains some absolute paths that you need to customize according
to your installation.'''
import os
HOME = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
LANGUAGE = 'english'
PATH_CRF_PP_ENGINE_TRAIN = os.path.abspath(os.environ['MANTIME_CRF_TRAIN'])
PATH_CRF_PP_ENGINE_TEST = os.path.abspath(os.environ['MANTIME_CRF_TEST'])
PATH_CORENLP_FOLDER = os.path.abspath(os.environ['MANTIME_CORENLP_FOLDER'])
PATH_CRF_CONSISTENCY_MODULE = os.path.join(HOME, 'components/make_consistent.py')
PATH_CRF_ADJUSTMENT_MODULE = os.path.join(HOME, 'components/make_adjusted.py')
PATH_MODEL_FOLDER = './models'
EVENT_ATTRIBUTES = ('class', 'pos', 'tense', 'aspect', 'polarity', 'modality')
# EVENT_ATTRIBUTES = ('type', 'polarity', 'modality', 'sec_time_rel')
NO_ATTRIBUTE = 'n/a'
GAZETTEER_FOLDER = 'mantime/data/gazetteer/'
SENTENCE_WINDOW_RELATION = 1
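# Illustrative note, not part of the original module: the three os.environ
# lookups above raise a KeyError unless the variables are set before this
# module is imported, e.g. (hypothetical paths):
#
#   os.environ['MANTIME_CRF_TRAIN'] = '/opt/crf++/bin/crf_learn'
#   os.environ['MANTIME_CRF_TEST'] = '/opt/crf++/bin/crf_test'
#   os.environ['MANTIME_CORENLP_FOLDER'] = '/opt/stanford-corenlp'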
|
bsd-2-clause
| 6,687,968,761,473,981,000
| 37.3125
| 78
| 0.730016
| false
| 2.898345
| false
| false
| false
|
seomoz/roger-mesos-tools
|
bin/roger.py
|
1
|
2779
|
#!/usr/bin/python
from __future__ import print_function
import os
import sys
import subprocess
import re
import importlib
from cli.utils import Utils
def print_help_opt(opt, desc):
print(" {} {}".format(opt.ljust(13), desc))
def roger_help(root, commands):
print("usage: roger [-h] [-v] command [arg...]\n")
print("a command line interface to work with roger mesos.")
print("\npositional arguments:")
print_help_opt("command", "command to run.")
print_help_opt("arg", "arguments to pass to the command.")
print("\noptional arguments:")
print_help_opt("-h, --help", "show this help message and exit.")
print_help_opt("-v, --version", "show version information and exit.")
print("\ncommands:")
sys.path.append("{}/cli".format(root))
for command in commands:
description = ""
module_name = "roger_" + command
cmd_module = importlib.import_module(module_name)
try:
description = cmd_module.describe()
except Exception as e:
pass
print_help_opt(command, description)
print("\nrun: 'roger < command > -h' for more information on a command.")
def getFiles(directory):
filenames = next(os.walk(directory))[2]
return filenames
def getCommands(files):
commands = set()
for filename in files:
if filename.startswith("roger_"):
commands.add(re.split("roger_|\.", filename)[1])
return sorted(commands)
def getScriptCall(root, command, command_args):
script_call = "roger_{}.py".format(command)
for command_arg in command_args:
script_call = script_call + " {}".format(command_arg)
return script_call
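# Illustrative note, not part of the original CLI: getScriptCall only rebuilds
# the sub-command invocation as a shell string, e.g.
#
#   getScriptCall(root, "deploy", ["myapp", "dev"])  # -> "roger_deploy.py myapp dev"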
def main():
root = ''
utilsObj = Utils()
own_dir = os.path.dirname(os.path.realpath(__file__))
root = os.path.abspath(os.path.join(own_dir, os.pardir))
files = getFiles("{}/cli/".format(root))
commands = getCommands(files)
if len(sys.argv) > 1:
if sys.argv[1] == "-h" or sys.argv[1] == "--help":
roger_help(root, commands)
elif sys.argv[1] == "-v" or sys.argv[1] == "--version":
version = utilsObj.roger_version(root)
print(version)
else:
command = sys.argv[1]
command_args = sys.argv[2:]
if command in commands:
print("root: {} command: {} args: {}".format(
root, command, command_args
))
script_call = getScriptCall(root, command, command_args)
os.system(script_call)
else:
raise SystemExit("Command is not valid. Exiting.")
else:
raise SystemExit("No arguments found. Please refer to usage: roger -h")
if __name__ == "__main__":
main()
|
apache-2.0
| -4,948,606,142,957,857,000
| 30.224719
| 79
| 0.592659
| false
| 3.745283
| false
| false
| false
|
jegger/kivy
|
kivy/tests/test_uix_textinput.py
|
1
|
2204
|
'''
uix.textinput tests
========================
'''
import unittest
from kivy.tests.common import GraphicUnitTest
from kivy.uix.textinput import TextInput
class TextInputTest(unittest.TestCase):
def test_focusable_when_disabled(self):
ti = TextInput()
ti.disabled = True
ti.focused = True
ti.bind(focus=self.on_focused)
def on_focused(self, instance, value):
self.assertTrue(instance.focused, value)
def test_wordbreak(self):
self.test_txt = "Firstlongline\n\nSecondveryverylongline"
ti = TextInput(width='30dp', size_hint_x=None)
ti.bind(text=self.on_text)
ti.text = self.test_txt
def on_text(self, instance, value):
# Check if text is modified while recreating from lines and lines_flags
self.assertEquals(instance.text, self.test_txt)
# Check if wordbreaking is correctly done
# If so Secondvery... should start from the 7th line
pos_S = self.test_txt.index('S')
self.assertEquals(instance.get_cursor_from_index(pos_S), (0, 6))
class TextInputGraphicTest(GraphicUnitTest):
def test_text_validate(self):
ti = TextInput(multiline=False)
ti.focus = True
self.render(ti)
self.assertFalse(ti.multiline)
self.assertTrue(ti.focus)
self.assertTrue(ti.text_validate_unfocus)
ti.validate_test = None
ti.bind(on_text_validate=lambda *_: setattr(
ti, 'validate_test', True
))
ti._key_down(
(
None, # displayed_str
None, # internal_str
'enter', # internal_action
1 # scale
),
repeat=False
)
self.assertTrue(ti.validate_test)
self.assertFalse(ti.focus)
ti.validate_test = None
ti.text_validate_unfocus = False
ti.focus = True
self.assertTrue(ti.focus)
ti._key_down(
(None, None, 'enter', 1),
repeat=False
)
self.assertTrue(ti.validate_test)
self.assertTrue(ti.focus)
if __name__ == '__main__':
import unittest
unittest.main()
|
mit
| -2,391,985,931,788,283,000
| 25.878049
| 79
| 0.582123
| false
| 3.839721
| true
| false
| false
|
AllanYangZhou/oppia
|
core/domain/collection_jobs_one_off.py
|
1
|
3306
|
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""One-off jobs for collections."""
import logging
from core import jobs
from core.domain import collection_domain
from core.domain import collection_services
from core.platform import models
import feconf
(base_models, collection_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.collection])
class CollectionMigrationJob(jobs.BaseMapReduceOneOffJobManager):
"""A reusable one-time job that may be used to migrate collection schema
versions. This job will load all existing collections from the data store
and immediately store them back into the data store. The loading process of
a collection in collection_services automatically performs schema updating.
This job persists that conversion work, keeping collections up-to-date and
improving the load time of new collections.
"""
_DELETED_KEY = 'collection_deleted'
_ERROR_KEY = 'validation_error'
_MIGRATED_KEY = 'collection_migrated'
@classmethod
def entity_classes_to_map_over(cls):
return [collection_models.CollectionModel]
@staticmethod
def map(item):
if item.deleted:
yield (
CollectionMigrationJob._DELETED_KEY,
'Encountered deleted collection.')
return
# Note: the read will bring the collection up to the newest version.
collection = collection_services.get_collection_by_id(item.id)
try:
collection.validate(strict=False)
except Exception as e:
logging.error(
'Collection %s failed validation: %s' % (item.id, e))
yield (
CollectionMigrationJob._ERROR_KEY,
'Collection %s failed validation: %s' % (item.id, e))
return
# Write the new collection into the datastore if it's different from
# the old version.
if item.schema_version <= feconf.CURRENT_COLLECTION_SCHEMA_VERSION:
commit_cmds = [{
'cmd': collection_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION,
'from_version': item.schema_version,
'to_version': str(
feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
}]
collection_services.update_collection(
feconf.MIGRATION_BOT_USERNAME, item.id, commit_cmds,
'Update collection schema version to %d.' % (
feconf.CURRENT_COLLECTION_SCHEMA_VERSION))
yield (
CollectionMigrationJob._MIGRATED_KEY,
'Collection successfully migrated.')
@staticmethod
def reduce(key, values):
yield (key, values)
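# Illustrative sketch, not part of this module: a one-off job like this is
# normally kicked off from an admin handler. The method names below are
# assumed from the MapReduce job base class and may differ:
#
#   job_id = CollectionMigrationJob.create_new()
#   CollectionMigrationJob.enqueue(job_id)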
|
apache-2.0
| 595,232,431,969,137,900
| 37
| 79
| 0.662432
| false
| 4.437584
| false
| false
| false
|
0verchenko/Utils
|
googleapis_mock.py
|
1
|
5420
|
import random
import json
import datetime
import ssl
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def change_host_file_configuration(for_disabling=False, path_to_hosts_file='C:\\Windows\\System32\\drivers\\etc\\hosts'):
def check_if_hosts_file_hacked(path='C:\\Windows\\System32\\drivers\\etc\\hosts'):
with open(path, 'r') as target_file:
target_file_content = target_file.read()
if 'android.googleapis.com' in target_file_content:
return True
else:
return False
try:
if for_disabling:
if not check_if_hosts_file_hacked(path=path_to_hosts_file):
                print('The "android.googleapis.com" record is not in hosts file.')
return True
else:
with open(path_to_hosts_file, 'r') as hosts_file:
hosts_file_content = hosts_file.readlines()
with open(path_to_hosts_file, 'w') as hosts_file:
for line in hosts_file_content:
if 'android.googleapis.com' not in line:
hosts_file.write(line)
if not check_if_hosts_file_hacked(path=path_to_hosts_file):
print('The "android.googleapis.com" record was removed from hosts file.')
return True
else:
if not check_if_hosts_file_hacked(path=path_to_hosts_file):
with open(path_to_hosts_file, 'a') as hosts_file:
################################################################################
################################################################################
                    # The server IP that will imitate GCM goes below:
################################################################################
################################################################################
hosts_file.write('127.0.0.1 android.googleapis.com\n')
if check_if_hosts_file_hacked(path=path_to_hosts_file):
print('The "android.googleapis.com" record was added to hosts file.')
return True
else:
print('The "android.googleapis.com" record is already in hosts file.')
return False
except IOError:
print('Unable to check/modify hosts file.')
return False
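# Illustrative note, not part of the original script: the same helper undoes
# the hosts-file redirection once the mock is no longer needed:
#
#   change_host_file_configuration(for_disabling=True)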
class Responce_Sender(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'application/json; charset=UTF-8')
self.send_header('Date', '%s' % datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT'))
self.send_header('Expires', '%s' % datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT'))
self.send_header('Cache-Control', 'private, max-age=0')
self.send_header('X-Content-Type-Options', 'nosniff')
self.send_header('X-Frame-Options', 'SAMEORIGIN')
self.send_header('Server', 'GSE')
self.send_header('Alt-Svc', 'quic=":443"; ma=2592000; v="39,38,37,35"')
self.send_header('Accept-Ranges', 'none')
self.send_header('Vary', 'Accept-Encoding')
self.end_headers()
def do_GET(self):
self._set_headers()
self.wfile.write("<html><body><h1>hi!</h1></body></html>")
def do_HEAD(self):
self._set_headers()
def do_POST(self):
print self.path
content_len = int(self.headers.getheader('content-length', 0))
post_body = self.rfile.read(content_len)
# print post_body
try:
json_request = eval(post_body)
number_of_tokens = len(json_request['registration_ids'])
print('number of tokens:%s' % number_of_tokens)
except:
print 'error happened'
with open('logfile.txt', 'a') as logfile:
logfile.write(post_body)
return
self._set_headers()
multicast_id = random.randint(1000000000000000000, 9999999999999999999)
message_id = int(str(multicast_id)[:16])
post_responce = {
"multicast_id": multicast_id,
"success": number_of_tokens,
"failure":0,
"canonical_ids":0,
"results": []}
for x in range(number_of_tokens):
post_responce["results"].append({"message_id": "0:{message_id}%8ad5829ef9fd7ecd".format(message_id=message_id + x)})
        print('Sending response for %s tokens' % number_of_tokens)
self.wfile.write(json.dumps(post_responce))
def run(server_class=HTTPServer, handler_class=Responce_Sender, port=2195):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
httpd.socket = ssl.wrap_socket(httpd.socket, certfile='server.pem', server_side=True)
print '%s - starting httpd...' % datetime.datetime.now().strftime('%d %b %Y %H:%M:%S')
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print '\t%s - stopping httpd.' % datetime.datetime.now().strftime('%d %b %Y %H:%M:%S')
if __name__ == "__main__":
from sys import argv
change_host_file_configuration()
print('Starting http mock')
if len(argv) == 2:
run(port=int(argv[1]))
else:
run()
|
apache-2.0
| -5,844,384,114,319,613,000
| 43.065041
| 128
| 0.537823
| false
| 3.99705
| false
| false
| false
|
softelnet/sponge
|
sponge-kb/sponge-kb-designer/src/main/resources/sponge/designer/storage.py
|
1
|
1331
|
"""
Sponge Knowledge Base
WARNING: THIS IS A WORK IN PROGRESS!
"""
from org.openksavi.sponge.core.action import BaseActionMeta, BaseActionBuilder
from java.util.concurrent.atomic import AtomicInteger
from java.util.concurrent import CopyOnWriteArrayList
import re
def onInit():
global STORAGE
STORAGE = createStorage()
def createStorage():
storage = Storage()
storage.addAction(BaseActionBuilder("Echo").withLabel("Echo").withArgs([StringType("text").withLabel("Text")]).getMeta())
return storage
class Storage:
def __init__(self):
self.actions = CopyOnWriteArrayList()
self.currentId = AtomicInteger(0)
def addAction(self, actionMeta):
if list(filter(lambda action: action.name == actionMeta.name, self.actions)):
raise Exception("The action {} has already been added".format(actionMeta.name))
self.actions.add(actionMeta)
def getAction(self, name):
return filter(lambda action: action.name == name, self.actions)[0]
def updateAction(self, name, actionMeta):
action = self.getAction(name)
action.name = actionMeta.name
action.label = actionMeta.label
action.description = actionMeta.description
action.callable = actionMeta.callable
action.activatable = actionMeta.activatable
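# Illustrative sketch, not part of the original knowledge base: how the
# storage created above would typically be used. The helper is never called.
def _storage_example():
    storage = createStorage()
    echo = storage.getAction("Echo")      # look up the action metadata by name
    echo.label = "Echo text"
    storage.updateAction("Echo", echo)    # write the modified metadata back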
|
apache-2.0
| 5,470,633,246,581,718,000
| 29.953488
| 125
| 0.700225
| false
| 3.926254
| false
| false
| false
|
AerisCloud/AerisCloud
|
aeriscloud/s3.py
|
1
|
2331
|
import math
import requests
import time
import xml.etree.ElementTree as ET
# from .config import config
class S3:
def __init__(self, bucket, endpoint_url=None):
"""
The S3 class provides methods to manage files from a bucket.
:param bucket: The bucket to use
:type bucket: String
"""
self._bucket = bucket
self._endpoint_url = endpoint_url
if not self._endpoint_url:
self._endpoint_url = 'http://%s.s3.amazonaws.com' % bucket
def put(self, data, key):
"""
Upload a file to the S3 bucket
:param data: The content of the file to upload
:type data: any
:param key: The name of the file to post
:type key: String
:return: The url of the uploaded file
:rtype: String
"""
url = self._endpoint_url + '/' + key
r = requests.put(url, data=data)
r.raise_for_status()
return url
def generate_key(self, seed, ext):
"""
        Generate a key that is expected to be unique in the bucket
        :param seed: The seed to use to generate the name
        :type seed: String
:param ext: The file extension
:type ext: String
:return: A key to upload a new file
:rtype: String
"""
return "{0}-{1}.{2}".format(
seed,
int(math.floor(time.time())),
ext
)
def list_bucket(self):
"""
Return the list of the files in the bucket
:return: List of files
:rtype: List
"""
url = self._endpoint_url + '/'
r = requests.get(url)
r.raise_for_status()
xml = ET.fromstring(r.text)
files = []
for child in xml:
if child.tag.endswith('Contents'):
file = {}
# Convert the XML data to python object
for file_data in child:
if file_data.tag.endswith('Key'):
file['Key'] = file_data.text
if file_data.tag.endswith('LastModified'):
file['LastModified'] = file_data.text
if file_data.tag.endswith('Size'):
file['Size'] = file_data.text
files.append(file)
return files
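# Illustrative usage sketch, not part of the original module; the bucket name
# and payload are hypothetical and the helper is never called.
def _s3_example():
    s3 = S3('my-bucket')                      # defaults to the public S3 endpoint
    key = s3.generate_key('report', 'json')   # e.g. 'report-1490000000.json'
    url = s3.put('{"ok": true}', key)         # plain HTTP PUT, returns the file URL
    return url, s3.list_bucket()              # [{'Key': ..., 'LastModified': ..., 'Size': ...}, ...]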
|
mit
| 3,940,088,200,830,511,600
| 27.084337
| 70
| 0.517375
| false
| 4.340782
| false
| false
| false
|
GNOME/gedit-latex
|
latex/latex/completion.py
|
1
|
12450
|
# -*- coding: utf-8 -*-
# This file is part of the Gedit LaTeX Plugin
#
# Copyright (C) 2010 Michael Zeising
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public Licence as published by the Free Software
# Foundation; either version 2 of the Licence, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more
# details.
#
# You should have received a copy of the GNU General Public Licence along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
latex.completion
LaTeX-specific completion classes
"""
from logging import getLogger
from gi.repository import GdkPixbuf
from ..resources import Resources
from ..completion import ICompletionHandler, Proposal
class LaTeXCommandProposal(Proposal):
"""
A proposal inserting a Template when activated
"""
def __init__(self, overlap, snippet, label):
self._snippet = snippet
self._label = label
self._overlap = overlap
self._icon = GdkPixbuf.Pixbuf.new_from_file(Resources().get_icon("i_command.png"))
@property
def source(self):
return self._snippet
@property
def label(self):
return self._label
@property
def details(self):
return None
@property
def icon(self):
return self._icon
@property
def overlap(self):
return self._overlap
class LaTeXChoiceProposal(Proposal):
"""
A proposal inserting a simple string when activated
"""
def __init__(self, overlap, source, label, details):
self._source = source
self._details = details
self._overlap = overlap
self._label = label
self._icon = GdkPixbuf.Pixbuf.new_from_file(Resources().get_icon("i_choice.png"))
@property
def source(self):
return self._source
@property
def label(self):
return self._label
@property
def details(self):
return self._details
@property
def icon(self):
return self._icon
@property
def overlap(self):
return self._overlap
from .model import LanguageModelFactory, Choice, MandatoryArgument, OptionalArgument
from .parser import PrefixParser, Node
from ..bibtex.cache import BibTeXDocumentCache
class LaTeXCompletionHandler(ICompletionHandler):
"""
This implements the LaTeX-specific code completion
"""
_log = getLogger("LaTeXCompletionHandler")
trigger_keys = ["backslash", "braceleft"]
prefix_delimiters = ["\\"]
def __init__(self):
self._log.debug("init")
#get the language_model singleton
self._language_model = LanguageModelFactory().get_language_model()
self._bibtex_document_cache = BibTeXDocumentCache()
def set_outline(self, outline):
"""
Process a LaTeX outline model
@param outline: a latex.outline.Outline instance
"""
# labels
label_choices = [Choice(None, label.value) for label in outline.labels]
self._language_model.fill_placeholder("Labels", label_choices)
# colors
color_choices = [Choice(None, color) for color in outline.colors]
self._language_model.fill_placeholder("Colors", color_choices)
# newcommands
self._language_model.set_newcommands(outline.newcommands)
# newenvironments
newenvironments = []
for n in outline.newenvironments:
choice = Choice(None, n.value)
newenvironments.append(choice)
self._language_model.fill_placeholder("Newenvironments", newenvironments)
#
# bibtex entries
#
try:
entry_choices = []
for bib_file in outline.bibliographies:
try:
bibtex_document = self._bibtex_document_cache.get_document(bib_file)
# generate choices from entries
for entry in bibtex_document.entries:
# build table data for DetailsPopup
rows = []
for field in entry.fields:
rows.append([field.name, field.valueMarkup])
entry_choices.append(Choice(None, entry.key, rows))
except OSError:
# BibTeX file not found
self._log.error("Not found: %s" % bib_file)
# attach to placeholders in CommandStore
self._language_model.fill_placeholder("Bibitems", entry_choices)
except IOError:
self._log.debug("Failed to provide BibTeX completion due to IOError")
def set_neighbors(self, tex_files, bib_files, graphic_files):
"""
Populate the lists of neighbor files
@param tex_files: list of neighbor TeX files
@param bib_files: list of neighbor BibTeX files
@param graphic_files: list of neighbor graphics
"""
tex_choices = [Choice(None, file.shortbasename) for file in tex_files]
self._language_model.fill_placeholder("TexFiles", tex_choices)
bib_choices = [Choice(None, file.shortbasename) for file in bib_files]
self._language_model.fill_placeholder("BibFiles", bib_choices)
graphic_choices = [Choice(None, file.basename) for file in graphic_files]
self._language_model.fill_placeholder("ImageFiles", graphic_choices)
def complete(self, prefix):
"""
Try to complete a given prefix
"""
self._log.debug("complete: '%s'" % prefix)
#proposals = [LaTeXTemplateProposal(Template("Hello[${One}][${Two}][${Three}]"), "Hello[Some]"), LaTeXProposal("\\world")]
fragment = Node(Node.DOCUMENT)
parser = PrefixParser()
try:
parser.parse(prefix, fragment)
modelParser = PrefixModelParser(self._language_model)
proposals = modelParser.parse(fragment)
self._log.debug("Generated %s proposals" % len(proposals))
return proposals
except Exception as e:
self._log.debug(e)
return []
from ..preferences import Preferences
from . import LaTeXSource
class PrefixModelParser(object):
"""
This parses the document model of a prefix and generates proposals accordingly
This is used by the LaTeXCompletionHandler class
"""
_log = getLogger("PrefixModelParser")
def __init__(self, language_model):
self.__language_model = language_model
self.__light_foreground = Preferences().get("light-foreground-color")
def __create_proposals_from_commands(self, commands, overlap):
"""
Generate proposals for commands
"""
proposals = []
for command in commands:
label = command.name
snippet = "\\" + command.name
for idx, argument in enumerate(command.children):
if type(argument) is MandatoryArgument:
label += "{<span color='%s'>%s</span>}" % (self.__light_foreground, argument.label)
snippet += "{${%s:%s}}" % (idx+1, argument.label)
elif type(argument) is OptionalArgument:
label += "[<span color='%s'>%s</span>]" % (self.__light_foreground, argument.label)
snippet += "[${%s:%s}]" % (idx+1, argument.label)
if command.package:
label += " <small><b>%s</b></small>" % command.package
# workaround for latex.model.Element.package may be None
# TODO: latex.model.Element.package should be a list of packages
if command.package is None:
packages = []
else:
packages = [command.package]
proposal = LaTeXCommandProposal(overlap, LaTeXSource(snippet, packages), label)
proposals.append(proposal)
return proposals
def __create_proposals_from_choices(self, choices, overlap):
"""
Generate proposals for argument choices
"""
proposals = []
for choice in choices:
label = choice.value
if choice.package:
label += " <small><b>%s</b></small>" % choice.package
# see above
if choice.package is None:
packages = []
else:
packages = [choice.package]
proposal = LaTeXChoiceProposal(overlap, LaTeXSource(choice.value, packages), label, choice.details)
proposals.append(proposal)
return proposals
def parse(self, prefixFragment):
"""
Returns choices
"""
# root node of the prefix model must be COMMAND
commandNode = prefixFragment[-1]
if commandNode.type != Node.COMMAND:
return []
commandName = commandNode.value
if len(commandNode) == 0:
# command has no arguments...
if len(commandName) == 0:
# no name, so propose all commands
commands = list(self.__language_model.commands.values())
overlap = 1 # only "\"
else:
commands = self.__language_model.find_command(commandName)
if len(commands) == 1 and commands[0].name == commandName:
# don't propose when only one command is found and that one
# matches the typed one
return []
overlap = len(commandName) + 1 # "\begi"
return self.__create_proposals_from_commands(commands, overlap)
# ...command has arguments
try:
self._log.debug(commandNode.xml)
# find the language model of the command
storedCommand = self.__language_model.commands[commandName]
try:
argumentNode, storedArgument = self.__match_argument(commandNode, storedCommand)
except Exception as e:
self._log.error(e)
return []
choices = storedArgument.children
# filter argument matching the already typed argument text
argumentValue = argumentNode.innerText
if len(argumentValue):
choices = [choice for choice in choices if choice.value.startswith(argumentValue)]
overlap = len(argumentValue)
else:
overlap = 0
return self.__create_proposals_from_choices(choices, overlap)
except KeyError:
self._log.debug("Command not found: %s" % commandName)
return []
def __match_argument(self, command, model_command):
"""
@param command: the parsed command Node
@param model_command: the according model command
@return: (matched argument, model argument)
"""
# push the arguments of the model command on a stack
model_argument_stack = []
model_argument_stack.extend(model_command.children)
model_argument_stack.reverse()
for argument in command:
if argument.type == Node.MANDATORY_ARGUMENT:
# optional arguments in the model may be skipped
while True:
try:
model_argument = model_argument_stack.pop()
if model_argument.type != Node.OPTIONAL_ARGUMENT:
break
except IndexError:
# no more optional arguments to skip - signatures can't match
raise Exception("Signatures don't match")
if not argument.closed:
return (argument, model_argument)
elif argument.type == Node.OPTIONAL_ARGUMENT:
model_argument = model_argument_stack.pop()
if model_argument.type != Node.OPTIONAL_ARGUMENT:
raise Exception("Signatures don't match")
if not argument.closed:
return (argument, model_argument)
raise Exception("No matching model argument found")
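# Illustrative sketch, not part of the original plugin: the handler is driven
# by gedit's completion framework, roughly as below. Never called here.
def _completion_example():
    handler = LaTeXCompletionHandler()
    proposals = handler.complete("\\beg")   # e.g. proposals for \begin{...}
    return [p.label for p in proposals]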
# ex:ts=4:et:
|
gpl-3.0
| 2,227,404,283,291,466,000
| 30.518987
| 130
| 0.592048
| false
| 4.583947
| false
| false
| false
|
Psycojoker/dierentheater
|
scraper/utils.py
|
1
|
4715
|
# -*- coding:Utf-8 -*-
# Dieren Theater - lachambre.be to json sausage machine
# Copyright (C) 2012 Laurent Peuch <cortex@worlddomination.be>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import logging
logger = logging.getLogger('')
from urllib import quote
from history.utils import irc
from .tasks import app
LACHAMBRE_PREFIX = "http://www.lachambre.be/kvvcr/"
DOSSIER_ID_REGEX = "dossierID=([0-9A-Za-z-]+).xml"
def get_or_create(klass, _id=None, **kwargs):
if _id is None:
object = klass.objects.filter(**kwargs)
else:
object = klass.objects.filter(**{_id: kwargs[_id]})
if object:
return object[0]
else:
logger.debug("\033[0;36madd new %s %s\033[0m" % (klass.__name__, kwargs))
result = klass(**kwargs)
result.save()
return result
def update_or_create(klass, _id=None, **kwargs):
if _id is None:
object = klass.objects.filter(**kwargs)
else:
object = klass.objects.filter(**{_id: kwargs[_id]})
if object:
result = object[0]
for key, value in kwargs.items():
setattr(result, key, value)
logger.debug("\033[0;36mupdate %s %s\033[0m" % (klass.__name__, kwargs))
else:
        logger.debug("\033[0;32madd new %s %s\033[0m" % (klass.__name__, kwargs))
result = klass(**kwargs)
result.save()
return result
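# Illustrative sketch, not part of the original scraper: how the two helpers
# above are meant to be called with a MongoEngine-style document class.
# "Deputy" and its fields are hypothetical; the function is never called.
def _get_or_create_example():
    deputy = get_or_create(Deputy, _id="lachambre_id", lachambre_id="123", full_name="Jane Doe")
    # a later scrape refreshes mutable fields while keying on the same id
    deputy = update_or_create(Deputy, _id="lachambre_id", lachambre_id="123", full_name="Jane D. Doe")
    return deputy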
def get_text_else_blank(dico, key):
return dico[key].text if dico.get(key) and dico[key].a else ""
def get_href_else_blank(dico, key):
return dico[key].a["href"] if dico.get(key) and dico[key].a else ""
def get_items_list_else_empty_list(dico, key):
return dico[key].text.split(" | ") if dico.get(key) else []
def dico_get_text(dico, key):
if dico.get(key):
return dico[key].text
return ""
class AccessControlDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.accessed = set()
def __getitem__(self, key):
self.accessed.add(key)
return dict.__getitem__(self, key)
def get_not_accessed_keys(self):
a = []
for i in self.keys():
if i not in self.accessed:
a.append(i)
elif isinstance(self[i], AccessControlDict) and self[i].get_not_accessed_keys():
a.append(i)
a.append(self[i].get_not_accessed_keys())
return a
def die_if_got_not_accessed_keys(self):
if self.get_not_accessed_keys():
logger.error("\nError: untreated sections:")
irc("\nError: untreated sections:")
for i in self.get_not_accessed_keys():
if isinstance(i, (str, unicode)):
logger.error("* %s" % i)
irc("* %s" % i.encode("Utf-8"))
else:
for j in i:
logger.error(" * %s" % j)
irc(" * %s" % j.encode("Utf-8"))
logger.error("------------ stop ------------")
irc("Bram: Error: dico got un-accessed keys, die")
import sys
sys.exit(1)
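# Illustrative sketch, not part of the original file: AccessControlDict
# records which keys were read, so the scraper can spot sections it never
# handled. Never called here.
def _access_control_example():
    d = AccessControlDict({"title": "x", "body": "y", "footer": "z"})
    d["title"]
    d["body"]                              # consume two sections
    return d.get_not_accessed_keys()       # -> ["footer"]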
def clean_text(text):
def rep(result):
string = result.group() # "&#xxx;"
n = int(string[2:-1])
uchar = unichr(n) # matching unicode char
return uchar
return re.sub("(\r|\t|\n| )+", " ", re.sub("&#\d+;", rep, text)).strip()
def lame_url(url):
    # convert the super lame urls of lachambre.be into something usable
return quote(url.encode("iso-8859-1"), safe="%/:=&?~#+!$,;'@()*[]")
def table2dic(table):
dico = {}
for x, y in zip(table[::2], table[1::2]):
dico[x.text] = y.text if y.a is None else y.a
return dico
class Parsable(object):
@classmethod
def scrape(klass, cache=False, sync=False):
if sync:
klass.fetch_list(cache=cache, sync=sync)
else:
return klass.fetch_list.delay(cache=cache, sync=sync)
@classmethod
def fetch_list(klass, cache=False, sync=False):
raise NotImplementedError()
|
agpl-3.0
| 1,099,140,688,297,136,500
| 29.419355
| 92
| 0.577943
| false
| 3.429091
| false
| false
| false
|
pablocscode/TFG-CAEBAT
|
representacion2.0.py
|
1
|
3399
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 10 10:14:31 2017
@author: Pablo
Objectives:
-Dynamically display the results of the profiles.out file for any time
Guide:
-This script must be located in the same folder as the profiles.out and halfcells.out files
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
#Read all the lines of the file
archivo = open('profiles.out','r')
lineas = archivo.readlines()
archivo.close()
#Compute the number of rows of the file for each time step
i = 4 #Data starts from line 4
num_distancias = 0
#The counter is increased with each non-empty line of the first matrix of
#time steps
while lineas[i] != ' \n':
num_distancias += 1
i += 1
#Compute the number of time steps in the file
datos_halfcells = open('halfcells.out','r')
lineas_halfcells = datos_halfcells.readlines()
datos_halfcells.close()
num_tiempos = len(lineas_halfcells)-1 #the first line has no data
#Declare the vectors that will hold the column values
distancia = np.zeros((num_tiempos,num_distancias))
C_Elec = np.zeros((num_tiempos,num_distancias))
C_Sol_Surf = np.zeros((num_tiempos,num_distancias))
Liq_Pot = np.zeros((num_tiempos,num_distancias))
Solid_Pot = np.zeros((num_tiempos,num_distancias))
J_main = np.zeros((num_tiempos,num_distancias))
tiempo = np.zeros(num_tiempos)
#Initialize before starting the for loop
fila =0
columna = 0
#Each line (row) holds the data for a specific time step
for j in range(4,(num_distancias+6)*num_tiempos,num_distancias+6):
    for i in range(j,j+num_distancias): #Data starts from line 4
        #Each element of "lineas" is a whole line that we convert into a vector
linea = lineas[i].split(',')
        #Assign to each variable its value from every line we read
distancia[fila,columna] = float(linea[0])
C_Elec[fila,columna] = float(linea[1])
C_Sol_Surf[fila,columna] = float(linea[2])
Liq_Pot[fila,columna] = float(linea[3])
Solid_Pot[fila,columna] = float(linea[4])
J_main[fila,columna] = float(linea[5])
columna = columna +1
    #Assign the time of each plot
linea = lineas[j-1].split()
tiempo[fila] = float(linea[2])
    #At the end of the for loop move to the next row and reset the columns to zero
fila = fila+1
columna = 0
#Plot the results
#Figure 1
fig1 = plt.figure(1)
ax1 = fig1.add_subplot(1,1,1)
#plt.axis([0, 1, -10, 10])
plt.subplots_adjust(left=0.25, bottom=0.25)
axi1 = plt.axes([0.2, 0.15, 0.65, 0.03])
si1 = Slider(axi1, 'Tiempo', 0, 100, valinit=0)
#Figure 2
fig2 = plt.figure(2)
ax2 = fig2.add_subplot(1,1,1)
#plt.axis([0, 1, -10, 10])
plt.subplots_adjust(left=0.25, bottom=0.25)
ax2.set_ylim([0, 0.9])
ax2.set_xlim([0, 100])
axi2 = plt.axes([0.2, 0.15, 0.65, 0.03])
si2 = Slider(axi2, 'Tiempo',0,num_tiempos-1,valinit = 0)
def plot1(val):
i = int(si1.val)
ax1.clear()
ax1.plot(C_Elec[i])
def plot2(val):
i = int(si2.val)
ax2.clear()
ax2.set_ylim([0, 0.9])
ax2.set_xlim([0, num_distancias])
ax2.plot(C_Sol_Surf[i])
si1.on_changed(plot1)
si2.on_changed(plot2)
|
gpl-3.0
| -8,595,424,597,173,040
| 28.781818
| 95
| 0.661843
| false
| 2.37281
| false
| false
| false
|
EmreAtes/spack
|
lib/spack/spack/build_systems/autotools.py
|
1
|
19376
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import inspect
import os
import os.path
import shutil
import stat
from subprocess import PIPE
from subprocess import check_call
import llnl.util.tty as tty
from llnl.util.filesystem import working_dir, join_path, force_remove
from spack.package import PackageBase, run_after, run_before
from spack.util.executable import Executable
class AutotoolsPackage(PackageBase):
"""Specialized class for packages built using GNU Autotools.
This class provides four phases that can be overridden:
1. :py:meth:`~.AutotoolsPackage.autoreconf`
2. :py:meth:`~.AutotoolsPackage.configure`
3. :py:meth:`~.AutotoolsPackage.build`
4. :py:meth:`~.AutotoolsPackage.install`
They all have sensible defaults and for many packages the only thing
necessary will be to override the helper method
:py:meth:`~.AutotoolsPackage.configure_args`.
For a finer tuning you may also override:
+-----------------------------------------------+--------------------+
| **Method** | **Purpose** |
+===============================================+====================+
| :py:attr:`~.AutotoolsPackage.build_targets` | Specify ``make`` |
| | targets for the |
| | build phase |
+-----------------------------------------------+--------------------+
| :py:attr:`~.AutotoolsPackage.install_targets` | Specify ``make`` |
| | targets for the |
| | install phase |
+-----------------------------------------------+--------------------+
| :py:meth:`~.AutotoolsPackage.check` | Run build time |
| | tests if required |
+-----------------------------------------------+--------------------+
"""
#: Phases of a GNU Autotools package
phases = ['autoreconf', 'configure', 'build', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'AutotoolsPackage'
#: Whether or not to update ``config.guess`` on old architectures
patch_config_guess = True
#: Targets for ``make`` during the :py:meth:`~.AutotoolsPackage.build`
#: phase
build_targets = []
#: Targets for ``make`` during the :py:meth:`~.AutotoolsPackage.install`
#: phase
install_targets = ['install']
#: Callback names for build-time test
build_time_test_callbacks = ['check']
#: Callback names for install-time test
install_time_test_callbacks = ['installcheck']
#: Set to true to force the autoreconf step even if configure is present
force_autoreconf = False
#: Options to be passed to autoreconf when using the default implementation
autoreconf_extra_args = []
@run_after('autoreconf')
def _do_patch_config_guess(self):
"""Some packages ship with an older config.guess and need to have
this updated when installed on a newer architecture. In particular,
        config.guess fails for PPC64LE for versions prior to a 2013-06-10
build date (automake 1.13.4) and for ARM (aarch64)."""
if not self.patch_config_guess or (not self.spec.satisfies(
'target=ppc64le') and not self.spec.satisfies('target=aarch64')
):
return
my_config_guess = None
config_guess = None
if os.path.exists('config.guess'):
# First search the top-level source directory
my_config_guess = 'config.guess'
else:
# Then search in all sub directories.
# We would like to use AC_CONFIG_AUX_DIR, but not all packages
# ship with their configure.in or configure.ac.
d = '.'
dirs = [os.path.join(d, o) for o in os.listdir(d)
if os.path.isdir(os.path.join(d, o))]
for dirname in dirs:
path = os.path.join(dirname, 'config.guess')
if os.path.exists(path):
my_config_guess = path
if my_config_guess is not None:
try:
check_call([my_config_guess], stdout=PIPE, stderr=PIPE)
# The package's config.guess already runs OK, so just use it
return
except Exception:
pass
else:
return
# Look for a spack-installed automake package
if 'automake' in self.spec:
automake_path = os.path.join(self.spec['automake'].prefix, 'share',
'automake-' +
str(self.spec['automake'].version))
path = os.path.join(automake_path, 'config.guess')
if os.path.exists(path):
config_guess = path
# Look for the system's config.guess
if config_guess is None and os.path.exists('/usr/share'):
automake_dir = [s for s in os.listdir('/usr/share') if
"automake" in s]
if automake_dir:
automake_path = os.path.join('/usr/share', automake_dir[0])
path = os.path.join(automake_path, 'config.guess')
if os.path.exists(path):
config_guess = path
if config_guess is not None:
try:
check_call([config_guess], stdout=PIPE, stderr=PIPE)
mod = os.stat(my_config_guess).st_mode & 0o777 | stat.S_IWUSR
os.chmod(my_config_guess, mod)
shutil.copyfile(config_guess, my_config_guess)
return
except Exception:
pass
raise RuntimeError('Failed to find suitable config.guess')
@property
def configure_directory(self):
"""Returns the directory where 'configure' resides.
:return: directory where to find configure
"""
return self.stage.source_path
@property
def configure_abs_path(self):
# Absolute path to configure
configure_abs_path = join_path(
os.path.abspath(self.configure_directory), 'configure'
)
return configure_abs_path
@property
def build_directory(self):
"""Override to provide another place to build the package"""
return self.configure_directory
@run_before('autoreconf')
def delete_configure_to_force_update(self):
if self.force_autoreconf:
force_remove(self.configure_abs_path)
def autoreconf(self, spec, prefix):
"""Not needed usually, configure should be already there"""
# If configure exists nothing needs to be done
if os.path.exists(self.configure_abs_path):
return
# Else try to regenerate it
autotools = ['m4', 'autoconf', 'automake', 'libtool']
missing = [x for x in autotools if x not in spec]
if missing:
msg = 'Cannot generate configure: missing dependencies {0}'
raise RuntimeError(msg.format(missing))
tty.msg('Configure script not found: trying to generate it')
tty.warn('*********************************************************')
tty.warn('* If the default procedure fails, consider implementing *')
tty.warn('* a custom AUTORECONF phase in the package *')
tty.warn('*********************************************************')
with working_dir(self.configure_directory):
m = inspect.getmodule(self)
# This part should be redundant in principle, but
# won't hurt
m.libtoolize()
m.aclocal()
# This line is what is needed most of the time
# --install, --verbose, --force
autoreconf_args = ['-ivf']
if 'pkgconfig' in spec:
autoreconf_args += [
'-I',
join_path(spec['pkgconfig'].prefix, 'share', 'aclocal'),
]
autoreconf_args += self.autoreconf_extra_args
m.autoreconf(*autoreconf_args)
@run_after('autoreconf')
def set_configure_or_die(self):
"""Checks the presence of a ``configure`` file after the
autoreconf phase. If it is found sets a module attribute
appropriately, otherwise raises an error.
:raises RuntimeError: if a configure script is not found in
:py:meth:`~AutotoolsPackage.configure_directory`
"""
# Check if a configure script is there. If not raise a RuntimeError.
if not os.path.exists(self.configure_abs_path):
msg = 'configure script not found in {0}'
raise RuntimeError(msg.format(self.configure_directory))
# Monkey-patch the configure script in the corresponding module
inspect.getmodule(self).configure = Executable(
self.configure_abs_path
)
def configure_args(self):
"""Produces a list containing all the arguments that must be passed to
configure, except ``--prefix`` which will be pre-pended to the list.
:return: list of arguments for configure
"""
return []
def flags_to_build_system_args(self, flags):
"""Produces a list of all command line arguments to pass specified
compiler flags to configure."""
# Has to be dynamic attribute due to caching.
setattr(self, 'configure_flag_args', [])
for flag, values in flags.items():
if values:
values_str = '{0}={1}'.format(flag.upper(), ' '.join(values))
self.configure_flag_args.append(values_str)
def configure(self, spec, prefix):
"""Runs configure with the arguments specified in
:py:meth:`~.AutotoolsPackage.configure_args`
and an appropriately set prefix.
"""
options = getattr(self, 'configure_flag_args', [])
options += ['--prefix={0}'.format(prefix)]
options += self.configure_args()
with working_dir(self.build_directory, create=True):
inspect.getmodule(self).configure(*options)
def build(self, spec, prefix):
"""Makes the build targets specified by
:py:attr:``~.AutotoolsPackage.build_targets``
"""
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.build_targets)
def install(self, spec, prefix):
"""Makes the install targets specified by
:py:attr:``~.AutotoolsPackage.install_targets``
"""
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.install_targets)
run_after('build')(PackageBase._run_default_build_time_test_callbacks)
def check(self):
"""Searches the Makefile for targets ``test`` and ``check``
and runs them if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute('test')
self._if_make_target_execute('check')
def _activate_or_not(
self,
name,
activation_word,
deactivation_word,
activation_value=None
):
"""This function contains the current implementation details of
:py:meth:`~.AutotoolsPackage.with_or_without` and
:py:meth:`~.AutotoolsPackage.enable_or_disable`.
Args:
name (str): name of the variant that is being processed
activation_word (str): the default activation word ('with' in the
case of ``with_or_without``)
deactivation_word (str): the default deactivation word ('without'
in the case of ``with_or_without``)
activation_value (callable): callable that accepts a single
value. This value is either one of the allowed values for a
multi-valued variant or the name of a bool-valued variant.
Returns the parameter to be used when the value is activated.
The special value 'prefix' can also be assigned and will return
``spec[name].prefix`` as activation parameter.
Examples:
Given a package with:
.. code-block:: python
variant('foo', values=('x', 'y'), description='')
variant('bar', default=True, description='')
calling this function like:
.. code-block:: python
_activate_or_not(
'foo', 'with', 'without', activation_value='prefix'
)
_activate_or_not('bar', 'with', 'without')
will generate the following configuration options:
.. code-block:: console
--with-x=<prefix-to-x> --without-y --with-bar
for ``<spec-name> foo=x +bar``
Returns:
list of strings that corresponds to the activation/deactivation
of the variant that has been processed
Raises:
KeyError: if name is not among known variants
"""
spec = self.spec
args = []
if activation_value == 'prefix':
activation_value = lambda x: spec[x].prefix
# Defensively look that the name passed as argument is among
# variants
if name not in self.variants:
msg = '"{0}" is not a variant of "{1}"'
raise KeyError(msg.format(name, self.name))
# Create a list of pairs. Each pair includes a configuration
# option and whether or not that option is activated
if set(self.variants[name].values) == set((True, False)):
# BoolValuedVariant carry information about a single option.
# Nonetheless, for uniformity of treatment we'll package them
# in an iterable of one element.
condition = '+{name}'.format(name=name)
options = [(name, condition in spec)]
else:
condition = '{name}={value}'
options = [
(value, condition.format(name=name, value=value) in spec)
for value in self.variants[name].values
]
# For each allowed value in the list of values
for option_value, activated in options:
# Search for an override in the package for this value
override_name = '{0}_or_{1}_{2}'.format(
activation_word, deactivation_word, option_value
)
line_generator = getattr(self, override_name, None)
# If not available use a sensible default
if line_generator is None:
def _default_generator(is_activated):
if is_activated:
line = '--{0}-{1}'.format(
activation_word, option_value
)
if activation_value is not None and activation_value(option_value): # NOQA=ignore=E501
line += '={0}'.format(
activation_value(option_value)
)
return line
return '--{0}-{1}'.format(deactivation_word, option_value)
line_generator = _default_generator
args.append(line_generator(activated))
return args
def with_or_without(self, name, activation_value=None):
"""Inspects a variant and returns the arguments that activate
or deactivate the selected feature(s) for the configure options.
This function works on all type of variants. For bool-valued variants
it will return by default ``--with-{name}`` or ``--without-{name}``.
For other kinds of variants it will cycle over the allowed values and
return either ``--with-{value}`` or ``--without-{value}``.
If activation_value is given, then for each possible value of the
variant, the option ``--with-{value}=activation_value(value)`` or
``--without-{value}`` will be added depending on whether or not
``variant=value`` is in the spec.
Args:
name (str): name of a valid multi-valued variant
activation_value (callable): callable that accepts a single
value and returns the parameter to be used leading to an entry
of the type ``--with-{name}={parameter}``.
The special value 'prefix' can also be assigned and will return
``spec[name].prefix`` as activation parameter.
Returns:
list of arguments to configure
"""
return self._activate_or_not(name, 'with', 'without', activation_value)
def enable_or_disable(self, name, activation_value=None):
"""Same as :py:meth:`~.AutotoolsPackage.with_or_without` but substitute
``with`` with ``enable`` and ``without`` with ``disable``.
Args:
name (str): name of a valid multi-valued variant
activation_value (callable): if present accepts a single value
and returns the parameter to be used leading to an entry of the
type ``--enable-{name}={parameter}``
The special value 'prefix' can also be assigned and will return
``spec[name].prefix`` as activation parameter.
Returns:
list of arguments to configure
"""
return self._activate_or_not(
name, 'enable', 'disable', activation_value
)
run_after('install')(PackageBase._run_default_install_time_test_callbacks)
def installcheck(self):
"""Searches the Makefile for an ``installcheck`` target
and runs it if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute('installcheck')
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix)
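# Illustrative sketch, not part of Spack itself: a minimal package that keeps
# the default phases above and only overrides configure_args(). The package
# name and variant are hypothetical.
#
#   class Libfoo(AutotoolsPackage):
#       variant('shared', default=True, description='Build shared libraries')
#
#       def configure_args(self):
#           return self.enable_or_disable('shared')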
|
lgpl-2.1
| -7,559,344,240,667,162,000
| 40.848812
| 111
| 0.566887
| false
| 4.562279
| true
| false
| false
|
TYPO3-Documentation/sphinxcontrib.t3tablerows
|
setup.py
|
1
|
2480
|
from __future__ import absolute_import
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='t3tablerows',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
version='0.3.1',
description='Implement Sphinx transformation of "table-row" containers',
long_description=long_description,
# The project's main homepage.
url='https://github.com/TYPO3-Documentation/sphinxcontrib.t3tablerows',
# Author details
author='Martin Bless',
author_email='martin@mbless.de',
# Choose your license
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: End Users/Desktop',
'Framework :: Sphinx :: Extension',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Documentation',
],
# What does your project relate to?
keywords='sphinx extension transformation',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=['sphinxcontrib'],
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['sphinx'],
)
|
bsd-2-clause
| 3,198,431,805,503,910,000
| 33.929577
| 98
| 0.674597
| false
| 4.182125
| false
| false
| false
|
ohrstrom/obp-media-preflight-api
|
app/preflight/models.py
|
1
|
3533
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
import logging
import os
import uuid
import hashlib
from datetime import timedelta
from django.db import models
from django.db.models.signals import post_save, pre_save, post_delete
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from celery.task.control import revoke
from base.storage import OverwriteFileSystemStorage
from .tasks import preflight_check_task
RUN_ASYNC = getattr(settings, 'PREFLIGHT_RUN_ASYNC', False)
log = logging.getLogger(__name__)
def get_media_upload_path(instance, filename):
path = [instance._meta.app_label.lower()]
#path += str(instance.uuid).split('-')
path += [str(instance.uuid)]
path += [filename]
return os.path.join(*path)
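# Editorial note, not part of the original source: with the logic above, an uploaded
# file is stored under a path of the form '<app_label>/<uuid>/<filename>',
# e.g. 'preflight/<uuid>/recording.wav' (the filename shown here is illustrative).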
class Check(models.Model):
STATUS_INIT = 0
STATUS_PENDING = 1
STATUS_PROCESSING = 2
STATUS_DONE = 3
STATUS_ERROR = 99
STATUS_CHOICES = (
(STATUS_INIT, 'Initialized'),
(STATUS_PENDING, 'Pending'),
(STATUS_PROCESSING, 'Processing'),
(STATUS_DONE, 'Done'),
(STATUS_ERROR, 'Error'),
)
status = models.PositiveSmallIntegerField(
_('Status'),
choices=STATUS_CHOICES,
default=STATUS_PENDING,
blank=False, null=False,
db_index=True,
)
# holds celery queue task id
task_id = models.CharField(
max_length=64, null=True, blank=True, editable=True,
)
uuid = models.UUIDField(
default=uuid.uuid4, editable=False, db_index=True
)
created = models.DateTimeField(
auto_now_add=True, editable=False, db_index=True
)
updated = models.DateTimeField(
auto_now=True, editable=False, db_index=True
)
remote_uri = models.URLField(
null=True, blank=False, unique=True, db_index=True
)
media_file = models.FileField(
null=True, blank=True,
storage=OverwriteFileSystemStorage(), upload_to=get_media_upload_path
)
def __str__(self):
return '{}'.format(self.uuid)
@receiver(pre_save, sender=Check)
def check_pre_save(sender, instance, **kwargs):
if not instance.pk:
instance.status = Check.STATUS_PENDING
else:
pass
@receiver(post_save, sender=Check)
def check_post_save(sender, instance, **kwargs):
if instance.status < Check.STATUS_PROCESSING:
log.debug('Check {} needs processing'.format(instance.pk))
# check for running task - terminate if found
if instance.task_id:
log.info('task {} running - need to terminate.'.format(instance.task_id))
revoke(instance.task_id, terminate=True, signal='SIGKILL')
if RUN_ASYNC:
celery_task = preflight_check_task.apply_async((instance,))
Check.objects.filter(pk=instance.pk).update(task_id=celery_task.id)
else:
            # just for debugging - the non-async version
preflight_check_task(instance)
@receiver(post_delete, sender=Check)
def check_post_delete(sender, instance, **kwargs):
# check for running task - terminate if found
if instance.task_id:
log.info('task {} running - need to terminate.'.format(instance.task_id))
revoke(instance.task_id, terminate=True, signal='SIGKILL')
if instance.media_file:
if os.path.isfile(instance.media_file.path):
os.remove(instance.media_file.path)
|
gpl-3.0
| 2,993,727,627,394,881,000
| 26.387597
| 85
| 0.658081
| false
| 3.770544
| false
| false
| false
|
chris-ch/myledger-online-bookkeeping
|
_old/server/src/oas/models.py
|
1
|
16991
|
"""
Django model for OAS.
CREATE DATABASE IF NOT EXISTS `accounting`
CHARACTER SET utf8 COLLATE utf8_general_ci;
USE `accounting`;
-- at this point run manage.py syncdb
DROP TABLE IF EXISTS `oas_template_journal_entry`;
DROP TABLE IF EXISTS `oas_template_journal_entry_group`;
DROP TABLE IF EXISTS `oas_template_name`;
DROP TABLE IF EXISTS `oas_initial_amount`;
DROP TABLE IF EXISTS `oas_internal_investment`;
DROP TABLE IF EXISTS `oas_journal_entry`;
DROP TABLE IF EXISTS `oas_journal_entry_group`;
DROP TABLE IF EXISTS `oas_account`;
DROP TABLE IF EXISTS `oas_account_type`;
DROP TABLE IF EXISTS `oas_accounting_period`;
DROP TABLE IF EXISTS `oas_legal_entity`;
DROP TABLE IF EXISTS `oas_currency`;
CREATE TABLE `oas_account_type` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`code` varchar(1) NOT NULL,
`name` varchar(64) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `code` (`code`),
UNIQUE KEY `name` (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_currency` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`code` varchar(3) NOT NULL,
`name` varchar(64) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `code` (`code`),
UNIQUE KEY `name` (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_legal_entity` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) NOT NULL,
`currency_id` int(11) NOT NULL,
`code` varchar(32) NOT NULL,
`name` varchar(64) NOT NULL,
`description` longtext,
`is_individual` tinyint(1) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`),
UNIQUE KEY `code` (`code`),
UNIQUE KEY `name` (`name`),
KEY `user_id` (`user_id`),
KEY `currency_id` (`currency_id`),
CONSTRAINT `legal_entity_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`),
CONSTRAINT `legal_entity_ibfk_2` FOREIGN KEY (`currency_id`) REFERENCES `oas_currency` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_account` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`code` varchar(32) NOT NULL,
`name` varchar(192) NOT NULL,
`description` longtext,
`account_type_id` int(11) NOT NULL,
`legal_entity_id` int(11) NOT NULL,
`user_id` int(11) NOT NULL,
`parent_id` int(11) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY (`code`, `legal_entity_id`),
UNIQUE KEY (`name`, `legal_entity_id`),
KEY `account_type_id` (`account_type_id`),
KEY `legal_entity_id` (`legal_entity_id`),
KEY `user_id` (`user_id`),
KEY `parent_id` (`parent_id`),
CONSTRAINT `account_ibfk_1` FOREIGN KEY (`account_type_id`) REFERENCES `oas_account_type` (`id`),
CONSTRAINT `account_ibfk_2` FOREIGN KEY (`legal_entity_id`) REFERENCES `oas_legal_entity` (`id`),
CONSTRAINT `account_ibfk_3` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`),
CONSTRAINT `account_ibfk_4` FOREIGN KEY (`parent_id`) REFERENCES `oas_account` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_accounting_period` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(128) NOT NULL,
`legal_entity_id` int(11) NOT NULL,
`till_date` datetime NULL,
PRIMARY KEY (`id`),
UNIQUE KEY (`name`,`legal_entity_id`),
UNIQUE KEY (`till_date`,`legal_entity_id`),
KEY (`legal_entity_id`),
CONSTRAINT FOREIGN KEY (`legal_entity_id`) REFERENCES `oas_legal_entity` (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_journal_entry_group` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`date` datetime NOT NULL,
`description` longtext NULL,
`currency_id` int(11) NOT NULL,
`accounting_period_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
KEY `currency_id` (`currency_id`),
KEY `accounting_period_id` (`accounting_period_id`),
CONSTRAINT FOREIGN KEY (`currency_id`) REFERENCES `oas_currency` (`id`),
CONSTRAINT FOREIGN KEY (`accounting_period_id`) REFERENCES `oas_accounting_period` (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_journal_entry` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`description` longtext NULL,
`ref_num` int(11) NULL,
`account_id` int(11) NOT NULL,
is_debit tinyint(1) NOT NULL,
`quantity` decimal(20,6) NOT NULL DEFAULT '1.000000',
`unit_cost` decimal(20,6) NOT NULL DEFAULT '1.000000',
`group_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
KEY `account_id` (`account_id`),
CONSTRAINT FOREIGN KEY (`account_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`group_id`) REFERENCES `oas_journal_entry_group` (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_internal_investment` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`account_asset_id` int(11) NOT NULL,
`account_liability_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY (`account_asset_id`,`account_liability_id`),
UNIQUE KEY (`account_asset_id`),
KEY (`account_asset_id`),
KEY (`account_liability_id`),
CONSTRAINT FOREIGN KEY (`account_asset_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`account_liability_id`) REFERENCES `oas_account` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_initial_amount` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`account_id` int(11) NOT NULL,
`accounting_period_id` int(11) NOT NULL,
`quantity` decimal(20,6) NOT NULL DEFAULT '1.000000',
`unit_cost` decimal(20,6) NOT NULL DEFAULT '1.000000',
PRIMARY KEY (`id`),
UNIQUE KEY (`account_id`,`accounting_period_id`),
KEY (`account_id`),
KEY (`accounting_period_id`),
CONSTRAINT FOREIGN KEY (`account_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`accounting_period_id`) REFERENCES `oas_accounting_period` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_template_name` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(96) NOT NULL,
`description` longtext,
`template_currency_id` int(11) NOT NULL,
`legal_entity_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
CONSTRAINT FOREIGN KEY (`template_currency_id`) REFERENCES `oas_currency` (`id`),
CONSTRAINT FOREIGN KEY (`legal_entity_id`) REFERENCES `oas_legal_entity` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_template_journal_entry_group` (
`id` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`id`),
`template_name_id` int(11) NOT NULL,
CONSTRAINT FOREIGN KEY (`template_name_id`) REFERENCES `oas_template_name` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_template_journal_entry` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`description` longtext,
`account_id` int(11) NOT NULL,
`is_debit` tinyint(1) NOT NULL,
`quantity` decimal(20,6) NOT NULL DEFAULT '1.000000',
`unit_cost` decimal(20,6) NOT NULL DEFAULT '1.000000',
`template_group_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
CONSTRAINT FOREIGN KEY (`account_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`template_group_id`) REFERENCES `oas_template_journal_entry_group` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
INSERT INTO oas_account_type (id,code,name) VALUES (1,'A','Asset');
INSERT INTO oas_account_type (id,code,name) VALUES (2,'L','Liability & Equity');
INSERT INTO oas_account_type (id,code,name) VALUES (3,'I','Income');
INSERT INTO oas_account_type (id,code,name) VALUES (4,'E','Expense');
INSERT INTO oas_currency (code, name) VALUES ('USD', 'US Dollar');
INSERT INTO oas_currency (code, name) VALUES ('GBP', 'Sterling');
INSERT INTO oas_currency (code, name) VALUES ('CHF', 'Swiss Franc');
INSERT INTO oas_currency (code, name) VALUES ('EUR', 'Euro');
"""
import logging
_LOG = logging.getLogger('oas.model')
from django.db import models
from django.contrib.auth.models import User
import old.server.oas.tools
CODE_ASSETS = 'A'
CODE_LIABILITIES_EQUITY = 'L'
CODE_INCOME = 'I'
CODE_EXPENSE = 'E'
#
# Custom User Model
#
#
# App model starts here
#
class AccountType(models.Model):
id = models.AutoField(primary_key=True)
code = models.CharField(unique=True, max_length=3)
name = models.CharField(unique=True, max_length=192)
class Meta:
db_table = u'oas_account_type'
def __unicode__(self):
return '%s - %s' % (self.code, self.name)
class Currency(models.Model):
id = models.AutoField(primary_key=True)
code = models.CharField(unique=True, max_length=9)
name = models.CharField(unique=True, max_length=192)
class Meta:
db_table = u'oas_currency'
def __unicode__(self):
return self.code
def build_tree(accounts):
tree = old.server.oas.tools.SimpleTreeSet()
for account in accounts:
if account.parent is None:
if not tree.has_node(account):
tree.add_root(account)
else:
tree.create_parent_child(account.parent, account)
return tree.group()
class LegalEntity(models.Model):
id = models.AutoField(primary_key=True)
code = models.CharField(unique=True, max_length=96)
name = models.CharField(unique=True, max_length=192)
description = models.TextField(blank=True)
is_individual = models.IntegerField(null=False, default=False, blank=True)
user = models.ForeignKey(User, related_name='legal_entities', on_delete=models.PROTECT)
currency = models.ForeignKey(Currency, related_name='+', null=False, blank=False, on_delete=models.PROTECT)
class Meta:
db_table = u'oas_legal_entity'
def get_asset_accounts(self):
accounts = self.accounts.filter(account_type__code=CODE_ASSETS)
as_tree = build_tree(accounts)
return as_tree
def get_liability_accounts(self):
accounts = self.accounts.filter(account_type__code=CODE_LIABILITIES_EQUITY)
as_tree = build_tree(accounts)
return as_tree
def get_income_accounts(self):
accounts = self.accounts.filter(account_type__code=CODE_INCOME)
as_tree = build_tree(accounts)
return as_tree
def get_expense_accounts(self):
accounts = self.accounts.filter(account_type__code=CODE_EXPENSE)
as_tree = build_tree(accounts)
return as_tree
def clean_journal_entries(self, account_code=None):
accounts = Account.objects.filter(legal_entity=self)
if account_code is not None:
accounts = accounts.filter(code=account_code)
for account in accounts:
JournalEntry.objects.filter(account=account).delete()
def __unicode__(self):
return self.code
class Account(models.Model):
id = models.AutoField(primary_key=True)
code = models.CharField(unique=True, max_length=32)
name = models.CharField(unique=True, max_length=192)
description = models.TextField(blank=True)
account_type = models.ForeignKey(AccountType, related_name='+', on_delete=models.PROTECT)
legal_entity = models.ForeignKey(LegalEntity, related_name='accounts', on_delete=models.PROTECT)
user = models.ForeignKey(User, related_name='accounts', on_delete=models.PROTECT)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children', on_delete=models.PROTECT)
class Meta:
db_table = u'oas_account'
unique_together = (('legal_entity', 'code'), ('legal_entity', 'name'))
def update_account_type(self, account_type, visited=None):
"""
Because of redundancy in db model,
children account types need to be updated
"""
if visited is None:
visited = set()
_LOG.debug('visited: %s', visited)
_LOG.debug('node: %s', self)
assert self not in visited, 'tree not consistent: loop detected on %s' % (self)
visited.add(self)
self.account_type = account_type
# recursive call updating children account types
for child in self.children.all():
child.update_account_type(account_type, visited)
def __unicode__(self):
return '%s - %s' % (self.code, self.name)
class InternalInvestment(models.Model):
id = models.AutoField(primary_key=True)
account_asset = models.ForeignKey(Account, related_name='owner_account', unique=True, on_delete=models.PROTECT)
account_liability = models.ForeignKey(Account, related_name='investment_account', on_delete=models.PROTECT)
class Meta:
db_table = u'oas_internal_investment'
unique_together = (('account_asset', 'account_liability'),)
class AccountingPeriod(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(unique=True, max_length=128, null=False)
till_date = models.DateTimeField(null=True)
legal_entity = models.ForeignKey(LegalEntity, null=False, related_name='periods', on_delete=models.PROTECT)
class Meta:
db_table = u'oas_accounting_period'
        unique_together = (('legal_entity', 'name'), ('legal_entity', 'till_date'))
class JournalEntryGroup(models.Model):
id = models.AutoField(primary_key=True)
date = models.DateTimeField(null=False)
description = models.TextField(null=True)
currency = models.ForeignKey(Currency, related_name='+', null=False, on_delete=models.PROTECT)
accounting_period = models.ForeignKey(AccountingPeriod, related_name='+', null=False, on_delete=models.PROTECT)
class Meta:
db_table = u'oas_journal_entry_group'
def __unicode__(self):
return '<group: %s, %s>' % (self.date, self.description)
class JournalEntry(models.Model):
id = models.AutoField(primary_key=True)
description = models.TextField(null=True)
ref_num = models.IntegerField(null=True, default=False, blank=True)
quantity = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
unit_cost = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
is_debit = models.IntegerField(null=False, default=False, blank=True)
account = models.ForeignKey(Account, related_name='entries', null=False, on_delete=models.PROTECT)
group = models.ForeignKey(JournalEntryGroup, related_name='entries', null=False, on_delete=models.PROTECT)
class Meta:
db_table = u'oas_journal_entry'
def __unicode__(self):
account_type = ('credit', 'debit')[self.is_debit]
return '%s' % str([account_type, self.description, self.quantity * self.unit_cost, self.group])
class InitialAmount(models.Model):
id = models.AutoField(primary_key=True)
quantity = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
unit_cost = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
account = models.ForeignKey(Account, related_name='+', null=False, on_delete=models.PROTECT)
accounting_period = models.ForeignKey(AccountingPeriod, related_name='initial_amounts', null=False,
on_delete=models.PROTECT)
class Meta:
db_table = u'oas_initial_amount'
unique_together = (('account', 'accounting_period'),)
def __unicode__(self):
return '%s' % str([self.accounting_period, self.account, self.quantity * self.unit_cost])
class TemplateSet(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(unique=True, max_length=192)
description = models.TextField(null=True)
template_currency = models.ForeignKey(Currency, related_name='+', null=False, on_delete=models.PROTECT)
legal_entity = models.ForeignKey(LegalEntity, null=False, related_name='templates', on_delete=models.PROTECT)
class Meta:
db_table = u'oas_template_name'
unique_together = (('legal_entity', 'name'),)
def __unicode__(self):
return '<template set: %s>' % (self.name)
class TemplateJournalEntryGroup(models.Model):
id = models.AutoField(primary_key=True)
template_set = models.ForeignKey(TemplateSet, db_column='template_name_id', related_name='templates', null=False,
on_delete=models.PROTECT)
class Meta:
db_table = u'oas_template_journal_entry_group'
def __unicode__(self):
return '<group template: %s>' % (self.template_set)
class TemplateJournalEntry(models.Model):
id = models.AutoField(primary_key=True)
description = models.TextField(null=True)
quantity = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
unit_cost = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
is_debit = models.IntegerField(null=False, default=False, blank=True)
account = models.ForeignKey(Account, related_name='template_entries', null=False, on_delete=models.PROTECT)
template_group = models.ForeignKey(TemplateJournalEntryGroup, related_name='entries', null=False,
on_delete=models.PROTECT)
class Meta:
db_table = u'oas_template_journal_entry'
def __unicode__(self):
account_type = ('credit', 'debit')[self.is_debit]
return '%s' % str([account_type, self.description, self.quantity * self.unit_cost, self.template_group])
|
mit
| 1,577,952,250,280,129,500
| 38.331019
| 117
| 0.682244
| false
| 3.291554
| false
| false
| false
|
sadikovi/queue
|
test/__init__.py
|
1
|
2186
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2016 sadikovi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import sys
import unittest
# Select what tests to run
RUN_TESTS = {
"test.test_const": True,
"test.test_queue": True,
"test.test_util": True,
"test.test_context": True,
"test.test_scheduler": True,
"test.test_spark": True,
"test.test_simple": True,
"test.test_submission": True
}
suites = unittest.TestSuite()
# Add individual test module
def addTests(module_name):
if module_name in RUN_TESTS and RUN_TESTS[module_name]:
module = importlib.import_module(module_name)
batch = loadSuites(module)
suites.addTest(batch)
else:
print "@skip: '%s' tests" % module_name
# Load test suites for module
def loadSuites(module):
gsuite = unittest.TestSuite()
for suite in module.suites():
print "Adding %s" % suite
gsuite.addTest(unittest.TestLoader().loadTestsFromTestCase(suite))
return gsuite
def collectSystemTests():
for test_name in RUN_TESTS.keys():
addTests(test_name)
def main():
print ""
print "== Gathering tests info =="
print "-" * 70
collectSystemTests()
print ""
print "== Running tests =="
print "-" * 70
results = unittest.TextTestRunner(verbosity=2).run(suites)
num = len([x for x in RUN_TESTS.values() if not x])
print "%s Number of test modules skipped: %d" %("OK" if num == 0 else "WARN", num)
print ""
# Fail if there is at least 1 error or failure
if results and len(results.failures) == 0 and len(results.errors) == 0:
return 0
else:
return 1
|
apache-2.0
| 6,212,667,995,313,246,000
| 28.146667
| 86
| 0.666057
| false
| 3.711375
| true
| false
| false
|
mignev/startappsync
|
gitconfig/core.py
|
1
|
3702
|
import os
import sys
from re import search
from gitconfig.config import (
ConfigDict,
ConfigFile,
)
if sys.version_info < (2, 7):
from ordereddict import OrderedDict
from collections import MutableMapping
else:
from collections import (
OrderedDict,
MutableMapping,
)
class GitRepoNotFoundError(Exception): pass
class GitConfig():
def __init__(self,**kwargs):
self.path = kwargs.get('path', None)
self.file = kwargs.get('file', None)
if self.path:
if os.path.exists(self.path):
config_path = self.detect_git_config(self.path)
if os.path.exists(config_path):
self.config_path = config_path
self.config = ConfigFile.from_path(config_path)
else:
raise GitRepoNotFoundError(self.path)
else:
raise IOError(self.path)
else:
self.config = ConfigFile.from_file(self.file)
def detect_git_config(self, path):
config_path = ""
if search(r'\.git/config', path):
config_path = path
elif search(r'\.git', path):
config_path = "{0}/config".format(path)
else:
config_path = "{0}/.git/config".format(path)
return config_path
def has_remotes(self):
return self.has_section('remote')
def has_remote(self, remote_name):
return self.has_section('remote', remote_name)
def has_section(self, section_type, section_name = ''):
config_sections = self.config.itersections()
"""
These variables are used in return statements only
They are used to experiment with readability
"""
yes_there_is_section_with_this_name = yes_this_section_exists = True
        sorry_search_section_does_not_exist = False
for section in config_sections:
this_section_type = section[0]
            search_for_section_with_specific_name = (section_name != '')
            if not search_for_section_with_specific_name:
if this_section_type == section_type:
return yes_this_section_exists # True
else:
try:
this_section_name = section[1]
if this_section_name == section_name:
return yes_there_is_section_with_this_name # True
except IndexError:
""" These type of sections are like [core], [alias], [user]"""
continue
        return sorry_search_section_does_not_exist # False
@property
def remotes(self):
config_sections = self.config.items()
remotes = OrderedDict()
for section in config_sections:
section_type = section[0][0]
if section_type == 'remote':
remote_name = section[0][1]
remote_properties = section[1]
remotes[remote_name] = remote_properties
return remotes
@property
def sections(self):
config_sections = self.config.items()
return [section[0][0] for section in config_sections]
def set(self, section, key, value):
return self.config.set((section,), key, value)
def get(self, section, key):
section_details = section.split('.')
if len(section_details) == 2:
section_type, section_name = section_details
else:
section_type, section_name = (section, '')
return self.config.get((section_type, section_name), key)
def save(self):
return self.config.write_to_path(self.config_path)
|
mit
| -6,701,524,496,558,599,000
| 28.616
| 82
| 0.567261
| false
| 4.211604
| true
| false
| false
|
tuxxi/OpenBurn
|
openburn/application/propellant_db.py
|
1
|
2142
|
from typing import Dict
import jsonpickle
from qtpy.QtCore import QObject, Signal
from openburn.core.propellant import OpenBurnPropellant
class PropellantDatabase(QObject):
database_ready = Signal()
propellant_added = Signal(str)
propellant_edited = Signal(str)
propellant_removed = Signal(str)
def __init__(self, filename: str = None):
super(PropellantDatabase, self).__init__()
# Dict ordered by propellant name : propellant
        self.propellants: Dict[str, OpenBurnPropellant] = {}
if filename is not None:
self.load_database(filename)
    def propellant_names(self):
        # self.propellants is keyed by name; return the names of the stored propellants
        return [prop.name for prop in self.propellants.values()]
def load_database(self, filename: str):
self.clear_database()
with open(filename, 'r') as f:
data = f.read()
if len(data) > 0:
self.propellants = jsonpickle.decode(data)
self.database_filename: str = filename
self.database_ready.emit()
def save_database(self):
with open(self.database_filename, 'w+') as f:
if len(self.propellants) > 0:
jsonpickle.set_encoder_options('json', sort_keys=True, indent=4)
f.write(jsonpickle.encode(self.propellants))
def clear_database(self) -> None:
self.propellants.clear()
def add_propellant(self, propellant: OpenBurnPropellant) -> None:
self.propellants[propellant.name] = propellant
self.propellant_added.emit(propellant.name) # emit signal
def remove_propellant(self, key: str) -> None:
"""
Removes a propellant from the database
:param key: the propellant name to be removed
"""
self.propellants.pop(key)
self.propellant_removed.emit(key) # emit signal
def update_propellant(self, key: str, new_prop: OpenBurnPropellant) -> None:
"""Updates the propellant database
:param key: the old propellant's name
:param new_prop: the new propellant, to replace old_prop
"""
self.propellants[key] = new_prop
self.propellant_edited.emit(key)
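# Editorial sketch, not part of the original file: typical usage, assuming an
# OpenBurnPropellant instance `prop` and a jsonpickle database file on disk:
#
#   db = PropellantDatabase('propellants.json')
#   db.propellant_added.connect(lambda name: print('added', name))
#   db.add_propellant(prop)   # stored under prop.name, emits propellant_added
#   db.save_database()        # re-serializes the whole dict with jsonpickle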
|
gpl-3.0
| 4,604,087,554,832,698,400
| 32.46875
| 80
| 0.637722
| false
| 3.416268
| false
| false
| false
|
agancsos/python
|
LinkedInSort_in_Python.py
|
1
|
2716
|
#!/bin/python
class linkedinSort:
    project_name="";
    project_month="";
    project_year=0;
    def month_to_int(self, month=None):
        # default to this project's own month when no argument is given
        if month is None:
            month=self.project_month;
        if month == "January":
            return 1;
        if month == "February":
            return 2;
        if month == "March":
            return 3;
        if month == "April":
            return 4;
        if month == "May":
            return 5;
        if month == "June":
            return 6;
        if month == "July":
            return 7;
        if month == "August":
            return 8;
        if month == "September":
            return 9;
        if month == "October":
            return 10;
        if month == "November":
            return 11;
        if month == "December":
            return 12;
        return 0;
    def set_from_other(self,other):
        self.project_name=other.project_name;
        self.project_month=other.project_month;
        self.project_year=other.project_year;
    def print_dates(self):
        # expects self.project_dates to be a list of "Name Month Year" strings
        for date in self.project_dates:
            date_components=date.split(" ");
            print self.month_to_int(date_components[1]);
    def set_dates(self,name,month,year):
        self.project_name=name;
        self.project_month=month;
        self.project_year=year;
    def linkedSort(self):
        return self;
    def sortDates(self,dates_array):
        # simple exchange sort: order by year first, then by month within a year
        for sorting in dates_array:
            for sorting2 in dates_array:
                if(sorting.project_year<sorting2.project_year):
                    temp_linkedin=linkedinSort();
                    temp_linkedin.set_from_other(sorting);
                    sorting.set_from_other(sorting2);
                    sorting2.set_from_other(temp_linkedin);
                if(sorting.project_year==sorting2.project_year):
                    if(sorting.month_to_int()<sorting2.month_to_int()):
                        temp_linkedin=linkedinSort();
                        temp_linkedin.set_from_other(sorting);
                        sorting.set_from_other(sorting2);
                        sorting2.set_from_other(temp_linkedin);
##int main
to_be_sorted=[];
project_dates=["Sample1 November 2010","PaperWorks October 2012","ResumeBuuilder October 2013","Resume_Sampler September 2013",
    "iNCREPYT_Alpha August 2013","LangSim November 2013","iNCREPTY_LT_Alpha August 2013","DOY April 2013",
    "JokeBook January 2013","HIRED January 2014","JokeBook2 January 2014","Pic2Text January 2014","BlackBook January 2014",
    "LangSim_LT February 2014","MovieBook February 2014","Geode October 2012","Star_wars_Roll-Ups","Students.py October 2013"];
i=0;
for dates in project_dates:
    test_linkedin=linkedinSort();
    temp_comp=dates.split(" ");
    if len(temp_comp) < 3:
        # entries without a month and year (e.g. "Star_wars_Roll-Ups") cannot be sorted
        continue;
    temp_name=temp_comp[0];
    temp_month=temp_comp[1];
    temp_year=temp_comp[2];
    test_linkedin.set_dates(temp_name.replace("_"," "),temp_month,temp_year);
    to_be_sorted.insert(i,test_linkedin);
    i+=1;
linkedinSort().sortDates(to_be_sorted);
for project in to_be_sorted:
    print project.project_name,project.project_month,project.project_year;
|
mit
| 8,169,540,432,007,339,000
| 28.188889
| 129
| 0.662003
| false
| 3.147161
| false
| false
| false
|
jocelynj/weboob
|
weboob/applications/qhavesex/qhavesex.py
|
1
|
1273
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010 Romain Bignon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from weboob.capabilities.dating import ICapDating
from weboob.tools.application.qt import QtApplication
from .main_window import MainWindow
class QHaveSex(QtApplication):
APPNAME = 'qhavesex'
VERSION = '0.4'
COPYRIGHT = 'Copyright(C) 2010 Romain Bignon'
STORAGE_FILENAME = 'dating.storage'
def main(self, argv):
self.create_storage(self.STORAGE_FILENAME)
self.load_backends(ICapDating)
self.main_window = MainWindow(self.config, self.weboob)
self.main_window.show()
return self.weboob.loop()
|
gpl-3.0
| 7,059,353,888,160,273,000
| 34.361111
| 76
| 0.732129
| false
| 3.78869
| false
| false
| false
|
mwrlabs/veripy
|
contrib/rfc3633/rr/basic_message_exchange.py
|
1
|
3313
|
from contrib.rfc3315.constants import *
from contrib.rfc3633.dhcpv6_pd import DHCPv6PDHelper
from scapy.all import *
from veripy.assertions import *
from veripy.models import IPAddress
class BasicMessageExchangeTestCase(DHCPv6PDHelper):
"""
DHCPv6-PD Basic Message Exchange
Verify that a device can properly interoperate while using DHCPv6-PD
@private
Source: IPv6 Ready DHCPv6 Interoperability Test Suite (Section 4.1)
"""
restart_uut = True
def run(self):
self.logger.info("Waiting for a DHCPv6 Solicit message, with a IA for Prefix Delegation...")
r1 = self.node(1).received(src=self.target(1).link_local_ip(), dst=AllDHCPv6RelayAgentsAndServers, type=DHCP6_Solicit)
assertGreaterThanOrEqualTo(1, len(r1), "expected to receive one-or-more DHCPv6 Solicit messages")
assertHasLayer(DHCP6OptIA_PD, r1[0], "expected the DHCPv6 Solicit message to contain an IA for Prefix Delegation")
self.logger.info("Sending a DHCPv6 Advertise message, offering a prefix...")
self.node(1).send(
IPv6(src=str(self.node(1).link_local_ip()), dst=str(self.target(1).link_local_ip()))/
UDP(sport=DHCPv6DestPort, dport=DHCPv6SourcePort)/
self.build_dhcpv6_pd_advertise(r1[0], self.node(1), self.target(1), T1=50, T2=80))
self.logger.info("Checking for a DHCPv6 Request message...")
r2 = self.node(1).received(src=self.target(1).link_local_ip(), dst=AllDHCPv6RelayAgentsAndServers, type=DHCP6_Request)
assertGreaterThanOrEqualTo(1, len(r2), "expected to receive a DHCPv6 Request message")
assertHasLayer(DHCP6OptIA_PD, r2[0], "expected the DHCPv6 Request to contain an IA for Prefix Delegation")
assertHasLayer(DHCP6OptIAPrefix, r2[0], "expected the DHCPv6 Request to contain an IA Prefix")
assertEqual(IPAddress.identify(self.node(1).iface(0).link.v6_prefix), r2[0][DHCP6OptIAPrefix].prefix, "expected the requested Prefix to match the advertised one")
assertEqual(self.node(1).iface(0).link.v6_prefix_size, r2[0][DHCP6OptIAPrefix].plen, "expected the requested Prefix Length to match the advertised one")
self.logger.info("Sending a DHCPv6 Reply message, with the offered IA for Prefix Delegation...")
self.node(1).send(
IPv6(src=str(self.node(1).link_local_ip()), dst=str(self.target(1).link_local_ip()))/
UDP(sport=DHCPv6DestPort, dport=DHCPv6SourcePort)/
self.build_dhcpv6_pd_reply(r2[0], self.node(1), self.target(1)))
self.ui.wait(50)
self.node(1).clear_received()
self.logger.info("Waiting for a DHCPv6 Renew message, with a IA for Prefix Delegation...")
r3 = self.node(1).received(src=self.target(1).link_local_ip(), dst=AllDHCPv6RelayAgentsAndServers, type=DHCP6_Renew)
assertGreaterThanOrEqualTo(1, len(r3), "expected to receive one-or-more DHCPv6 Renew messages")
assertHasLayer(DHCP6OptIA_PD, r3[0], "expected the DHCPv6 Renew to contain an IA for Prefix Delegation")
assertHasLayer(DHCP6OptIAPrefix, r3[0], "expected the DHCPv6 Renew to contain an IA Prefix")
assertEqual(r2[0][DHCP6OptIAPrefix].prefix, r3[0][DHCP6OptIAPrefix].prefix, "expected the original prefix to be renewed")
|
gpl-3.0
| -1,700,819,788,144,245,000
| 58.178571
| 170
| 0.699064
| false
| 3.204062
| false
| false
| false
|
sidorov-si/ngser
|
filter_gbk_by_cds.py
|
1
|
3272
|
#!/usr/bin/env python
"""
Filter GBK file by CDS: retain only those records which have correct CDS.
Correct CDS must:
1) contain a 'product' field;
2) have length that is a multiple of 3;
3) have start and stop codons.
Usage:
filter_gbk_by_cds.py -i <input_GBK_file> -o <output_GBK_file>
Options:
-h --help Show this screen.
--version Show version.
-i <input_GBK_file> Input GBK file.
-o <output_GBK_file> Output GBK file.
"""
import sys
print
modules = ["docopt", "os", "Bio"]
exit_flag = False
for module in modules:
try:
__import__(module)
except ImportError:
exit_flag = True
sys.stderr.write("Error: Python module " + module + " is not installed.\n")
if exit_flag:
sys.stderr.write("You can install these modules with a command: pip install <module>\n")
sys.stderr.write("(Administrator privileges may be required.)\n")
sys.exit(1)
from docopt import docopt
from os.path import exists
from os.path import isfile
from Bio import SeqIO
def filter_gbk(input_gbk_filename, output_gbk_filename):
print 'Input GBK file: ', input_gbk_filename
print 'Output GBK file: ', output_gbk_filename
with open(output_gbk_filename, 'w') as outfile:
print 'Start filtering.'
total_count = 0
filtered_count = 0
for index, record in enumerate(SeqIO.parse(open(input_gbk_filename), "genbank")):
cds_exists = False
for number, feature in enumerate(record.features):
if feature.type == 'CDS' and 'product' in feature.qualifiers:
cds_exists = True
try:
_ = feature.extract(record.seq).translate(cds = True)
except Exception, e:
# something is wrong with this CDS (start and/or stop codons are missing,
                        # or the length of the CDS is not a multiple of 3)
print 'Record', record.id, ':', str(e), '=> Filtered out.'
filtered_count += 1
continue # just take the next locus
SeqIO.write(record, outfile, "genbank")
if not cds_exists:
print 'Record', record.id, ':', 'No CDS => Filtered out.'
filtered_count += 1
if index % 100 == 0 and index != 0:
print index, 'records are processed.'
total_count = index + 1
print 'Finished filtering.'
print total_count, 'records were processed.'
print filtered_count, 'of them were filtered out.'
print str(total_count - filtered_count), 'records remain.'
if __name__ == '__main__':
arguments = docopt(__doc__, version='filter_gbk_by_cds 0.2')
input_gbk_filename = arguments["-i"]
if not exists(input_gbk_filename):
print "Error: Can't find an input GBK file: no such file '" + \
input_gbk_filename + "'. Exit.\n"
sys.exit(1)
if not isfile(input_gbk_filename):
print "Error: Input GBK file must be a regular file. " + \
"Something else given. Exit.\n"
sys.exit(1)
output_gbk_filename = arguments["-o"].rstrip('/')
filter_gbk(input_gbk_filename, output_gbk_filename)
|
gpl-2.0
| -5,694,534,526,533,918,000
| 34.565217
| 97
| 0.587408
| false
| 3.722412
| false
| false
| false
|
rolisz/walter_experiments
|
motion/control.py
|
1
|
1293
|
from ssc32 import *
from time import sleep
from smooth import getPositions
# Run with sudo
ssc = SSC32('/dev/ttyUSB0', 115200)
ssc[0].degrees = 20
ssc[0].max = 2500
ssc[0].min = 500
ssc[0].deg_max = +90.0
ssc[0].deg_min = -90.0
#TODO: fix library so it doesn't take 100ms for the first instruction
# And which overrides the current command even if it has the same targetDegs
def moveTo(motor, mi, time, targetDegs, dryRun=True):
currDegs = motor[mi].degrees
motor[mi].degrees = targetDegs
if dryRun:
print time, motor[mi].degrees
else:
motor.commit(time*1000)
sleep(time)
def smoothMoveTo(motor, mi, time, targetDegs, dryRun=True):
freq = 100.0
timePerStep = time/freq
currDegs = motor[mi].degrees
distToGo = targetDegs - currDegs
for pos in getPositions(currDegs, targetDegs, freq):
moveTo(motor, mi, timePerStep, pos, dryRun)
#elbow: 35 -> -100
#ssc[0].degrees = 0
#ssc.commit(4000)
ssc[0].degrees = -30
ssc.commit(1000)
x=-30
while True:
k = raw_input()
if x < 30:
x = 30
moveTo(ssc, 0, 1, x, True)
else:
x = -30
smoothMoveTo(ssc, 0, 1, x, True)
ssc.close()
#
#minus = -1
#while True:
# ssc[0].degrees = minus * 20
# ssc.commit()
# minus *= -1
# sleep(4)
|
mit
| 2,671,244,723,943,717,400
| 22.089286
| 76
| 0.63109
| false
| 2.798701
| false
| false
| false
|
pghant/big-theta
|
scraper/EquationScraper/pipelines.py
|
1
|
3154
|
# -*- coding: utf-8 -*-
from py2neo import authenticate, Graph, Node, Relationship
from scrapy import signals, exceptions
from scrapy.exporters import JsonLinesItemExporter, PprintItemExporter
from .latex import utils as latexutils
class EquationscraperPipeline(object):
def __init__(self):
self.jsl_exporter = None
self.pprnt_exporter = None
self.files = {}
authenticate('localhost:7474', 'neo4j', 'big-theta-team')
self.graph = Graph('localhost:7474/db/data')
@classmethod
def from_crawler(cls, crawler):
pipeline = cls()
crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
return pipeline
def spider_opened(self, spider):
file_pprnt = open('%s_pprint-items0' % spider.name, 'w+b', )
file_jsl = open('%s_json-items0' % spider.name, 'w+b', )
self.jsl_exporter = JsonLinesItemExporter(file_jsl)
self.pprnt_exporter = PprintItemExporter(file_pprnt)
self.files[spider] = [file_pprnt, file_jsl]
self.pprnt_exporter.indent = 2
self.pprnt_exporter.start_exporting()
self.jsl_exporter.start_exporting()
def spider_closed(self, spider):
self.pprnt_exporter.finish_exporting()
self.jsl_exporter.finish_exporting()
for f in self.files[spider]:
f.close()
def process_item(self, item, spider):
if spider.settings.getbool("EXPORT_JSON"):
self.pprnt_exporter.export_item(item)
self.jsl_exporter.export_item(item)
node_equation_label = 'EQUATION'
node_subject_label = 'SUBJECT'
link_relation = 'LINKS_TO'
page_relation = 'SAME_PAGE_AS'
item_array = [item['last_item'].copy(), item.copy()]
subject_nodes_array = []
for idx, elem in enumerate(item_array):
subject_nodes_array.append(Node(node_subject_label,
title=item_array[idx]['title'],
url=item_array[idx]['url'],
categories=item_array[idx]['categories']))
for expression in elem['maths']:
expression = latexutils.strip_styles(expression)
if latexutils.contains_equality_command(expression):
latex_equation_node = Node(node_equation_label,
name='Equation<' + item_array[idx]['title'] + '>',
equation=expression)
self.graph.merge(Relationship(subject_nodes_array[idx],
page_relation,
latex_equation_node,
distance=0))
self.graph.merge(Relationship(subject_nodes_array[0], link_relation, subject_nodes_array[1],
distance=item_array[1]['link_dist']))
del item
raise exceptions.DropItem
|
mit
| -8,319,851,079,591,015,000
| 35.674419
| 100
| 0.55929
| false
| 4.177483
| false
| false
| false
|
mmilutinovic1313/zipline-with-algorithms
|
tests/test_sources.py
|
1
|
7302
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from itertools import cycle
import numpy as np
from six import integer_types
from unittest import TestCase
import zipline.utils.factory as factory
from zipline.sources import (DataFrameSource,
DataPanelSource,
RandomWalkSource)
from zipline.utils import tradingcalendar as calendar_nyse
from zipline.finance.trading import with_environment
class TestDataFrameSource(TestCase):
def test_df_source(self):
source, df = factory.create_test_df_source()
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for expected_dt, expected_price in df.iterrows():
sid0 = next(source)
assert expected_dt == sid0.dt
assert expected_price[0] == sid0.price
def test_df_sid_filtering(self):
_, df = factory.create_test_df_source()
source = DataFrameSource(df, sids=[0])
assert 1 not in [event.sid for event in source], \
"DataFrameSource should only stream selected sid 0, not sid 1."
def test_panel_source(self):
source, panel = factory.create_test_panel_source(source_type=5)
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertTrue('sid' in event)
self.assertTrue('arbitrary' in event)
self.assertTrue('type' in event)
self.assertTrue(hasattr(event, 'volume'))
self.assertTrue(hasattr(event, 'price'))
self.assertEquals(event['type'], 5)
self.assertEquals(event['arbitrary'], 1.)
self.assertEquals(event['sid'], 0)
self.assertTrue(isinstance(event['volume'], int))
self.assertTrue(isinstance(event['arbitrary'], float))
@with_environment()
def test_yahoo_bars_to_panel_source(self, env=None):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = factory.load_bars_from_yahoo(stocks=stocks,
indexes={},
start=start,
end=end)
check_fields = ['sid', 'open', 'high', 'low', 'close',
'volume', 'price']
source = DataPanelSource(data)
sids = [
asset.sid for asset in
[env.asset_finder.lookup_symbol(symbol, as_of_date=end)
for symbol in stocks]
]
stocks_iter = cycle(sids)
for event in source:
for check_field in check_fields:
self.assertIn(check_field, event)
self.assertTrue(isinstance(event['volume'], (integer_types)))
self.assertEqual(next(stocks_iter), event['sid'])
@with_environment()
def test_nan_filter_dataframe(self, env=None):
env.update_asset_finder(identifiers=[4, 5])
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.DataFrame(np.random.randn(2, 2),
index=dates,
columns=[4, 5])
# should be filtered
df.loc[dates[0], 4] = np.nan
# should not be filtered, should have been ffilled
df.loc[dates[1], 5] = np.nan
source = DataFrameSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
@with_environment()
def test_nan_filter_panel(self, env=None):
env.update_asset_finder(identifiers=[4, 5])
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.Panel(np.random.randn(2, 2, 2),
major_axis=dates,
items=[4, 5],
minor_axis=['price', 'volume'])
# should be filtered
df.loc[4, dates[0], 'price'] = np.nan
# should not be filtered, should have been ffilled
df.loc[5, dates[1], 'price'] = np.nan
source = DataPanelSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
class TestRandomWalkSource(TestCase):
def test_minute(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1991-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end)
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertTrue(13 <= event.dt.hour <= 21,
"event.dt.hour == %i, not during market \
hours." % event.dt.hour)
def test_day(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1992-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end, freq='daily')
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertEqual(event.dt.hour, 0)
|
apache-2.0
| 1,614,067,241,080,615,000
| 40.022472
| 75
| 0.574363
| false
| 3.996716
| true
| false
| false
|
renzon/gaecookie
|
setup.py
|
1
|
4928
|
import codecs
import os
import sys
from distutils.util import convert_path
from fnmatch import fnmatchcase
from setuptools import setup, find_packages
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ["*.py", "*.pyc", "*$py.class", "*~", ".*", "*.bak"]
standard_exclude_directories = [
".*", "CVS", "_darcs", "./build", "./dist", "EGG-INFO", "*.egg-info"
]
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
where=".",
package="",
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{"package": [files]}
Where ``files`` is a list of all the files in that package that
don"t match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won"t be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren"t
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), "", package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, "__init__.py"))
and not prefix):
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
stack.append((fn, prefix + name + "/", package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
PACKAGE = "gaecookie"
DESCRIPTION = "A library for signing strings and validating cookies."
NAME = "gaecookie"
AUTHOR = "Renzo Nuccitelli"
AUTHOR_EMAIL = "renzo.n@gmail.com"
URL = "https://github.com/renzon/gaecookie"
VERSION = __import__(PACKAGE).__version__
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=URL,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="BSD",
url=URL,
packages=find_packages(exclude=["tests.*", "tests"]),
package_data=find_package_data(PACKAGE, only_in_packages=False),
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Framework :: Paste",
],
zip_safe=False,
install_requires=[
'gaebusiness>=4.0',
'gaegraph>=2.3',
'tekton>=3.1.1'
]
)
|
mit
| 6,134,216,573,355,813,000
| 33.950355
| 86
| 0.551136
| false
| 4.281494
| false
| false
| false
|
peret/visualize-bovw
|
tests/test_caltech_manager.py
|
1
|
8382
|
import unittest
from datamanagers.CaltechManager import CaltechManager
from datamanagers import InvalidDatasetException, NoSuchCategoryException
import os
import numpy as np
from sklearn.decomposition import PCA
from test_datamanager import BASE_PATH
class TestCaltechManager(unittest.TestCase):
def setUp(self):
self.datamanager = CaltechManager()
self.datamanager.change_base_path(os.path.join(BASE_PATH, "testdata"))
def test_invalid_dataset_caltech(self):
self.assertRaises(InvalidDatasetException, self.datamanager.build_sample_matrix, "rubbish", "test")
def test_invalid_dataset_caltech2(self):
self.assertRaises(InvalidDatasetException, self.datamanager.build_class_vector, "rubbish", "test")
def test_invalid_category_caltech(self):
self.assertRaises(NoSuchCategoryException, self.datamanager.get_positive_samples, "test", "rubbish")
def test_invalid_category_caltech2(self):
self.assertRaises(NoSuchCategoryException, self.datamanager.build_sample_matrix, "test", "rubbish")
def test_training_sample_matrix(self):
samples = self.datamanager.build_sample_matrix("train", "TestFake")
should_be = np.array([
[ 0.44842428, 0.50402522, 0.45302102, 0.54796243, 0.82176286, 0.11623112],
[ 0.31588301, 0.05166245, 0.16203263, 0.02196996, 0.96935761, 0.9854272 ],
[ 0.12442154, 0.57743013, 0.9548108 , 0.22592719, 0.10155164, 0.60750473],
[ 0.53320956, 0.18181397, 0.60112703, 0.09004746, 0.31448245, 0.85619318],
[ 0.18139255, 0.83218205, 0.87969971, 0.81630158, 0.57571691, 0.08127511]
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
def test_test_sample_matrix(self):
samples = self.datamanager.build_sample_matrix("test", "TestFake")
should_be = np.array([
[ 0.64663881, 0.55629711, 0.11966438, 0.04559849, 0.69156636, 0.4500224 ],
[ 0.38948518, 0.33885501, 0.567841 , 0.36167425, 0.18220702, 0.57701336],
[ 0.08660618, 0.83642531, 0.9239062 , 0.53778457, 0.56708116, 0.13766008],
[ 0.31313366, 0.88874122, 0.20000355, 0.56186443, 0.15771926, 0.81349361]
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
def test_all_sample_matrix(self):
samples = self.datamanager.build_sample_matrix("all", "TestFake")
should_be = np.array([
[ 0.44842428, 0.50402522, 0.45302102, 0.54796243, 0.82176286, 0.11623112],
[ 0.31588301, 0.05166245, 0.16203263, 0.02196996, 0.96935761, 0.9854272 ],
[ 0.64663881, 0.55629711, 0.11966438, 0.04559849, 0.69156636, 0.4500224 ],
[ 0.38948518, 0.33885501, 0.567841 , 0.36167425, 0.18220702, 0.57701336],
[ 0.12442154, 0.57743013, 0.9548108 , 0.22592719, 0.10155164, 0.60750473],
[ 0.53320956, 0.18181397, 0.60112703, 0.09004746, 0.31448245, 0.85619318],
[ 0.18139255, 0.83218205, 0.87969971, 0.81630158, 0.57571691, 0.08127511],
[ 0.08660618, 0.83642531, 0.9239062 , 0.53778457, 0.56708116, 0.13766008],
[ 0.31313366, 0.88874122, 0.20000355, 0.56186443, 0.15771926, 0.81349361]
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
def test_all_sample_matrix_exclude_feature(self):
self.datamanager.exclude_feature = 4
samples = self.datamanager.build_sample_matrix("all", "TestFake")
should_be = np.array([
[ 0.44842428, 0.50402522, 0.45302102, 0.54796243, 0.11623112],
[ 0.31588301, 0.05166245, 0.16203263, 0.02196996, 0.9854272 ],
[ 0.64663881, 0.55629711, 0.11966438, 0.04559849, 0.4500224 ],
[ 0.38948518, 0.33885501, 0.567841 , 0.36167425, 0.57701336],
[ 0.12442154, 0.57743013, 0.9548108 , 0.22592719, 0.60750473],
[ 0.53320956, 0.18181397, 0.60112703, 0.09004746, 0.85619318],
[ 0.18139255, 0.83218205, 0.87969971, 0.81630158, 0.08127511],
[ 0.08660618, 0.83642531, 0.9239062 , 0.53778457, 0.13766008],
[ 0.31313366, 0.88874122, 0.20000355, 0.56186443, 0.81349361]
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
@unittest.expectedFailure # TODO: dependent on file order
def test_complete_sample_matrix(self):
samples = self.datamanager.build_complete_sample_matrix("train")
should_be = np.array([
[ 0.31313366, 0.88874122, 0.20000355, 0.56186443, 0.15771926, 0.81349361],
[ 0.12442154, 0.57743013, 0.9548108 , 0.22592719, 0.10155164, 0.60750473],
[ 0.53320956, 0.18181397, 0.60112703, 0.09004746, 0.31448245, 0.85619318],
[ 0.18139255, 0.83218205, 0.87969971, 0.81630158, 0.57571691, 0.08127511],
[ 0.44842428, 0.50402522, 0.45302102, 0.54796243, 0.82176286, 0.11623112],
[ 0.31588301, 0.05166245, 0.16203263, 0.02196996, 0.96935761, 0.9854272 ],
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
@unittest.expectedFailure # TODO: dependent on file order
def test_complete_sample_matrix_exclude_feature(self):
self.datamanager.exclude_feature = 1
samples = self.datamanager.build_complete_sample_matrix("train")
should_be = np.array([
[ 0.31313366, 0.20000355, 0.56186443, 0.15771926, 0.81349361],
[ 0.12442154, 0.9548108 , 0.22592719, 0.10155164, 0.60750473],
[ 0.53320956, 0.60112703, 0.09004746, 0.31448245, 0.85619318],
[ 0.18139255, 0.87969971, 0.81630158, 0.57571691, 0.08127511],
[ 0.44842428, 0.45302102, 0.54796243, 0.82176286, 0.11623112],
[ 0.31588301, 0.16203263, 0.02196996, 0.96935761, 0.9854272 ],
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
def test_complete_sample_matrix_fail(self):
self.assertRaises(NotImplementedError, self.datamanager.build_complete_sample_matrix, "all")
def test_training_class_vector(self):
classes = self.datamanager.build_class_vector("train", "TestFake")
should_be = np.array([1, 1, 0, 0, 0])
self.assertTrue((classes==should_be).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, classes))
def test_test_class_vector(self):
classes = self.datamanager.build_class_vector("test", "TestFake")
should_be = np.array([1, 1, 0, 0])
self.assertTrue((classes==should_be).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, classes))
def test_complete_class_vector(self):
classes = self.datamanager.build_class_vector("all", "TestFake")
should_be = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0])
self.assertTrue((classes==should_be).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, classes))
def test_sample_matrix_pca(self):
self.datamanager.use_pca(n_components = 1)
samples = self.datamanager.build_sample_matrix("all", "TestFake")
should_be = np.array([
[-0.24263228],
[0.85717554],
[0.29054203],
[0.03857126],
[-0.18379566],
[0.44021899],
[-0.78841356],
[-0.65111911],
[-0.08255303]
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
|
gpl-2.0
| 71,504,931,090,516,424
| 56.417808
| 117
| 0.620496
| false
| 2.724967
| true
| false
| false
|
Mendelone/forex_trading
|
Algorithm.Python/CustomDataBitcoinAlgorithm.py
|
1
|
5264
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Data import SubscriptionDataSource
from QuantConnect.Python import PythonData
from datetime import date, timedelta, datetime
import decimal
import numpy as np
import json
class CustomDataBitcoinAlgorithm(QCAlgorithm):
'''3.0 CUSTOM DATA SOURCE: USE YOUR OWN MARKET DATA (OPTIONS, FOREX, FUTURES, DERIVATIVES etc).
The new QuantConnect Lean Backtesting Engine is incredibly flexible and allows you to define your own data source.
This includes any data source which has a TIME and VALUE. These are the *only* requirements.
To demonstrate this we're loading in "Bitcoin" data.'''
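    # A custom PythonData type only has to supply a Time and a Value for each
    # point; the Bitcoin class below does so by implementing GetSource (where to
    # fetch raw data from) and Reader (how to turn one raw line into a data point).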
def Initialize(self):
self.SetStartDate(2011, 9, 13)
self.SetEndDate(datetime.now().date() - timedelta(1))
self.SetCash(100000)
# Define the symbol and "type" of our generic data:
self.AddData(Bitcoin, "BTC")
def OnData(self, data):
if "BTC" not in data: return
close = data["BTC"].Close
        # If we don't have any Bitcoin "SHARES" -- invest
        if not self.Portfolio.Invested:
            # Bitcoin used as a tradable asset, like stocks, futures etc.
self.SetHoldings("BTC", 1)
self.Debug("Buying BTC 'Shares': BTC: {0}".format(close))
self.Debug("Time: {0} {1}".format(datetime.now(), close))
class Bitcoin(PythonData):
'''Custom Data Type: Bitcoin data from Quandl - http://www.quandl.com/help/api-for-bitcoin-data'''
def GetSource(self, config, date, isLiveMode):
if isLiveMode:
return SubscriptionDataSource("https://www.bitstamp.net/api/ticker/", SubscriptionTransportMedium.Rest);
#return "http://my-ftp-server.com/futures-data-" + date.ToString("Ymd") + ".zip";
# OR simply return a fixed small data file. Large files will slow down your backtest
return SubscriptionDataSource("http://www.quandl.com/api/v1/datasets/BCHARTS/BITSTAMPUSD.csv?sort_order=asc", SubscriptionTransportMedium.RemoteFile);
def Reader(self, config, line, date, isLiveMode):
coin = Bitcoin()
coin.Symbol = config.Symbol
if isLiveMode:
# Example Line Format:
# {"high": "441.00", "last": "421.86", "timestamp": "1411606877", "bid": "421.96", "vwap": "428.58", "volume": "14120.40683975", "low": "418.83", "ask": "421.99"}
try:
liveBTC = json.loads(line)
# If value is zero, return None
value = decimal.Decimal(liveBTC["last"])
if value == 0: return None
coin.Time = datetime.now()
coin.Value = value
coin["Open"] = float(liveBTC["open"])
coin["High"] = float(liveBTC["high"])
coin["Low"] = float(liveBTC["low"])
coin["Close"] = float(liveBTC["last"])
coin["Ask"] = float(liveBTC["ask"])
coin["Bid"] = float(liveBTC["bid"])
coin["VolumeBTC"] = float(liveBTC["volume"])
coin["WeightedPrice"] = float(liveBTC["vwap"])
return coin
except ValueError:
# Do nothing, possible error in json decoding
return None
# Example Line Format:
# Date Open High Low Close Volume (BTC) Volume (Currency) Weighted Price
# 2011-09-13 5.8 6.0 5.65 5.97 58.37138238, 346.0973893944 5.929230648356
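        # Rows that don't start with a digit (e.g. the CSV header line) are skipped.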
if not (line.strip() and line[0].isdigit()): return None
try:
data = line.split(',')
# If value is zero, return None
value = decimal.Decimal(data[4])
if value == 0: return None
coin.Time = datetime.strptime(data[0], "%Y-%m-%d")
coin.Value = value
coin["Open"] = float(data[1])
coin["High"] = float(data[2])
coin["Low"] = float(data[3])
coin["Close"] = float(data[4])
coin["VolumeBTC"] = float(data[5])
coin["VolumeUSD"] = float(data[6])
coin["WeightedPrice"] = float(data[7])
return coin;
except ValueError:
            # Do nothing, possible error while parsing the CSV line
return None
|
apache-2.0
| -5,720,431,468,602,672,000
| 40.769841
| 174
| 0.605663
| false
| 3.877671
| false
| false
| false
|
jiejieling/RdsMonitor
|
src/api/controller/BaseController.py
|
1
|
3536
|
from dataprovider.dataprovider import RedisLiveDataProvider
import tornado.ioloop
import tornado.web
import dateutil.parser
class BaseController(tornado.web.RequestHandler):
def initialize(self):
self.stats_provider = RedisLiveDataProvider().get_provider()
def datetime_to_list(self, datetime):
"""Converts a datetime to a list.
Args:
datetime (datetime): The datetime to convert.
"""
parsed_date = dateutil.parser.parse(datetime)
# don't return the last two fields, we don't want them.
return tuple(parsed_date.timetuple())[:-2]
# todo : fix this
def average_data(self, data):
"""Averages data.
TODO: More docstring here, once functionality is understood.
"""
average = []
deviation=1024*1024
start = dateutil.parser.parse(data[0][0])
end = dateutil.parser.parse(data[-1][0])
difference = end - start
weeks, days = divmod(difference.days, 7)
minutes, seconds = divmod(difference.seconds, 60)
hours, minutes = divmod(minutes, 60)
        # TODO: These if/elif/else branches should probably be broken out into
# individual functions to make it easier to follow what's going on.
if difference.days > 0:
current_max = 0
current_current = 0
current_d = 0
for dt, max_memory, current_memory in data:
d = dateutil.parser.parse(dt)
if d.day != current_d:
current_d = d.day
average.append([dt, max_memory, current_memory])
current_max = max_memory
current_current = current_memory
else:
if max_memory > current_max or \
current_memory > current_current:
average.pop()
average.append([dt, max_memory, current_memory])
current_max=max_memory
current_current=current_memory
elif hours > 0:
current_max = 0
current_current = 0
current = -1
keep_flag = False
for dt, max_memory, current_memory in data:
d = dateutil.parser.parse(dt)
if d.hour != current:
current = d.hour
average.append([dt, max_memory, current_memory])
current_max=max_memory
current_current=current_memory
keep_flag=False
elif abs(max_memory - current_max) > deviation or \
abs(current_memory - current_current) > deviation:
#average.pop()
average.append([dt, max_memory, current_memory])
current_max = max_memory
current_current = current_memory
keep_flag = True
elif max_memory > current_max or \
current_memory > current_current:
if keep_flag != True:
average.pop()
average.append([dt, max_memory, current_memory])
current_max = max_memory
current_current = current_memory
keep_flag = False
else:
current_max = 0
current_current = 0
current_m = -1
keep_flag = False
for dt, max_memory, current_memory in data:
d = dateutil.parser.parse(dt)
if d.minute != current_m:
current_m = d.minute
average.append([dt, max_memory, current_memory])
current_max = max_memory
current_current = current_memory
keep_flag = False
elif abs(max_memory - current_max) > deviation or \
abs(current_memory - current_current) > deviation:
#average.pop()
average.append([dt, max_memory, current_memory])
current_max = max_memory
current_current = current_memory
keep_flag = True
elif max_memory > current_max or \
current_memory > current_current:
if keep_flag!=True:
average.pop()
average.append([dt,max_memory,current_memory])
current_max=max_memory
current_current=current_memory
keep_flag=False
return average
|
mit
| 2,281,642,187,381,527,300
| 29.222222
| 72
| 0.670532
| false
| 3.361217
| false
| false
| false
|
Aloomaio/googleads-python-lib
|
examples/ad_manager/v201808/inventory_service/update_ad_units.py
|
1
|
2752
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates ad unit sizes by adding a banner ad size.
To determine which ad units exist, run get_all_ad_units.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the ad unit to get.
AD_UNIT_ID = 'INSERT_AD_UNIT_ID_HERE'
def main(client, ad_unit_id):
# Initialize appropriate service.
inventory_service = client.GetService('InventoryService', version='v201808')
# Create a statement to select a single ad unit by ID.
statement = (ad_manager.StatementBuilder(version='v201808')
.Where('id = :id')
.WithBindVariable('id', ad_unit_id))
# Get ad units by statement.
response = inventory_service.getAdUnitsByStatement(
statement.ToStatement())
# Add the size 468x60 to the ad unit.
ad_unit_size = {
'size': {
'width': '468',
'height': '60'
},
'environmentType': 'BROWSER'
}
if 'results' in response and len(response['results']):
updated_ad_units = []
for ad_unit in response['results']:
if 'adUnitSizes' not in ad_unit:
ad_unit['adUnitSizes'] = []
ad_unit['adUnitSizes'].append(ad_unit_size)
updated_ad_units.append(ad_unit)
# Update ad unit on the server.
ad_units = inventory_service.updateAdUnits(updated_ad_units)
# Display results.
for ad_unit in ad_units:
ad_unit_sizes = ['{%s x %s}' % (size['size']['width'],
size['size']['height'])
for size in ad_unit['adUnitSizes']]
print ('Ad unit with ID "%s", name "%s", and sizes [%s] was updated'
% (ad_unit['id'], ad_unit['name'], ','.join(ad_unit_sizes)))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, AD_UNIT_ID)
|
apache-2.0
| 7,114,115,951,235,613,000
| 32.560976
| 78
| 0.665334
| false
| 3.73913
| false
| false
| false
|
iagcl/data_pipeline
|
tests/applier/data_greenplum_cdc_applier.py
|
1
|
3253
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import data_pipeline.constants.const as const
import data_postgres_cdc_applier
from .data_common import TestCase, UPDATE_SSP_SQL
tests = [
TestCase(
description="Apply update statement containing a single primary key SET with end of batch",
input_table_name="CONNCT_CDC_PK5_COLS10",
input_commit_statements=[
'',
'update "SYS"."CONNCT_CDC_PK5_COLS10" set "COMPNDPK_1" = \'0\' where "COMPNDPK_1" = \'26\'',
''],
input_record_types=[const.START_OF_BATCH, const.DATA, const.END_OF_BATCH],
input_operation_codes=['', const.UPDATE, ''],
input_primary_key_fields="COMPNDPK_1",
input_record_counts=[0, 0, 1],
input_commit_lsns=[0, 0, 0],
expect_sql_execute_called=[
None,
None,
None],
expect_execute_called_times=[0, 0, 0],
expect_audit_db_execute_sql_called=[None, None, (UPDATE_SSP_SQL, ('CDCApply', 0, 'myprofile', 1, 'ctl', 'connct_cdc_pk5_cols10'))],
expect_commit_called_times=[0, 0, 1],
expect_insert_row_count=[0, 0, 0],
expect_update_row_count=[0, 1, 1],
expect_delete_row_count=[0, 0, 0],
expect_source_row_count=[0, 1, 1],
expect_batch_committed=[const.UNCOMMITTED, const.UNCOMMITTED, const.COMMITTED,]
)
, TestCase(
description="Apply update statement containing a primary key and non-primary key in SET with end of batch",
input_table_name="CONNCT_CDC_PK5_COLS10",
input_commit_statements=[
'',
'update "SYS"."CONNCT_CDC_PK5_COLS10" set "COMPNDPK_1" = \'0\', "COL_V_2" = \'26.9\' where "COMPNDPK_1" = \'26\'',
''],
input_record_types=[const.START_OF_BATCH, const.DATA, const.END_OF_BATCH],
input_operation_codes=['', const.UPDATE, ''],
input_primary_key_fields="COMPNDPK_1",
input_record_counts=[0, 0, 1],
input_commit_lsns=[0, 0, 0],
expect_sql_execute_called=[
None,
"UPDATE ctl.CONNCT_CDC_PK5_COLS10 SET COL_V_2 = '26.9' WHERE COMPNDPK_1 = '26'; -- lsn: 0, offset: 1",
None],
expect_execute_called_times=[0, 1, 1],
expect_audit_db_execute_sql_called=[None, None, (UPDATE_SSP_SQL, ('CDCApply', 0, 'myprofile', 1, 'ctl', 'connct_cdc_pk5_cols10'))],
expect_commit_called_times=[0, 0, 1],
expect_insert_row_count=[0, 0, 0],
expect_update_row_count=[0, 1, 1],
expect_delete_row_count=[0, 0, 0],
expect_source_row_count=[0, 1, 1],
expect_batch_committed=[const.UNCOMMITTED, const.UNCOMMITTED, const.COMMITTED,]
)
]
|
apache-2.0
| 3,493,367,925,225,126,000
| 40.177215
| 135
| 0.664617
| false
| 3.139961
| false
| false
| false
|
google-research/google-research
|
bigg/bigg/torch_ops/tensor_ops.py
|
1
|
3956
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: skip-file
import torch
from torch.nn import Module
from torch.nn.parameter import Parameter
from torch.autograd import Function
import numpy as np
from bigg.common.consts import t_float
class MultiIndexSelectFunc(Function):
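    # Gathers rows idx_froms[i] of each source matrix into rows idx_tos[i] of one
    # output tensor; backward() scatters the output gradient back into per-matrix
    # gradients of the original shapes.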
@staticmethod
def forward(ctx, idx_froms, idx_tos, *mats):
assert len(idx_tos) == len(idx_froms) == len(mats)
cols = mats[0].shape[1]
assert all([len(x.shape) == 2 for x in mats])
assert all([x.shape[1] == cols for x in mats])
num_rows = sum([len(x) for x in idx_tos])
out = mats[0].new(num_rows, cols)
for i, mat in enumerate(mats):
x_from = idx_froms[i]
x_to = idx_tos[i]
if x_from is None:
out[x_to] = mat.detach()
else:
assert len(x_from) == len(x_to)
out[x_to] = mat[x_from].detach()
ctx.idx_froms = idx_froms
ctx.idx_tos = idx_tos
ctx.shapes = [x.shape for x in mats]
return out
@staticmethod
def backward(ctx, grad_output):
idx_froms, idx_tos = ctx.idx_froms, ctx.idx_tos
list_grad_mats = [None, None]
for i in range(len(idx_froms)):
x_from = idx_froms[i]
x_to = idx_tos[i]
if x_from is None:
grad_mat = grad_output[x_to].detach()
else:
grad_mat = grad_output.new(ctx.shapes[i]).zero_()
grad_mat[x_from] = grad_output[x_to].detach()
list_grad_mats.append(grad_mat)
return tuple(list_grad_mats)
class MultiIndexSelect(Module):
def forward(self, idx_froms, idx_tos, *mats):
return MultiIndexSelectFunc.apply(idx_froms, idx_tos, *mats)
multi_index_select = MultiIndexSelect()
def test_multi_select():
a = Parameter(torch.randn(4, 2))
b = Parameter(torch.randn(3, 2))
d = Parameter(torch.randn(5, 2))
idx_froms = [[0, 1], [1, 2], [3, 4]]
idx_tos = [[4, 5], [0, 1], [2, 3]]
c = multi_index_select(idx_froms, idx_tos, a, b, d)
print('===a===')
print(a)
print('===b===')
print(b)
print('===d===')
print(d)
print('===c===')
print(c)
t = torch.sum(c)
t.backward()
print(a.grad)
print(b.grad)
print(d.grad)
class PosEncoding(Module):
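    # Sinusoidal position encoding: dimension i computes sin(pos / p_i + s_i) with
    # p_i = base**(-2*floor(i/2)/dim); odd dimensions use s_i = pi/2 + bias so they
    # act as the cosine counterpart of the even (sine) dimensions.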
def __init__(self, dim, device, base=10000, bias=0):
super(PosEncoding, self).__init__()
p = []
sft = []
for i in range(dim):
b = (i - i % 2) / dim
p.append(base ** -b)
if i % 2:
sft.append(np.pi / 2.0 + bias)
else:
sft.append(bias)
self.device = device
self.sft = torch.tensor(sft, dtype=t_float).view(1, -1).to(device)
self.base = torch.tensor(p, dtype=t_float).view(1, -1).to(device)
def forward(self, pos):
with torch.no_grad():
if isinstance(pos, list):
pos = torch.tensor(pos, dtype=t_float).to(self.device)
pos = pos.view(-1, 1)
x = pos / self.base + self.sft
return torch.sin(x)
if __name__ == '__main__':
# test_multi_select()
pos_enc = PosEncoding(128, 'cpu')
print(pos_enc([1, 2, 3]))
|
apache-2.0
| 5,617,877,974,705,925,000
| 28.969697
| 74
| 0.571284
| false
| 3.253289
| false
| false
| false
|
PurpleMyst/porcupine
|
porcupine/plugins/statusbar.py
|
1
|
1725
|
from tkinter import ttk
from porcupine import get_tab_manager, utils
# i have experimented with a logging handler that displays logging
# messages in the label, but it's not as good an idea as it sounds:
# not all INFO messages are something that users should see all the time
# this widget is kind of weird
class LabelWithEmptySpaceAtLeft(ttk.Label):
def __init__(self, master):
self._spacer = ttk.Frame(master)
self._spacer.pack(side='left', expand=True)
super().__init__(master)
self.pack(side='left')
def destroy(self):
self._spacer.destroy()
super().destroy()
class StatusBar(ttk.Frame):
def __init__(self, master, tab):
super().__init__(master)
self.tab = tab
# one label for each tab-separated thing
self.labels = [ttk.Label(self)]
self.labels[0].pack(side='left')
tab.bind('<<StatusChanged>>', self.do_update, add=True)
self.do_update()
# this is do_update() because tkinter has a method called update()
def do_update(self, junk=None):
parts = self.tab.status.split('\t')
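        # tab.status is a single string whose tab-separated parts each get their
        # own label, e.g. "Line 1, Column 1\tUTF-8" (illustrative value only).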
# there's always at least one part, the label added in
# __init__ is not destroyed here
while len(self.labels) > len(parts):
self.labels.pop().destroy()
while len(self.labels) < len(parts):
self.labels.append(LabelWithEmptySpaceAtLeft(self))
for label, text in zip(self.labels, parts):
label['text'] = text
def on_new_tab(event):
tab = event.data_widget
StatusBar(tab.bottom_frame, tab).pack(side='bottom', fill='x')
def setup():
utils.bind_with_data(get_tab_manager(), '<<NewTab>>', on_new_tab, add=True)
|
mit
| -7,980,209,914,607,490,000
| 29.263158
| 79
| 0.628406
| false
| 3.66242
| false
| false
| false
|
rgeorgi/intent
|
intent/utils/dicts.py
|
1
|
20697
|
"""
Created on Aug 26, 2013
@author: rgeorgi
"""
import sys, re, unittest
from collections import defaultdict, Callable, OrderedDict
class CountDict(object):
def __init__(self):
self._dict = defaultdict(int)
def add(self, key, value=1):
self[key] += value
def __str__(self):
return self._dict.__str__()
def __repr__(self):
return self._dict.__repr__()
    def distribution(self, use_keys = list, add_n = 0):
        # Add-n smoothed relative frequencies. The denominator carries the added
        # smoothing mass so that the default add_n=0 reduces to plain frequencies.
        return {k: (self[k] + add_n) / (self.total() + add_n * len(self._dict)) for k in self.keys()}
def total(self):
values = self._dict.values()
total = 0
for v in values:
total += v
return total
#===========================================================================
# Stuff that should be inheritable
#===========================================================================
def __getitem__(self, k):
return self._dict.__getitem__(k)
def __setitem__(self, k, v):
self._dict.__setitem__(k, v)
def __contains__(self, k):
return self._dict.__contains__(k)
def __len__(self):
return self._dict.__len__()
def __delitem__(self, k):
self._dict.__delitem__(k)
def keys(self):
return self._dict.keys()
def items(self):
return self._dict.items()
# -----------------------------------------------------------------------------
def largest(self):
return sorted(self.items(), reverse=True, key=lambda k: k[1])[0]
def most_frequent(self, minimum = 0, num = 1):
"""
Return the @num entries with the highest counts that
also have at least @minimum occurrences.
@param minimum: int
@param num: int
"""
items = list(self.items())
items.sort(key = lambda item: item[1], reverse=True)
ret_items = []
for item in items:
if item[1] > minimum:
ret_items.append(item[0])
if num and len(ret_items) == num:
break
return ret_items
def most_frequent_counts(self, minimum = 0, num = 1):
most_frequent_keys = self.most_frequent(minimum, num)
return [(key, self[key]) for key in most_frequent_keys]
def __add__(self, other):
d = self.__class__()
for key in self.keys():
d.add(key, self[key])
for key in other.keys():
d.add(key, other[key])
return d
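# Illustrative usage (not part of the original module):
#   c = CountDict(); c.add('dog'); c.add('dog'); c.add('cat')
#   c.total() -> 3;  c.most_frequent() -> ['dog']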
class DefaultOrderedDict(OrderedDict):
# Source: http://stackoverflow.com/a/6190500/562769
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not isinstance(default_factory, Callable)):
raise TypeError('first argument must be callable')
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'OrderedDefaultDict(%s, %s)' % (self.default_factory,
OrderedDict.__repr__(self))
class TwoLevelCountDict(object):
def __init__(self):
self._dict = defaultdict(CountDict)
def __add__(self, other):
new = self.__class__()
for key_a in other.keys():
for key_b in other[key_a].keys():
new.add(key_a, key_b, other[key_a][key_b])
for key_a in self.keys():
for key_b in self[key_a].keys():
new.add(key_a, key_b, self[key_a][key_b])
return new
def combine(self, other):
for key_a in other.keys():
for key_b in other[key_a].keys():
self.add(key_a, key_b, other[key_a][key_b])
def add(self, key_a, key_b, value=1):
self[key_a][key_b] += value
def top_n(self, key, n=1, min_num = 1, key2_re = None):
s = sorted(self[key].items(), reverse=True, key=lambda x: x[1])
if key2_re:
s = [i for i in s if re.search(key2_re, i[0])]
return s[0:n]
def most_frequent(self, key, num = 1, key2_re = ''):
most_frequent = None
biggest_count = 0
for key2 in self[key].keys():
# The key2_re is used to ignore certain keys
if key2_re and re.search(key2_re, key2):
continue
else:
count = self[key][key2]
if count > biggest_count and count >= num:
most_frequent = key2
biggest_count = count
return most_frequent
def fulltotal(self):
total = 0
for key in self.keys():
total += self.total(key)
return total
def total(self, key):
"""
:param key:
:return: Number of tokens that have the "REAL" tag ``key``
"""
count = 0
for key2 in self[key].keys():
count += self[key][key2]
return count
def distribution(self, as_string = False, as_csv = False):
d = {k:self.total(k)/self.fulltotal() for k in self.keys()}
if not (as_string or as_csv):
return d
else:
rs = ''
for key, value in d.items():
if as_csv:
key += ','
rs += '{:<8s}{:>8.2f}\n'.format(key, value)
return rs
def sub_distribution(self, key, use_keys=list, add_n = 0):
d = self[key]
return d.distribution(use_keys=use_keys, add_n=add_n)
#===========================================================================
# Stuff that should've been inherited
#===========================================================================
def __str__(self):
return self._dict.__str__()
def __getitem__(self, k):
"""
:rtype : CountDict
"""
return self._dict.__getitem__(k)
def __setitem__(self, k, v):
self._dict.__setitem__(k, v)
def __contains__(self, k):
return self._dict.__contains__(k)
def keys(self):
return self._dict.keys()
def __len__(self):
return self._dict.__len__()
#===============================================================================
#
#===============================================================================
class POSEvalDict(TwoLevelCountDict):
"""
This dictionary is used for evaluation. Items are stored in the dictionary as:
{real_label:{assigned_label:count}}
This also supports greedy mapping techniques for evaluation.
"""
def __init__(self):
TwoLevelCountDict.__init__(self)
self.mapping = {}
def keys(self):
return [str(k) for k in TwoLevelCountDict.keys(self)]
def gold_tags(self):
return list(self.keys())
def assigned_tags(self):
t = {}
for tag_a in self.keys():
for tag_b in self[tag_a].keys():
t[tag_b] = True
return list(t.keys())
def _resetmapping(self):
self.mapping = {t:t for t in self.keys()}
def _mapping(self):
if not self.mapping:
self._resetmapping()
def map(self, k):
return self.mapping[k]
def unmap(self, k):
keys = [o for o, n in self.mapping.items() if n == k]
assert len(keys) == 1
return keys[0]
def overall_breakdown(self, title=None):
ret_s = ''
if title:
ret_s = title+','
ret_s += 'accuracy, matches, total\n'
ret_s += '%.2f,%s,%s\n' % (self.accuracy(), self.all_matches(), self.fulltotal())
return ret_s
def unaligned(self, unaligned_tag = 'UNK'):
if self.fulltotal() == 0:
return 0
else:
return float(self.col_total(unaligned_tag)) / self.fulltotal() * 100
def breakdown_csv(self):
ret_s = 'TAG,PRECISION,RECALL,F_1,IN_GOLD,IN_EVAL,MATCHES\n'
for label in self.keys():
ret_s += '%s,%.2f,%.2f,%.2f,%d,%d,%d\n' % (label,
self.tag_precision(label),
self.tag_recall(label),
self.tag_fmeasure(label),
self.total(label),
self.col_total(label),
self.matches(label))
return ret_s
def matches(self, t):
self._mapping()
if t in self.mapping:
mapped = self.mapping[t]
else:
mapped = t
if mapped in self and mapped in self[mapped]:
return self[mapped][mapped]
else:
return 0
def all_matches(self):
self._mapping()
matches = 0
for t in self.keys():
matches += self.matches(t)
return matches
def accuracy(self):
totals = self.fulltotal()
matches = self.all_matches()
#print('%d/%d' % (matches, totals))
return float(matches / totals) * 100 if totals != 0 else 0
def col_total(self, assigned_tag):
"""
:param assigned_tag: The assigned tag to count
:return: The number of tokens that have been assigned the tag ``assigned_tag``, including false positives.
"""
self._mapping()
totals = 0
for tag_b in self.keys():
totals += self[tag_b][assigned_tag]
return totals
# =============================================================================
# Overall Precision / Recall / FMeasure
# =============================================================================
def precision(self):
totals = 0
matches = 0
for assigned_tag in self.assigned_tags():
totals += self.col_total(assigned_tag)
matches += self.matches(assigned_tag)
return (float(matches) / totals * 100) if totals != 0 else 0
def recall(self):
totals = 0
matches = 0
for tag in self.keys():
totals += self.total(tag)
matches += self.matches(tag)
return float(matches) / totals * 100 if totals != 0 else 0
def fmeasure(self):
p = self.precision()
r = self.recall()
        return 2 * (p*r)/(p+r) if (p+r) != 0 else 0
# =============================================================================
# Tag-Level Precision / Recall / FMeasure
# =============================================================================
def tag_precision(self, tag):
"""
Calculate the precision for a given tag
:type tag: str
:rtype: float
"""
self._mapping()
tag_total = self.col_total(tag)
return (float(self.matches(tag)) / tag_total * 100) if tag_total != 0 else 0
def tag_recall(self, tag):
"""
Calculate recall for a given tag
:param tag: Input tag
:rtype: float
"""
total = self.total(tag)
return float(self.matches(tag)) / total * 100 if total != 0 else 0
def tag_fmeasure(self, tag):
"""
Calculate f-measure for a given tag
:param tag:
:rtype: float
"""
p = self.tag_precision(tag)
r = self.tag_recall(tag)
return 2 * (p*r)/(p+r) if (p+r) != 0 else 0
# =============================================================================
def greedy_n_to_1(self):
"""
Remap the tags in such a way to maximize matches. In this mapping,
multiple output tags can map to the same gold tag.
"""
self._mapping()
for orig_tag in self.keys():
most_matches = 0
best_alt = orig_tag
# Iterate through every alternate
# and see if remapping fares better.
for alt_tag in self.keys():
if self[alt_tag][orig_tag] > most_matches:
most_matches = self[alt_tag][orig_tag]
best_alt = alt_tag
self.mapping[orig_tag] = best_alt
return self.mapping
def greedy_1_to_1(self, debug=False):
"""
Remap the tags one-to-one in such a way as to maximize matches.
This will be similar to bubble sort. Start off with 1:1. Then, go
through each pair of tags and try swapping the two. If we get a net
gain of matches, then keep the swap, otherwise don't. Repeat until we
get a full run of no swaps.
"""
self._mapping()
mapping = self.mapping
while True:
# 2) Now, for each tag, consider swapping it with another tag, and see if
# we improve.
improved = False
for orig_tag, cur_tag in sorted(mapping.items()):
cur_matches = self[orig_tag][cur_tag]
best_alt = cur_tag
swapped = False
best_delta = 0
for alt_tag in sorted(self.keys()):
# alt_tag -- is the tag we are considering swapping
# the mapping for orig_tag to.
# cur_tag -- is the tag that orig_tag is currently
# mapped to.
# alt_parent_tag -- the tag that previously was
# assigned to alt_tag
alt_parent_tag = self.unmap(alt_tag)
# When looking up the possible matches, remember
# that the first bracket will be the original tag
# and the second tag will be what it is mapped to.
# B MATCHES ------------------------------------------------
matches_b_old = self[alt_tag][alt_parent_tag]
# And the matches that we will see if swapped...
matches_b_new = self[cur_tag][alt_parent_tag]
# A MATCHES ------------------------------------------------
# Now, the matches that we will gain by the swap....
matches_a_new = self[alt_tag][orig_tag]
# And where we last were with relationship to the mapping...
matches_a_old = self[cur_tag][orig_tag]
matches_delta = (matches_b_new - matches_b_old) + (matches_a_new - matches_a_old)
if matches_delta > 0:
best_delta = matches_delta
best_alt = alt_tag
swapped = True
# If we have found a better swap...
if swapped:
new_alt = mapping[orig_tag]
mapping[self.unmap(best_alt)] = new_alt
mapping[orig_tag] = best_alt
improved = True
self.mapping = mapping
break
# Break out of the while loop
# if we have not made a swap.
if not improved:
break
self.mapping = mapping
#===========================================================================
def error_matrix(self, csv=False, ansi=False):
"""
Print an error matrix with the columns being the tags assigned by the
system and the rows being the gold standard answers.
"""
self._mapping()
cellwidth = 12
if not csv:
cell = '%%-%ds' % cellwidth
else:
cell='%s,'
keys = sorted(self.keys())
# Print header
header_start = int((len(keys)*cellwidth)/2)-8
if not csv:
ret_s = ' '*header_start + '[PREDICTED ALONG TOP]' + '\n'
else:
ret_s = ''
ret_s += cell % ''
# Print the column labels
for key in keys:
if self.mapping[key] != key:
ret_s += cell % ('%s(%s)' % (key, self.mapping[key]))
else:
ret_s += cell % key
# Add a total and a recall column.
ret_s += '| ' if not csv else ''
ret_s += (cell % 'TOT') + (cell % 'REC')
# Next Line
ret_s += '\n'
#=======================================================================
# Now, print all the middle of the cells
#=======================================================================
for key_b in keys:
ret_s += cell % key_b
rowtotal = 0
for key_a in keys:
# Make it bold
if self.mapping[key_a] == key_b:
if ansi:
ret_s += '\033[94m'
count = self[key_b][key_a]
rowtotal += count
ret_s += cell % count
# Unbold it...
if self.mapping[key_a] == key_b:
if ansi:
ret_s += '\033[0m'
# Add the total for this row...
ret_s += '| ' if not csv else ''
ret_s += cell % rowtotal
# And calc the recall
if rowtotal == 0:
ret_s += cell % ('%.2f' % 0)
else:
ret_s += cell % ('%.2f' % (float(self[key_b][self.mapping[key_b]]) / rowtotal*100))
ret_s += '\n'
#===================================================================
# Finally, print all the stuff at the bottom
#===================================================================
# 1) Print a separator line at the bottom.
#ret_s += cell % '' # ( Skip a cell )
if not csv:
for i in range(len(keys)+1):
ret_s += cell % ('-'*cellwidth)
ret_s += '\n'
# 2) Print the totals for each column
ret_s += cell % 'TOT'
for key_a in keys:
ret_s += cell % (self.col_total(key_a))
ret_s += '\n'
# 3) Print the precision for each column.
ret_s += cell % 'PREC'
for key_a in keys:
ret_s += cell % ('%.2f' % self.tag_precision(key_a))
return ret_s+'\n'
class MatrixTest(unittest.TestCase):
def runTest(self):
ped = POSEvalDict()
ped.add('A','A',1)
ped.add('A','B',2)
ped.add('A','C',4)
ped.add('B','A',3)
ped.add('B','B',1)
ped.add('C','A',1)
# A B C | TOT REC
# A 1 2 4 | 7 14.29
# B 3 1 0 | 4 25.00
# C 1 0 0 | 1 0.00
# --------------------------------
# TOT 5 3 4
        # PREC  20.00  33.33  0.00
self.assertEqual(ped.tag_precision('A'), float(1)/5*100)
self.assertEqual(ped.tag_recall('A'), float(1)/7*100)
self.assertEqual(ped.tag_recall('C'), 0)
self.assertEqual(ped['A']['C'], 4)
class GreedyTest(unittest.TestCase):
def runTest(self):
ped = POSEvalDict()
ped.add('A','B',5)
ped.add('A','C',2)
ped.add('B','A',10)
ped.add('C','C',10)
# A B C | TOT REC
# A 0 5 0 | 5 0.00
# B 10 0 0 | 10 0.00
# C 0 0 10 | 10 100.00
# --------------------------------
# TOT 10 5 10
# PREC 0.00 0.00 100.00
ped.greedy_1_to_1()
print(ped.error_matrix(True))
class StatDict(defaultdict):
"""
"""
def __init__(self, type=int):
"""
Constructor
"""
defaultdict.__init__(self, type)
@property
def total(self):
return sum(self.values())
@property
def distribution(self):
return {(k,float(v)/self.total) for k, v in self.items()}
@property
def counts(self):
return set(self.items())
|
mit
| 124,940,156,392,500,220
| 28.152113
| 114
| 0.454172
| false
| 4.051086
| false
| false
| false
|
shoyer/numpy
|
numpy/core/function_base.py
|
2
|
16480
|
from __future__ import division, absolute_import, print_function
import functools
import warnings
import operator
from . import numeric as _nx
from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
TooHardError, asanyarray, ndim)
from numpy.core.multiarray import add_docstring
from numpy.core import overrides
__all__ = ['logspace', 'linspace', 'geomspace']
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _index_deprecate(i, stacklevel=2):
try:
i = operator.index(i)
except TypeError:
msg = ("object of type {} cannot be safely interpreted as "
"an integer.".format(type(i)))
i = int(i)
stacklevel += 1
warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel)
return i
def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
dtype=None, axis=None):
return (start, stop)
@array_function_dispatch(_linspace_dispatcher)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
axis=0):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : array_like
The starting value of the sequence.
stop : array_like
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Use -1 to get an axis at the end.
.. versionadded:: 1.16.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float, optional
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
scale (a geometric progression).
logspace : Similar to `geomspace`, but with the end points specified as
logarithms.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
# 2016-02-25, 1.12
num = _index_deprecate(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
# and make sure one can use variables that have an __array_interface__, gh-6634
start = asanyarray(start) * 1.0
stop = asanyarray(stop) * 1.0
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
delta = stop - start
y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta))
    # In-place multiplication y *= delta/div is faster, but prevents the multiplicand
# from overriding what class is produced, and thus prevents, e.g. use of Quantities,
# see gh-7142. Hence, we multiply in place only for standard scalar types.
_mult_inplace = _nx.isscalar(delta)
if num > 1:
step = delta / div
if _nx.any(step == 0):
# Special handling for denormal numbers, gh-5437
y /= div
if _mult_inplace:
y *= delta
else:
y = y * delta
else:
if _mult_inplace:
y *= step
else:
y = y * step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
y += start
if endpoint and num > 1:
y[-1] = stop
if axis != 0:
y = _nx.moveaxis(y, 0, axis)
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
dtype=None, axis=None):
return (start, stop)
@array_function_dispatch(_logspace_dispatcher)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
axis=0):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : array_like
``base ** start`` is the starting value of the sequence.
stop : array_like
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Use -1 to get an axis at the end.
.. versionadded:: 1.16.0
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
geomspace : Similar to logspace, but with endpoints specified directly.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype, copy=False)
def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
axis=None):
return (start, stop)
@array_function_dispatch(_geomspace_dispatcher)
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
"""
Return numbers spaced evenly on a log scale (a geometric progression).
This is similar to `logspace`, but with endpoints specified directly.
Each output sample is a constant multiple of the previous.
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : array_like
The starting value of the sequence.
stop : array_like
The final value of the sequence, unless `endpoint` is False.
In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Use -1 to get an axis at the end.
.. versionadded:: 1.16.0
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
logspace : Similar to geomspace, but with endpoints specified using log
and base.
linspace : Similar to geomspace, but with arithmetic instead of geometric
progression.
arange : Similar to linspace, with the step size specified instead of the
number of samples.
Notes
-----
If the inputs or dtype are complex, the output will follow a logarithmic
spiral in the complex plane. (There are an infinite number of spirals
passing through two points; the output will follow the shortest such path.)
Examples
--------
>>> np.geomspace(1, 1000, num=4)
array([ 1., 10., 100., 1000.])
>>> np.geomspace(1, 1000, num=3, endpoint=False)
array([ 1., 10., 100.])
>>> np.geomspace(1, 1000, num=4, endpoint=False)
array([ 1. , 5.62341325, 31.6227766 , 177.827941 ])
>>> np.geomspace(1, 256, num=9)
array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.])
Note that the above may not produce exact integers:
>>> np.geomspace(1, 256, num=9, dtype=int)
array([ 1, 2, 4, 7, 16, 32, 63, 127, 256])
>>> np.around(np.geomspace(1, 256, num=9)).astype(int)
array([ 1, 2, 4, 8, 16, 32, 64, 128, 256])
Negative, decreasing, and complex inputs are allowed:
>>> np.geomspace(1000, 1, num=4)
array([1000., 100., 10., 1.])
>>> np.geomspace(-1000, -1, num=4)
array([-1000., -100., -10., -1.])
>>> np.geomspace(1j, 1000j, num=4) # Straight line
array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
>>> np.geomspace(-1+0j, 1+0j, num=5) # Circle
array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j,
1.00000000e+00+0.00000000e+00j])
Graphical illustration of ``endpoint`` parameter:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> y = np.zeros(N)
>>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.axis([0.5, 2000, 0, 3])
[0.5, 2000, 0, 3]
>>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
>>> plt.show()
"""
start = asanyarray(start)
stop = asanyarray(stop)
if _nx.any(start == 0) or _nx.any(stop == 0):
raise ValueError('Geometric sequence cannot include zero')
dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
if dtype is None:
dtype = dt
else:
# complex to dtype('complex128'), for instance
dtype = _nx.dtype(dtype)
# Promote both arguments to the same dtype in case, for instance, one is
# complex and another is negative and log would produce NaN otherwise.
# Copy since we may change things in-place further down.
start = start.astype(dt, copy=True)
stop = stop.astype(dt, copy=True)
out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt)
# Avoid negligible real or imaginary parts in output by rotating to
# positive real, calculating, then undoing rotation
if _nx.issubdtype(dt, _nx.complexfloating):
all_imag = (start.real == 0.) & (stop.real == 0.)
if _nx.any(all_imag):
start[all_imag] = start[all_imag].imag
stop[all_imag] = stop[all_imag].imag
out_sign[all_imag] = 1j
both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1)
if _nx.any(both_negative):
_nx.negative(start, out=start, where=both_negative)
_nx.negative(stop, out=stop, where=both_negative)
_nx.negative(out_sign, out=out_sign, where=both_negative)
log_start = _nx.log10(start)
log_stop = _nx.log10(stop)
result = out_sign * logspace(log_start, log_stop, num=num,
endpoint=endpoint, base=10.0, dtype=dtype)
if axis != 0:
result = _nx.moveaxis(result, 0, axis)
return result.astype(dtype, copy=False)
#always succeed
def add_newdoc(place, obj, doc):
"""
Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except Exception:
pass
|
bsd-3-clause
| 124,672,043,304,796,380
| 34.44086
| 88
| 0.600303
| false
| 3.581052
| false
| false
| false
|